hash (string, 64 chars) | content (string, 0–1.51M chars)
---|---
45bbfb82b0dfc8832dc76f1498ae21aafa8f5729ced73f11516bb88439664619
|
import functools
import logging
import os
from pathlib import Path
import sys
import matplotlib
from matplotlib import backend_tools, cbook, rcParams
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, NavigationToolbar2,
StatusbarBase, TimerBase, ToolContainerBase, cursors)
from matplotlib.backend_managers import ToolManager
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
try:
import gi
except ImportError:
raise ImportError("The GTK3 backends require PyGObject")
try:
# :raises ValueError: If module/version is already loaded, already
# required, or unavailable.
gi.require_version("Gtk", "3.0")
except ValueError as e:
    # In this case we want to re-raise as ImportError so that the
    # auto-backend selection logic correctly skips this backend.
raise ImportError from e
from gi.repository import GLib, GObject, Gtk, Gdk
_log = logging.getLogger(__name__)
backend_version = "%s.%s.%s" % (
Gtk.get_major_version(), Gtk.get_micro_version(), Gtk.get_minor_version())
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 96
try:
cursord = {
cursors.MOVE : Gdk.Cursor.new(Gdk.CursorType.FLEUR),
cursors.HAND : Gdk.Cursor.new(Gdk.CursorType.HAND2),
cursors.POINTER : Gdk.Cursor.new(Gdk.CursorType.LEFT_PTR),
cursors.SELECT_REGION : Gdk.Cursor.new(Gdk.CursorType.TCROSS),
cursors.WAIT : Gdk.Cursor.new(Gdk.CursorType.WATCH),
}
except TypeError as exc:
# Happens when running headless. Convert to ImportError to cooperate with
# backend switching.
raise ImportError(exc)
class TimerGTK3(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` using GTK3 for timer events.
Attributes
----------
interval : int
The time between timer events in milliseconds. Default is 1000 ms.
single_shot : bool
Boolean flag indicating whether this timer should operate as single
shot (run once and then stop). Defaults to False.
callbacks : list
Stores list of (func, args) tuples that will be called upon timer
events. This list can be manipulated directly, or the functions
`add_callback` and `remove_callback` can be used.
'''
def _timer_start(self):
# Need to stop it, otherwise we potentially leak a timer id that will
# never be stopped.
self._timer_stop()
self._timer = GLib.timeout_add(self._interval, self._on_timer)
def _timer_stop(self):
if self._timer is not None:
GLib.source_remove(self._timer)
self._timer = None
def _timer_set_interval(self):
# Only stop and restart it if the timer has already been started
if self._timer is not None:
self._timer_stop()
self._timer_start()
def _on_timer(self):
TimerBase._on_timer(self)
# Gtk timeout_add() requires that the callback returns True if it
# is to be called again.
if self.callbacks and not self._single:
return True
else:
self._timer = None
return False
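# Illustrative sketch, not part of the original module: the docstring above
# describes the generic TimerBase API that TimerGTK3 implements.  The helper
# below is hypothetical and never called; it only shows the intended use,
# assuming ``canvas`` is a FigureCanvasGTK3 instance.
def _example_timer_usage(canvas):
    timer = canvas.new_timer(interval=1000)      # period in milliseconds
    timer.add_callback(lambda: print("tick"))    # run on every timeout
    timer.start()                                # schedules GLib.timeout_add
    return timer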
class FigureCanvasGTK3(Gtk.DrawingArea, FigureCanvasBase):
keyvald = {65507: 'control',
65505: 'shift',
65513: 'alt',
65508: 'control',
65506: 'shift',
65514: 'alt',
65361: 'left',
65362: 'up',
65363: 'right',
65364: 'down',
65307: 'escape',
65470: 'f1',
65471: 'f2',
65472: 'f3',
65473: 'f4',
65474: 'f5',
65475: 'f6',
65476: 'f7',
65477: 'f8',
65478: 'f9',
65479: 'f10',
65480: 'f11',
65481: 'f12',
65300: 'scroll_lock',
65299: 'break',
65288: 'backspace',
65293: 'enter',
65379: 'insert',
65535: 'delete',
65360: 'home',
65367: 'end',
65365: 'pageup',
65366: 'pagedown',
65438: '0',
65436: '1',
65433: '2',
65435: '3',
65430: '4',
65437: '5',
65432: '6',
65429: '7',
65431: '8',
65434: '9',
65451: '+',
65453: '-',
65450: '*',
65455: '/',
65439: 'dec',
65421: 'enter',
}
# Setting this as a static constant prevents
# this resulting expression from leaking
event_mask = (Gdk.EventMask.BUTTON_PRESS_MASK |
Gdk.EventMask.BUTTON_RELEASE_MASK |
Gdk.EventMask.EXPOSURE_MASK |
Gdk.EventMask.KEY_PRESS_MASK |
Gdk.EventMask.KEY_RELEASE_MASK |
Gdk.EventMask.ENTER_NOTIFY_MASK |
Gdk.EventMask.LEAVE_NOTIFY_MASK |
Gdk.EventMask.POINTER_MOTION_MASK |
Gdk.EventMask.POINTER_MOTION_HINT_MASK|
Gdk.EventMask.SCROLL_MASK)
def __init__(self, figure):
FigureCanvasBase.__init__(self, figure)
GObject.GObject.__init__(self)
self._idle_draw_id = 0
self._lastCursor = None
self.connect('scroll_event', self.scroll_event)
self.connect('button_press_event', self.button_press_event)
self.connect('button_release_event', self.button_release_event)
self.connect('configure_event', self.configure_event)
self.connect('draw', self.on_draw_event)
self.connect('key_press_event', self.key_press_event)
self.connect('key_release_event', self.key_release_event)
self.connect('motion_notify_event', self.motion_notify_event)
self.connect('leave_notify_event', self.leave_notify_event)
self.connect('enter_notify_event', self.enter_notify_event)
self.connect('size_allocate', self.size_allocate)
self.set_events(self.__class__.event_mask)
self.set_double_buffered(True)
self.set_can_focus(True)
self._renderer_init()
def destroy(self):
#Gtk.DrawingArea.destroy(self)
self.close_event()
if self._idle_draw_id != 0:
GLib.source_remove(self._idle_draw_id)
def scroll_event(self, widget, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - event.y
        if event.direction == Gdk.ScrollDirection.UP:
step = 1
else:
step = -1
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
return False # finish event propagation?
def button_press_event(self, widget, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - event.y
FigureCanvasBase.button_press_event(self, x, y, event.button, guiEvent=event)
return False # finish event propagation?
def button_release_event(self, widget, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - event.y
FigureCanvasBase.button_release_event(self, x, y, event.button, guiEvent=event)
return False # finish event propagation?
def key_press_event(self, widget, event):
key = self._get_key(event)
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
return True # stop event propagation
def key_release_event(self, widget, event):
key = self._get_key(event)
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
return True # stop event propagation
def motion_notify_event(self, widget, event):
if event.is_hint:
t, x, y, state = event.window.get_pointer()
else:
x, y, state = event.x, event.y, event.get_state()
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - y
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
return False # finish event propagation?
def leave_notify_event(self, widget, event):
FigureCanvasBase.leave_notify_event(self, event)
def enter_notify_event(self, widget, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - event.y
FigureCanvasBase.enter_notify_event(self, guiEvent=event, xy=(x, y))
def size_allocate(self, widget, allocation):
dpival = self.figure.dpi
winch = allocation.width / dpival
hinch = allocation.height / dpival
self.figure.set_size_inches(winch, hinch, forward=False)
FigureCanvasBase.resize_event(self)
self.draw_idle()
def _get_key(self, event):
if event.keyval in self.keyvald:
key = self.keyvald[event.keyval]
elif event.keyval < 256:
key = chr(event.keyval)
else:
key = None
modifiers = [
(Gdk.ModifierType.MOD4_MASK, 'super'),
(Gdk.ModifierType.MOD1_MASK, 'alt'),
(Gdk.ModifierType.CONTROL_MASK, 'ctrl'),
]
for key_mask, prefix in modifiers:
if event.state & key_mask:
key = '{0}+{1}'.format(prefix, key)
return key
def configure_event(self, widget, event):
if widget.get_property("window") is None:
return
w, h = event.width, event.height
if w < 3 or h < 3:
return # empty fig
# resize the figure (in inches)
dpi = self.figure.dpi
self.figure.set_size_inches(w / dpi, h / dpi, forward=False)
return False # finish event propagation?
def on_draw_event(self, widget, ctx):
        # to be overridden by GTK3Agg or GTK3Cairo
pass
def draw(self):
# docstring inherited
if self.is_drawable():
self.queue_draw()
def draw_idle(self):
# docstring inherited
if self._idle_draw_id != 0:
return
def idle_draw(*args):
try:
self.draw()
finally:
self._idle_draw_id = 0
return False
self._idle_draw_id = GLib.idle_add(idle_draw)
def new_timer(self, *args, **kwargs):
# docstring inherited
return TimerGTK3(*args, **kwargs)
def flush_events(self):
# docstring inherited
Gdk.threads_enter()
while Gtk.events_pending():
Gtk.main_iteration()
Gdk.flush()
Gdk.threads_leave()
class FigureManagerGTK3(FigureManagerBase):
"""
Attributes
----------
canvas : `FigureCanvas`
The FigureCanvas instance
num : int or str
The Figure number
toolbar : Gtk.Toolbar
The Gtk.Toolbar
vbox : Gtk.VBox
The Gtk.VBox containing the canvas and toolbar
window : Gtk.Window
The Gtk.Window
"""
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
self.window = Gtk.Window()
self.window.set_wmclass("matplotlib", "Matplotlib")
self.set_window_title("Figure %d" % num)
try:
self.window.set_icon_from_file(window_icon)
except Exception:
# Some versions of gtk throw a glib.GError but not all, so I am not
# sure how to catch it. I am unhappy doing a blanket catch here,
# but am not sure what a better way is - JDH
_log.info('Could not load matplotlib icon: %s', sys.exc_info()[1])
self.vbox = Gtk.Box()
self.vbox.set_property("orientation", Gtk.Orientation.VERTICAL)
self.window.add(self.vbox)
self.vbox.show()
self.canvas.show()
self.vbox.pack_start(self.canvas, True, True, 0)
# calculate size for window
w = int(self.canvas.figure.bbox.width)
h = int(self.canvas.figure.bbox.height)
self.toolmanager = self._get_toolmanager()
self.toolbar = self._get_toolbar()
self.statusbar = None
def add_widget(child, expand, fill, padding):
child.show()
self.vbox.pack_end(child, False, False, 0)
size_request = child.size_request()
return size_request.height
if self.toolmanager:
backend_tools.add_tools_to_manager(self.toolmanager)
if self.toolbar:
backend_tools.add_tools_to_container(self.toolbar)
self.statusbar = StatusbarGTK3(self.toolmanager)
h += add_widget(self.statusbar, False, False, 0)
h += add_widget(Gtk.HSeparator(), False, False, 0)
if self.toolbar is not None:
self.toolbar.show()
h += add_widget(self.toolbar, False, False, 0)
self.window.set_default_size(w, h)
def destroy(*args):
Gcf.destroy(num)
self.window.connect("destroy", destroy)
self.window.connect("delete_event", destroy)
if matplotlib.is_interactive():
self.window.show()
self.canvas.draw_idle()
self.canvas.grab_focus()
def destroy(self, *args):
self.vbox.destroy()
self.window.destroy()
self.canvas.destroy()
if self.toolbar:
self.toolbar.destroy()
if (Gcf.get_num_fig_managers() == 0 and
not matplotlib.is_interactive() and
Gtk.main_level() >= 1):
Gtk.main_quit()
def show(self):
# show the figure window
self.window.show()
self.window.present()
def full_screen_toggle(self):
self._full_screen_flag = not self._full_screen_flag
if self._full_screen_flag:
self.window.fullscreen()
else:
self.window.unfullscreen()
_full_screen_flag = False
def _get_toolbar(self):
# must be inited after the window, drawingArea and figure
# attrs are set
if rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2GTK3(self.canvas, self.window)
elif rcParams['toolbar'] == 'toolmanager':
toolbar = ToolbarGTK3(self.toolmanager)
else:
toolbar = None
return toolbar
def _get_toolmanager(self):
# must be initialised after toolbar has been set
if rcParams['toolbar'] == 'toolmanager':
toolmanager = ToolManager(self.canvas.figure)
else:
toolmanager = None
return toolmanager
def get_window_title(self):
return self.window.get_title()
def set_window_title(self, title):
self.window.set_title(title)
def resize(self, width, height):
'set the canvas size in pixels'
#_, _, cw, ch = self.canvas.allocation
#_, _, ww, wh = self.window.allocation
#self.window.resize (width-cw+ww, height-ch+wh)
self.window.resize(width, height)
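# Illustrative sketch, not part of the original module: the attributes listed
# in the FigureManagerGTK3 docstring are normally reached through
# ``figure.canvas.manager`` when the figure was created with this backend
# active (e.g. via pyplot).  The helper below is hypothetical and never called.
def _example_manager_usage(figure):
    manager = figure.canvas.manager          # a FigureManagerGTK3 instance
    manager.set_window_title("My figure")    # wraps Gtk.Window.set_title
    manager.resize(800, 600)                 # resize the window, in pixels
    manager.show()                           # show and present the Gtk.Window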
class NavigationToolbar2GTK3(NavigationToolbar2, Gtk.Toolbar):
def __init__(self, canvas, window):
self.win = window
GObject.GObject.__init__(self)
NavigationToolbar2.__init__(self, canvas)
self.ctx = None
def set_message(self, s):
self.message.set_label(s)
def set_cursor(self, cursor):
self.canvas.get_property("window").set_cursor(cursord[cursor])
Gtk.main_iteration()
def draw_rubberband(self, event, x0, y0, x1, y1):
'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744'
self.ctx = self.canvas.get_property("window").cairo_create()
# todo: instead of redrawing the entire figure, copy the part of
# the figure that was covered by the previous rubberband rectangle
self.canvas.draw()
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
rect = [int(val) for val in (min(x0, x1), min(y0, y1), w, h)]
self.ctx.new_path()
self.ctx.set_line_width(0.5)
self.ctx.rectangle(rect[0], rect[1], rect[2], rect[3])
self.ctx.set_source_rgb(0, 0, 0)
self.ctx.stroke()
def _init_toolbar(self):
self.set_style(Gtk.ToolbarStyle.ICONS)
basedir = os.path.join(rcParams['datapath'], 'images')
self._gtk_ids = {}
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.insert(Gtk.SeparatorToolItem(), -1)
continue
fname = os.path.join(basedir, image_file + '.png')
image = Gtk.Image()
image.set_from_file(fname)
self._gtk_ids[text] = tbutton = Gtk.ToolButton()
tbutton.set_label(text)
tbutton.set_icon_widget(image)
self.insert(tbutton, -1)
tbutton.connect('clicked', getattr(self, callback))
tbutton.set_tooltip_text(tooltip_text)
toolitem = Gtk.SeparatorToolItem()
self.insert(toolitem, -1)
toolitem.set_draw(False)
toolitem.set_expand(True)
toolitem = Gtk.ToolItem()
self.insert(toolitem, -1)
self.message = Gtk.Label()
toolitem.add(self.message)
self.show_all()
@cbook.deprecated("3.1")
def get_filechooser(self):
fc = FileChooserDialog(
title='Save the figure',
parent=self.win,
path=os.path.expanduser(rcParams['savefig.directory']),
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
fc.set_current_name(self.canvas.get_default_filename())
return fc
def save_figure(self, *args):
dialog = Gtk.FileChooserDialog(
title="Save the figure",
parent=self.canvas.get_toplevel(),
action=Gtk.FileChooserAction.SAVE,
buttons=(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_SAVE, Gtk.ResponseType.OK),
)
for name, fmts \
in self.canvas.get_supported_filetypes_grouped().items():
ff = Gtk.FileFilter()
ff.set_name(name)
for fmt in fmts:
ff.add_pattern("*." + fmt)
dialog.add_filter(ff)
if self.canvas.get_default_filetype() in fmts:
dialog.set_filter(ff)
@functools.partial(dialog.connect, "notify::filter")
def on_notify_filter(*args):
name = dialog.get_filter().get_name()
fmt = self.canvas.get_supported_filetypes_grouped()[name][0]
dialog.set_current_name(
str(Path(dialog.get_current_name()).with_suffix("." + fmt)))
dialog.set_current_folder(rcParams["savefig.directory"])
dialog.set_current_name(self.canvas.get_default_filename())
dialog.set_do_overwrite_confirmation(True)
response = dialog.run()
fname = dialog.get_filename()
ff = dialog.get_filter() # Doesn't autoadjust to filename :/
fmt = self.canvas.get_supported_filetypes_grouped()[ff.get_name()][0]
dialog.destroy()
if response == Gtk.ResponseType.CANCEL:
return
# Save dir for next time, unless empty str (which means use cwd).
if rcParams['savefig.directory']:
rcParams['savefig.directory'] = os.path.dirname(fname)
try:
self.canvas.figure.savefig(fname, format=fmt)
except Exception as e:
error_msg_gtk(str(e), parent=self)
def configure_subplots(self, button):
toolfig = Figure(figsize=(6, 3))
canvas = self._get_canvas(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
w = int(toolfig.bbox.width)
h = int(toolfig.bbox.height)
window = Gtk.Window()
try:
window.set_icon_from_file(window_icon)
except Exception:
# we presumably already logged a message on the
# failure of the main plot, don't keep reporting
pass
window.set_title("Subplot Configuration Tool")
window.set_default_size(w, h)
vbox = Gtk.Box()
vbox.set_property("orientation", Gtk.Orientation.VERTICAL)
window.add(vbox)
vbox.show()
canvas.show()
vbox.pack_start(canvas, True, True, 0)
window.show()
def _get_canvas(self, fig):
return self.canvas.__class__(fig)
def set_history_buttons(self):
can_backward = self._nav_stack._pos > 0
can_forward = self._nav_stack._pos < len(self._nav_stack._elements) - 1
self._gtk_ids['Back'].set_sensitive(can_backward)
self._gtk_ids['Forward'].set_sensitive(can_forward)
@cbook.deprecated("3.1")
class FileChooserDialog(Gtk.FileChooserDialog):
"""GTK+ file selector which remembers the last file/directory
selected and presents the user with a menu of supported image formats
"""
def __init__(self,
title = 'Save file',
parent = None,
action = Gtk.FileChooserAction.SAVE,
buttons = (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_SAVE, Gtk.ResponseType.OK),
path = None,
filetypes = [],
default_filetype = None
):
super().__init__(title, parent, action, buttons)
self.set_default_response(Gtk.ResponseType.OK)
self.set_do_overwrite_confirmation(True)
if not path:
path = os.getcwd()
# create an extra widget to list supported image formats
self.set_current_folder(path)
self.set_current_name('image.' + default_filetype)
hbox = Gtk.Box(spacing=10)
hbox.pack_start(Gtk.Label(label="File Format:"), False, False, 0)
liststore = Gtk.ListStore(GObject.TYPE_STRING)
cbox = Gtk.ComboBox()
cbox.set_model(liststore)
cell = Gtk.CellRendererText()
cbox.pack_start(cell, True)
cbox.add_attribute(cell, 'text', 0)
hbox.pack_start(cbox, False, False, 0)
self.filetypes = filetypes
sorted_filetypes = sorted(filetypes.items())
default = 0
for i, (ext, name) in enumerate(sorted_filetypes):
liststore.append(["%s (*.%s)" % (name, ext)])
if ext == default_filetype:
default = i
cbox.set_active(default)
self.ext = default_filetype
def cb_cbox_changed(cbox, data=None):
"""File extension changed"""
head, filename = os.path.split(self.get_filename())
root, ext = os.path.splitext(filename)
ext = ext[1:]
new_ext = sorted_filetypes[cbox.get_active()][0]
self.ext = new_ext
if ext in self.filetypes:
filename = root + '.' + new_ext
elif ext == '':
filename = filename.rstrip('.') + '.' + new_ext
self.set_current_name(filename)
cbox.connect("changed", cb_cbox_changed)
hbox.show_all()
self.set_extra_widget(hbox)
@cbook.deprecated("3.0", alternative="sorted(self.filetypes.items())")
def sorted_filetypes(self):
return sorted(self.filetypes.items())
def get_filename_from_user(self):
if self.run() == int(Gtk.ResponseType.OK):
return self.get_filename(), self.ext
else:
return None, self.ext
class ToolbarGTK3(ToolContainerBase, Gtk.Box):
_icon_extension = '.png'
def __init__(self, toolmanager):
ToolContainerBase.__init__(self, toolmanager)
Gtk.Box.__init__(self)
self.set_property("orientation", Gtk.Orientation.VERTICAL)
self._toolarea = Gtk.Box()
self._toolarea.set_property('orientation', Gtk.Orientation.HORIZONTAL)
self.pack_start(self._toolarea, False, False, 0)
self._toolarea.show_all()
self._groups = {}
self._toolitems = {}
def add_toolitem(self, name, group, position, image_file, description,
toggle):
if toggle:
tbutton = Gtk.ToggleToolButton()
else:
tbutton = Gtk.ToolButton()
tbutton.set_label(name)
if image_file is not None:
image = Gtk.Image()
image.set_from_file(image_file)
tbutton.set_icon_widget(image)
if position is None:
position = -1
self._add_button(tbutton, group, position)
signal = tbutton.connect('clicked', self._call_tool, name)
tbutton.set_tooltip_text(description)
tbutton.show_all()
self._toolitems.setdefault(name, [])
self._toolitems[name].append((tbutton, signal))
def _add_button(self, button, group, position):
if group not in self._groups:
if self._groups:
self._add_separator()
toolbar = Gtk.Toolbar()
toolbar.set_style(Gtk.ToolbarStyle.ICONS)
self._toolarea.pack_start(toolbar, False, False, 0)
toolbar.show_all()
self._groups[group] = toolbar
self._groups[group].insert(button, position)
def _call_tool(self, btn, name):
self.trigger_tool(name)
def toggle_toolitem(self, name, toggled):
if name not in self._toolitems:
return
for toolitem, signal in self._toolitems[name]:
toolitem.handler_block(signal)
toolitem.set_active(toggled)
toolitem.handler_unblock(signal)
def remove_toolitem(self, name):
if name not in self._toolitems:
self.toolmanager.message_event('%s Not in toolbar' % name, self)
return
for group in self._groups:
for toolitem, _signal in self._toolitems[name]:
if toolitem in self._groups[group]:
self._groups[group].remove(toolitem)
del self._toolitems[name]
def _add_separator(self):
sep = Gtk.Separator()
sep.set_property("orientation", Gtk.Orientation.VERTICAL)
self._toolarea.pack_start(sep, False, True, 0)
sep.show_all()
class StatusbarGTK3(StatusbarBase, Gtk.Statusbar):
def __init__(self, *args, **kwargs):
StatusbarBase.__init__(self, *args, **kwargs)
Gtk.Statusbar.__init__(self)
self._context = self.get_context_id('message')
def set_message(self, s):
self.pop(self._context)
self.push(self._context, s)
class RubberbandGTK3(backend_tools.RubberbandBase):
def draw_rubberband(self, x0, y0, x1, y1):
NavigationToolbar2GTK3.draw_rubberband(
self._make_classic_style_pseudo_toolbar(), None, x0, y0, x1, y1)
class SaveFigureGTK3(backend_tools.SaveFigureBase):
@cbook.deprecated("3.1")
def get_filechooser(self):
fc = FileChooserDialog(
title='Save the figure',
parent=self.figure.canvas.manager.window,
path=os.path.expanduser(rcParams['savefig.directory']),
filetypes=self.figure.canvas.get_supported_filetypes(),
default_filetype=self.figure.canvas.get_default_filetype())
fc.set_current_name(self.figure.canvas.get_default_filename())
return fc
def trigger(self, *args, **kwargs):
class PseudoToolbar:
canvas = self.figure.canvas
return NavigationToolbar2GTK3.save_figure(PseudoToolbar())
class SetCursorGTK3(backend_tools.SetCursorBase):
def set_cursor(self, cursor):
NavigationToolbar2GTK3.set_cursor(
self._make_classic_style_pseudo_toolbar(), cursor)
class ConfigureSubplotsGTK3(backend_tools.ConfigureSubplotsBase, Gtk.Window):
def __init__(self, *args, **kwargs):
backend_tools.ConfigureSubplotsBase.__init__(self, *args, **kwargs)
self.window = None
def init_window(self):
if self.window:
return
self.window = Gtk.Window(title="Subplot Configuration Tool")
try:
self.window.window.set_icon_from_file(window_icon)
except Exception:
# we presumably already logged a message on the
# failure of the main plot, don't keep reporting
pass
self.vbox = Gtk.Box()
self.vbox.set_property("orientation", Gtk.Orientation.VERTICAL)
self.window.add(self.vbox)
self.vbox.show()
self.window.connect('destroy', self.destroy)
toolfig = Figure(figsize=(6, 3))
canvas = self.figure.canvas.__class__(toolfig)
toolfig.subplots_adjust(top=0.9)
SubplotTool(self.figure, toolfig)
w = int(toolfig.bbox.width)
h = int(toolfig.bbox.height)
self.window.set_default_size(w, h)
canvas.show()
self.vbox.pack_start(canvas, True, True, 0)
self.window.show()
def destroy(self, *args):
self.window.destroy()
self.window = None
def _get_canvas(self, fig):
return self.canvas.__class__(fig)
def trigger(self, sender, event, data=None):
self.init_window()
self.window.present()
class HelpGTK3(backend_tools.ToolHelpBase):
def _normalize_shortcut(self, key):
"""
Convert Matplotlib key presses to GTK+ accelerator identifiers.
Related to `FigureCanvasGTK3._get_key`.
"""
special = {
'backspace': 'BackSpace',
'pagedown': 'Page_Down',
'pageup': 'Page_Up',
'scroll_lock': 'Scroll_Lock',
}
parts = key.split('+')
mods = ['<' + mod + '>' for mod in parts[:-1]]
key = parts[-1]
if key in special:
key = special[key]
elif len(key) > 1:
key = key.capitalize()
elif key.isupper():
mods += ['<shift>']
return ''.join(mods) + key
def _show_shortcuts_window(self):
section = Gtk.ShortcutsSection()
for name, tool in sorted(self.toolmanager.tools.items()):
if not tool.description:
continue
# Putting everything in a separate group allows GTK to
# automatically split them into separate columns/pages, which is
# useful because we have lots of shortcuts, some with many keys
# that are very wide.
group = Gtk.ShortcutsGroup()
section.add(group)
# A hack to remove the title since we have no group naming.
group.forall(lambda widget, data: widget.set_visible(False), None)
shortcut = Gtk.ShortcutsShortcut(
accelerator=' '.join(
self._normalize_shortcut(key)
for key in self.toolmanager.get_tool_keymap(name)
# Will never be sent:
if 'cmd+' not in key),
title=tool.name,
subtitle=tool.description)
group.add(shortcut)
window = Gtk.ShortcutsWindow(
title='Help',
modal=True,
transient_for=self._figure.canvas.get_toplevel())
section.show() # Must be done explicitly before add!
window.add(section)
window.show_all()
def _show_shortcuts_dialog(self):
dialog = Gtk.MessageDialog(
self._figure.canvas.get_toplevel(),
0, Gtk.MessageType.INFO, Gtk.ButtonsType.OK, self._get_help_text(),
title="Help")
dialog.run()
dialog.destroy()
def trigger(self, *args):
if Gtk.check_version(3, 20, 0) is None:
self._show_shortcuts_window()
else:
self._show_shortcuts_dialog()
class ToolCopyToClipboardGTK3(backend_tools.ToolCopyToClipboardBase):
def trigger(self, *args, **kwargs):
clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
window = self.canvas.get_window()
x, y, width, height = window.get_geometry()
pb = Gdk.pixbuf_get_from_window(window, x, y, width, height)
clipboard.set_image(pb)
# Define the file to use as the GTK icon
if sys.platform == 'win32':
icon_filename = 'matplotlib.png'
else:
icon_filename = 'matplotlib.svg'
window_icon = os.path.join(
matplotlib.rcParams['datapath'], 'images', icon_filename)
def error_msg_gtk(msg, parent=None):
if parent is not None: # find the toplevel Gtk.Window
parent = parent.get_toplevel()
if not parent.is_toplevel():
parent = None
if not isinstance(msg, str):
msg = ','.join(map(str, msg))
dialog = Gtk.MessageDialog(
parent = parent,
type = Gtk.MessageType.ERROR,
buttons = Gtk.ButtonsType.OK,
message_format = msg)
dialog.run()
dialog.destroy()
backend_tools.ToolSaveFigure = SaveFigureGTK3
backend_tools.ToolConfigureSubplots = ConfigureSubplotsGTK3
backend_tools.ToolSetCursor = SetCursorGTK3
backend_tools.ToolRubberband = RubberbandGTK3
backend_tools.ToolHelp = HelpGTK3
backend_tools.ToolCopyToClipboard = ToolCopyToClipboardGTK3
Toolbar = ToolbarGTK3
@_Backend.export
class _BackendGTK3(_Backend):
required_interactive_framework = "gtk3"
FigureCanvas = FigureCanvasGTK3
FigureManager = FigureManagerGTK3
@staticmethod
def trigger_manager_draw(manager):
manager.canvas.draw_idle()
@staticmethod
def mainloop():
if Gtk.main_level() == 0:
Gtk.main()
|
921a0bfd61d3c3aad80a107c450c4791544ad52a2d85b5e7ffcc49b111dffbd0
|
from contextlib import contextmanager
import logging
import math
import os.path
import sys
import tkinter as tk
from tkinter.simpledialog import SimpleDialog
import tkinter.filedialog
import tkinter.messagebox
import numpy as np
import matplotlib
from matplotlib import backend_tools, cbook, rcParams
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, NavigationToolbar2,
StatusbarBase, TimerBase, ToolContainerBase, cursors)
from matplotlib.backend_managers import ToolManager
from matplotlib._pylab_helpers import Gcf
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
from . import _tkagg
try:
from ._tkagg import Win32_GetForegroundWindow, Win32_SetForegroundWindow
except ImportError:
@contextmanager
def _restore_foreground_window_at_end():
yield
else:
@contextmanager
def _restore_foreground_window_at_end():
foreground = Win32_GetForegroundWindow()
try:
yield
finally:
if rcParams['tk.window_focus']:
Win32_SetForegroundWindow(foreground)
_log = logging.getLogger(__name__)
backend_version = tk.TkVersion
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 75
cursord = {
cursors.MOVE: "fleur",
cursors.HAND: "hand2",
cursors.POINTER: "arrow",
cursors.SELECT_REGION: "tcross",
cursors.WAIT: "watch",
}
def blit(photoimage, aggimage, offsets, bbox=None):
"""
Blit *aggimage* to *photoimage*.
*offsets* is a tuple describing how to fill the ``offset`` field of the
``Tk_PhotoImageBlock`` struct: it should be (0, 1, 2, 3) for RGBA8888 data,
    (2, 1, 0, 3) for little-endian ARGB32 (i.e. BGRA8888) data and (1, 2, 3, 0)
for big-endian ARGB32 (i.e. ARGB8888) data.
If *bbox* is passed, it defines the region that gets blitted.
"""
data = np.asarray(aggimage)
height, width = data.shape[:2]
dataptr = (height, width, data.ctypes.data)
if bbox is not None:
(x1, y1), (x2, y2) = bbox.__array__()
bboxptr = (math.floor(x1), math.ceil(x2),
math.floor(y1), math.ceil(y2))
else:
photoimage.blank()
bboxptr = (0, width, 0, height)
_tkagg.blit(
photoimage.tk.interpaddr(), str(photoimage), dataptr, offsets, bboxptr)
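# Illustrative sketch, not part of the original module: the *offsets* argument
# documented above says where the R, G, B and A channels sit within each
# pixel.  The hypothetical, never-called helper below blits a full (H, W, 4)
# uint8 RGBA array, which uses the identity offsets; the cairo variant in
# backend_tkcairo instead passes (2, 1, 0, 3) or (1, 2, 3, 0) depending on
# byte order, because cairo produces ARGB32 data.
def _example_blit_rgba(photoimage, rgba_array):
    blit(photoimage, rgba_array, (0, 1, 2, 3))   # RGBA8888, whole image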
class TimerTk(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses Tk's timer events.
Attributes
----------
interval : int
The time between timer events in milliseconds. Default is 1000 ms.
single_shot : bool
Boolean flag indicating whether this timer should operate as single
shot (run once and then stop). Defaults to False.
callbacks : list
Stores list of (func, args) tuples that will be called upon timer
events. This list can be manipulated directly, or the functions
`add_callback` and `remove_callback` can be used.
'''
def __init__(self, parent, *args, **kwargs):
TimerBase.__init__(self, *args, **kwargs)
self.parent = parent
self._timer = None
def _timer_start(self):
self._timer_stop()
self._timer = self.parent.after(self._interval, self._on_timer)
def _timer_stop(self):
if self._timer is not None:
self.parent.after_cancel(self._timer)
self._timer = None
def _on_timer(self):
TimerBase._on_timer(self)
# Tk after() is only a single shot, so we need to add code here to
# reset the timer if we're not operating in single shot mode. However,
# if _timer is None, this means that _timer_stop has been called; so
# don't recreate the timer in that case.
if not self._single and self._timer:
self._timer = self.parent.after(self._interval, self._on_timer)
else:
self._timer = None
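# Illustrative sketch, not part of the original module: as the comment in
# TimerTk._on_timer above notes, Tk's ``after`` fires only once, so a
# repeating timer must re-arm itself on every tick.  The hypothetical,
# never-called helper below shows that pattern with a plain tkinter widget.
def _example_repeating_after(widget, interval_ms, func):
    def tick():
        func()
        widget.after(interval_ms, tick)   # re-arm; after() is single-shot
    return widget.after(interval_ms, tick)   # id of the first scheduling only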
class FigureCanvasTk(FigureCanvasBase):
keyvald = {65507: 'control',
65505: 'shift',
65513: 'alt',
65515: 'super',
65508: 'control',
65506: 'shift',
65514: 'alt',
65361: 'left',
65362: 'up',
65363: 'right',
65364: 'down',
65307: 'escape',
65470: 'f1',
65471: 'f2',
65472: 'f3',
65473: 'f4',
65474: 'f5',
65475: 'f6',
65476: 'f7',
65477: 'f8',
65478: 'f9',
65479: 'f10',
65480: 'f11',
65481: 'f12',
65300: 'scroll_lock',
65299: 'break',
65288: 'backspace',
65293: 'enter',
65379: 'insert',
65535: 'delete',
65360: 'home',
65367: 'end',
65365: 'pageup',
65366: 'pagedown',
65438: '0',
65436: '1',
65433: '2',
65435: '3',
65430: '4',
65437: '5',
65432: '6',
65429: '7',
65431: '8',
65434: '9',
65451: '+',
65453: '-',
65450: '*',
65455: '/',
65439: 'dec',
65421: 'enter',
}
_keycode_lookup = {
262145: 'control',
524320: 'alt',
524352: 'alt',
1048584: 'super',
1048592: 'super',
131074: 'shift',
131076: 'shift',
}
"""_keycode_lookup is used for badly mapped (i.e. no event.key_sym set)
keys on apple keyboards."""
def __init__(self, figure, master=None, resize_callback=None):
super(FigureCanvasTk, self).__init__(figure)
self._idle = True
self._idle_callback = None
t1, t2, w, h = self.figure.bbox.bounds
w, h = int(w), int(h)
self._tkcanvas = tk.Canvas(
master=master, background="white",
width=w, height=h, borderwidth=0, highlightthickness=0)
self._tkphoto = tk.PhotoImage(
master=self._tkcanvas, width=w, height=h)
self._tkcanvas.create_image(w//2, h//2, image=self._tkphoto)
self._resize_callback = resize_callback
self._tkcanvas.bind("<Configure>", self.resize)
self._tkcanvas.bind("<Key>", self.key_press)
self._tkcanvas.bind("<Motion>", self.motion_notify_event)
self._tkcanvas.bind("<Enter>", self.enter_notify_event)
self._tkcanvas.bind("<Leave>", self.leave_notify_event)
self._tkcanvas.bind("<KeyRelease>", self.key_release)
for name in "<Button-1>", "<Button-2>", "<Button-3>":
self._tkcanvas.bind(name, self.button_press_event)
for name in "<Double-Button-1>", "<Double-Button-2>", "<Double-Button-3>":
self._tkcanvas.bind(name, self.button_dblclick_event)
for name in "<ButtonRelease-1>", "<ButtonRelease-2>", "<ButtonRelease-3>":
self._tkcanvas.bind(name, self.button_release_event)
# Mouse wheel on Linux generates button 4/5 events
for name in "<Button-4>", "<Button-5>":
self._tkcanvas.bind(name, self.scroll_event)
        # Mouse wheel events on Windows go to the window with the focus.
# Since the canvas won't usually have the focus, bind the
# event to the window containing the canvas instead.
# See http://wiki.tcl.tk/3893 (mousewheel) for details
root = self._tkcanvas.winfo_toplevel()
root.bind("<MouseWheel>", self.scroll_event_windows, "+")
# Can't get destroy events by binding to _tkcanvas. Therefore, bind
# to the window and filter.
def filter_destroy(evt):
if evt.widget is self._tkcanvas:
self._master.update_idletasks()
self.close_event()
root.bind("<Destroy>", filter_destroy, "+")
self._master = master
self._tkcanvas.focus_set()
def resize(self, event):
width, height = event.width, event.height
if self._resize_callback is not None:
self._resize_callback(event)
# compute desired figure size in inches
dpival = self.figure.dpi
winch = width / dpival
hinch = height / dpival
self.figure.set_size_inches(winch, hinch, forward=False)
self._tkcanvas.delete(self._tkphoto)
self._tkphoto = tk.PhotoImage(
master=self._tkcanvas, width=int(width), height=int(height))
self._tkcanvas.create_image(
int(width / 2), int(height / 2), image=self._tkphoto)
self.resize_event()
self.draw()
# a resizing will in general move the pointer position
# relative to the canvas, so process it as a motion notify
# event. An intended side effect of this call is to allow
# window raises (which trigger a resize) to get the cursor
# position to the mpl event framework so key presses which are
# over the axes will work w/o clicks or explicit motion
self._update_pointer_position(event)
def _update_pointer_position(self, guiEvent=None):
"""
Figure out if we are inside the canvas or not and update the
canvas enter/leave events
"""
        # if the pointer is over the canvas, set the lastx and lasty
        # attrs of the canvas so it can process events w/o a mouse click
        # or move
# the window's upper, left coords in screen coords
xw = self._tkcanvas.winfo_rootx()
yw = self._tkcanvas.winfo_rooty()
# the pointer's location in screen coords
xp, yp = self._tkcanvas.winfo_pointerxy()
        # now figure out the canvas coordinates of the pointer
xc = xp - xw
yc = yp - yw
# flip top/bottom
yc = self.figure.bbox.height - yc
# JDH: this method was written originally to get the pointer
# location to the backend lastx and lasty attrs so that events
# like KeyEvent can be handled without mouse events. e.g., if
# the cursor is already above the axes, then key presses like
# 'g' should toggle the grid. In order for this to work in
# backend_bases, the canvas needs to know _lastx and _lasty.
        # There are three ways to get this info to the canvas:
#
# 1) set it explicitly
#
# 2) call enter/leave events explicitly. The downside of this
# in the impl below is that enter could be repeatedly
# triggered if the mouse is over the axes and one is
# resizing with the keyboard. This is not entirely bad,
# because the mouse position relative to the canvas is
# changing, but it may be surprising to get repeated entries
# without leaves
#
# 3) process it as a motion notify event. This also has pros
# and cons. The mouse is moving relative to the window, but
# this may surprise an event handler writer who is getting
# motion_notify_events even if the mouse has not moved
# here are the three scenarios
if 1:
# just manually set it
self._lastx, self._lasty = xc, yc
elif 0:
# alternate implementation: process it as a motion
FigureCanvasBase.motion_notify_event(self, xc, yc, guiEvent)
elif 0:
# alternate implementation -- process enter/leave events
# instead of motion/notify
if self.figure.bbox.contains(xc, yc):
self.enter_notify_event(guiEvent, xy=(xc, yc))
else:
self.leave_notify_event(guiEvent)
def draw_idle(self):
# docstring inherited
if not self._idle:
return
self._idle = False
def idle_draw(*args):
try:
self.draw()
finally:
self._idle = True
self._idle_callback = self._tkcanvas.after_idle(idle_draw)
def get_tk_widget(self):
"""returns the Tk widget used to implement FigureCanvasTkAgg.
Although the initial implementation uses a Tk canvas, this routine
is intended to hide that fact.
"""
return self._tkcanvas
def motion_notify_event(self, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
def enter_notify_event(self, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
FigureCanvasBase.enter_notify_event(self, guiEvent=event, xy=(x, y))
def button_press_event(self, event, dblclick=False):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if sys.platform == 'darwin':
# 2 and 3 were reversed on the OSX platform I tested under tkagg.
if num == 2:
num = 3
elif num == 3:
num = 2
FigureCanvasBase.button_press_event(
self, x, y, num, dblclick=dblclick, guiEvent=event)
def button_dblclick_event(self, event):
self.button_press_event(event, dblclick=True)
def button_release_event(self, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if sys.platform == 'darwin':
# 2 and 3 were reversed on the OSX platform I tested under tkagg.
if num == 2:
num = 3
elif num == 3:
num = 2
FigureCanvasBase.button_release_event(self, x, y, num, guiEvent=event)
def scroll_event(self, event):
x = event.x
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
step = 1 if num == 4 else -1 if num == 5 else 0
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
def scroll_event_windows(self, event):
"""MouseWheel event processor"""
# need to find the window that contains the mouse
w = event.widget.winfo_containing(event.x_root, event.y_root)
if w == self._tkcanvas:
x = event.x_root - w.winfo_rootx()
y = event.y_root - w.winfo_rooty()
y = self.figure.bbox.height - y
step = event.delta/120.
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
def _get_key(self, event):
val = event.keysym_num
if val in self.keyvald:
key = self.keyvald[val]
elif (val == 0 and sys.platform == 'darwin'
and event.keycode in self._keycode_lookup):
key = self._keycode_lookup[event.keycode]
elif val < 256:
key = chr(val)
else:
key = None
# add modifier keys to the key string. Bit details originate from
# http://effbot.org/tkinterbook/tkinter-events-and-bindings.htm
# BIT_SHIFT = 0x001; BIT_CAPSLOCK = 0x002; BIT_CONTROL = 0x004;
# BIT_LEFT_ALT = 0x008; BIT_NUMLOCK = 0x010; BIT_RIGHT_ALT = 0x080;
# BIT_MB_1 = 0x100; BIT_MB_2 = 0x200; BIT_MB_3 = 0x400;
# In general, the modifier key is excluded from the modifier flag,
# however this is not the case on "darwin", so double check that
# we aren't adding repeat modifier flags to a modifier key.
if sys.platform == 'win32':
modifiers = [(17, 'alt', 'alt'),
(2, 'ctrl', 'control'),
]
elif sys.platform == 'darwin':
modifiers = [(3, 'super', 'super'),
(4, 'alt', 'alt'),
(2, 'ctrl', 'control'),
]
else:
modifiers = [(6, 'super', 'super'),
(3, 'alt', 'alt'),
(2, 'ctrl', 'control'),
]
if key is not None:
# shift is not added to the keys as this is already accounted for
for bitmask, prefix, key_name in modifiers:
if event.state & (1 << bitmask) and key_name not in key:
key = '{0}+{1}'.format(prefix, key)
return key
def key_press(self, event):
key = self._get_key(event)
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
def key_release(self, event):
key = self._get_key(event)
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
def new_timer(self, *args, **kwargs):
# docstring inherited
return TimerTk(self._tkcanvas, *args, **kwargs)
def flush_events(self):
# docstring inherited
self._master.update()
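# Illustrative sketch, not part of the original module: get_tk_widget() above
# is what embedding code packs into an existing Tk layout.  The hypothetical,
# never-called helper below shows the usual pattern, assuming ``canvas_class``
# is a concrete subclass such as FigureCanvasTkAgg from backend_tkagg.
def _example_embed_canvas(root, canvas_class):
    fig = Figure(figsize=(5, 4), dpi=100)
    canvas = canvas_class(fig, master=root)
    canvas.draw()
    canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
    return fig, canvas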
class FigureManagerTk(FigureManagerBase):
"""
Attributes
----------
canvas : `FigureCanvas`
The FigureCanvas instance
num : int or str
The Figure number
toolbar : tk.Toolbar
The tk.Toolbar
window : tk.Window
The tk.Window
"""
def __init__(self, canvas, num, window):
FigureManagerBase.__init__(self, canvas, num)
self.window = window
self.window.withdraw()
self.set_window_title("Figure %d" % num)
self.canvas = canvas
# If using toolmanager it has to be present when initializing the
# toolbar
self.toolmanager = self._get_toolmanager()
# packing toolbar first, because if space is getting low, last packed
# widget is getting shrunk first (-> the canvas)
self.toolbar = self._get_toolbar()
self.canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self._num = num
self.statusbar = None
if self.toolmanager:
backend_tools.add_tools_to_manager(self.toolmanager)
if self.toolbar:
backend_tools.add_tools_to_container(self.toolbar)
self.statusbar = StatusbarTk(self.window, self.toolmanager)
self._shown = False
def _get_toolbar(self):
if matplotlib.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2Tk(self.canvas, self.window)
elif matplotlib.rcParams['toolbar'] == 'toolmanager':
toolbar = ToolbarTk(self.toolmanager, self.window)
else:
toolbar = None
return toolbar
def _get_toolmanager(self):
if rcParams['toolbar'] == 'toolmanager':
toolmanager = ToolManager(self.canvas.figure)
else:
toolmanager = None
return toolmanager
def resize(self, width, height):
self.canvas._tkcanvas.master.geometry("%dx%d" % (width, height))
if self.toolbar is not None:
self.toolbar.configure(width=width)
def show(self):
"""
this function doesn't segfault but causes the
PyEval_RestoreThread: NULL state bug on win32
"""
with _restore_foreground_window_at_end():
if not self._shown:
def destroy(*args):
self.window = None
Gcf.destroy(self._num)
self.canvas._tkcanvas.bind("<Destroy>", destroy)
self.window.deiconify()
else:
self.canvas.draw_idle()
# Raise the new window.
self.canvas.manager.window.attributes('-topmost', 1)
self.canvas.manager.window.attributes('-topmost', 0)
self._shown = True
def destroy(self, *args):
if self.window is not None:
#self.toolbar.destroy()
if self.canvas._idle_callback:
self.canvas._tkcanvas.after_cancel(self.canvas._idle_callback)
self.window.destroy()
if Gcf.get_num_fig_managers() == 0:
if self.window is not None:
self.window.quit()
self.window = None
def get_window_title(self):
return self.window.wm_title()
def set_window_title(self, title):
self.window.wm_title(title)
def full_screen_toggle(self):
is_fullscreen = bool(self.window.attributes('-fullscreen'))
self.window.attributes('-fullscreen', not is_fullscreen)
class NavigationToolbar2Tk(NavigationToolbar2, tk.Frame):
"""
Attributes
----------
canvas : `FigureCanvas`
the figure canvas on which to operate
win : tk.Window
the tk.Window which owns this toolbar
"""
def __init__(self, canvas, window):
self.canvas = canvas
# Avoid using self.window (prefer self.canvas.manager.window), so that
# Tool implementations can reuse the methods.
self.window = window
NavigationToolbar2.__init__(self, canvas)
def destroy(self, *args):
del self.message
tk.Frame.destroy(self, *args)
def set_message(self, s):
self.message.set(s)
def draw_rubberband(self, event, x0, y0, x1, y1):
height = self.canvas.figure.bbox.height
y0 = height - y0
y1 = height - y1
if hasattr(self, "lastrect"):
self.canvas._tkcanvas.delete(self.lastrect)
self.lastrect = self.canvas._tkcanvas.create_rectangle(x0, y0, x1, y1)
def release(self, event):
if hasattr(self, "lastrect"):
self.canvas._tkcanvas.delete(self.lastrect)
del self.lastrect
def set_cursor(self, cursor):
window = self.canvas.manager.window
window.configure(cursor=cursord[cursor])
window.update_idletasks()
def _Button(self, text, file, command, extension='.gif'):
img_file = os.path.join(
rcParams['datapath'], 'images', file + extension)
im = tk.PhotoImage(master=self, file=img_file)
b = tk.Button(
master=self, text=text, padx=2, pady=2, image=im, command=command)
b._ntimage = im
b.pack(side=tk.LEFT)
return b
def _Spacer(self):
# Buttons are 30px high. Make this 26px tall +2px padding to center it.
s = tk.Frame(
master=self, height=26, relief=tk.RIDGE, pady=2, bg="DarkGray")
s.pack(side=tk.LEFT, padx=5)
return s
def _init_toolbar(self):
xmin, xmax = self.canvas.figure.bbox.intervalx
height, width = 50, xmax-xmin
tk.Frame.__init__(self, master=self.window,
width=int(width), height=int(height),
borderwidth=2)
self.update() # Make axes menu
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
# Add a spacer; return value is unused.
self._Spacer()
else:
button = self._Button(text=text, file=image_file,
command=getattr(self, callback))
if tooltip_text is not None:
ToolTip.createToolTip(button, tooltip_text)
self.message = tk.StringVar(master=self)
self._message_label = tk.Label(master=self, textvariable=self.message)
self._message_label.pack(side=tk.RIGHT)
self.pack(side=tk.BOTTOM, fill=tk.X)
def configure_subplots(self):
toolfig = Figure(figsize=(6, 3))
window = tk.Toplevel()
canvas = type(self.canvas)(toolfig, master=window)
toolfig.subplots_adjust(top=0.9)
canvas.tool = SubplotTool(self.canvas.figure, toolfig)
canvas.draw()
canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
window.grab_set()
def save_figure(self, *args):
filetypes = self.canvas.get_supported_filetypes().copy()
default_filetype = self.canvas.get_default_filetype()
# Tk doesn't provide a way to choose a default filetype,
# so we just have to put it first
default_filetype_name = filetypes.pop(default_filetype)
sorted_filetypes = ([(default_filetype, default_filetype_name)]
+ sorted(filetypes.items()))
tk_filetypes = [(name, '*.%s' % ext) for ext, name in sorted_filetypes]
# adding a default extension seems to break the
# asksaveasfilename dialog when you choose various save types
# from the dropdown. Passing in the empty string seems to
# work - JDH!
#defaultextension = self.canvas.get_default_filetype()
defaultextension = ''
initialdir = os.path.expanduser(rcParams['savefig.directory'])
initialfile = self.canvas.get_default_filename()
fname = tkinter.filedialog.asksaveasfilename(
master=self.canvas.manager.window,
title='Save the figure',
filetypes=tk_filetypes,
defaultextension=defaultextension,
initialdir=initialdir,
initialfile=initialfile,
)
if fname in ["", ()]:
return
# Save dir for next time, unless empty str (i.e., use cwd).
if initialdir != "":
rcParams['savefig.directory'] = (
os.path.dirname(str(fname)))
try:
# This method will handle the delegation to the correct type
self.canvas.figure.savefig(fname)
except Exception as e:
tkinter.messagebox.showerror("Error saving file", str(e))
def set_active(self, ind):
self._ind = ind
self._active = [self._axes[i] for i in self._ind]
def update(self):
self._axes = self.canvas.figure.axes
with _restore_foreground_window_at_end():
NavigationToolbar2.update(self)
class ToolTip(object):
"""
Tooltip recipe from
http://www.voidspace.org.uk/python/weblog/arch_d7_2006_07_01.shtml#e387
"""
@staticmethod
def createToolTip(widget, text):
toolTip = ToolTip(widget)
def enter(event):
toolTip.showtip(text)
def leave(event):
toolTip.hidetip()
widget.bind('<Enter>', enter)
widget.bind('<Leave>', leave)
def __init__(self, widget):
self.widget = widget
self.tipwindow = None
self.id = None
self.x = self.y = 0
def showtip(self, text):
"Display text in tooltip window"
self.text = text
if self.tipwindow or not self.text:
return
x, y, _, _ = self.widget.bbox("insert")
x = x + self.widget.winfo_rootx() + 27
y = y + self.widget.winfo_rooty()
self.tipwindow = tw = tk.Toplevel(self.widget)
tw.wm_overrideredirect(1)
tw.wm_geometry("+%d+%d" % (x, y))
try:
# For Mac OS
tw.tk.call("::tk::unsupported::MacWindowStyle",
"style", tw._w,
"help", "noActivates")
except tk.TclError:
pass
label = tk.Label(tw, text=self.text, justify=tk.LEFT,
background="#ffffe0", relief=tk.SOLID, borderwidth=1)
label.pack(ipadx=1)
def hidetip(self):
tw = self.tipwindow
self.tipwindow = None
if tw:
tw.destroy()
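# Illustrative sketch, not part of the original module: createToolTip() is the
# whole public surface of the recipe above; NavigationToolbar2Tk uses it to
# attach hover text to its buttons.  The helper below is hypothetical and
# never called.
def _example_tooltip(widget):
    ToolTip.createToolTip(widget, "Hover text shown in a small Toplevel")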
class RubberbandTk(backend_tools.RubberbandBase):
def draw_rubberband(self, x0, y0, x1, y1):
height = self.figure.canvas.figure.bbox.height
y0 = height - y0
y1 = height - y1
if hasattr(self, "lastrect"):
self.figure.canvas._tkcanvas.delete(self.lastrect)
self.lastrect = self.figure.canvas._tkcanvas.create_rectangle(
x0, y0, x1, y1)
def remove_rubberband(self):
if hasattr(self, "lastrect"):
self.figure.canvas._tkcanvas.delete(self.lastrect)
del self.lastrect
class SetCursorTk(backend_tools.SetCursorBase):
def set_cursor(self, cursor):
NavigationToolbar2Tk.set_cursor(
self._make_classic_style_pseudo_toolbar(), cursor)
class ToolbarTk(ToolContainerBase, tk.Frame):
_icon_extension = '.gif'
def __init__(self, toolmanager, window):
ToolContainerBase.__init__(self, toolmanager)
xmin, xmax = self.toolmanager.canvas.figure.bbox.intervalx
height, width = 50, xmax - xmin
tk.Frame.__init__(self, master=window,
width=int(width), height=int(height),
borderwidth=2)
self._toolitems = {}
self.pack(side=tk.TOP, fill=tk.X)
self._groups = {}
def add_toolitem(
self, name, group, position, image_file, description, toggle):
frame = self._get_groupframe(group)
button = self._Button(name, image_file, toggle, frame)
if description is not None:
ToolTip.createToolTip(button, description)
self._toolitems.setdefault(name, [])
self._toolitems[name].append(button)
def _get_groupframe(self, group):
if group not in self._groups:
if self._groups:
self._add_separator()
frame = tk.Frame(master=self, borderwidth=0)
frame.pack(side=tk.LEFT, fill=tk.Y)
self._groups[group] = frame
return self._groups[group]
def _add_separator(self):
separator = tk.Frame(master=self, bd=5, width=1, bg='black')
separator.pack(side=tk.LEFT, fill=tk.Y, padx=2)
def _Button(self, text, image_file, toggle, frame):
if image_file is not None:
im = tk.PhotoImage(master=self, file=image_file)
else:
im = None
if not toggle:
b = tk.Button(master=frame, text=text, padx=2, pady=2, image=im,
command=lambda: self._button_click(text))
else:
            # Some tkinter versions shipped with Python 3.6 have a bug where,
            # without this variable, toggling one checkbutton also visually
            # toggles nearby checkbuttons:
# https://bugs.python.org/issue29402
# https://bugs.python.org/issue25684
var = tk.IntVar()
b = tk.Checkbutton(master=frame, text=text, padx=2, pady=2,
image=im, indicatoron=False,
command=lambda: self._button_click(text),
variable=var)
b._ntimage = im
b.pack(side=tk.LEFT)
return b
def _button_click(self, name):
self.trigger_tool(name)
def toggle_toolitem(self, name, toggled):
if name not in self._toolitems:
return
for toolitem in self._toolitems[name]:
if toggled:
toolitem.select()
else:
toolitem.deselect()
def remove_toolitem(self, name):
for toolitem in self._toolitems[name]:
toolitem.pack_forget()
del self._toolitems[name]
class StatusbarTk(StatusbarBase, tk.Frame):
def __init__(self, window, *args, **kwargs):
StatusbarBase.__init__(self, *args, **kwargs)
xmin, xmax = self.toolmanager.canvas.figure.bbox.intervalx
height, width = 50, xmax - xmin
tk.Frame.__init__(self, master=window,
width=int(width), height=int(height),
borderwidth=2)
self._message = tk.StringVar(master=self)
self._message_label = tk.Label(master=self, textvariable=self._message)
self._message_label.pack(side=tk.RIGHT)
self.pack(side=tk.TOP, fill=tk.X)
def set_message(self, s):
self._message.set(s)
class SaveFigureTk(backend_tools.SaveFigureBase):
def trigger(self, *args):
NavigationToolbar2Tk.save_figure(
self._make_classic_style_pseudo_toolbar())
class ConfigureSubplotsTk(backend_tools.ConfigureSubplotsBase):
def __init__(self, *args, **kwargs):
backend_tools.ConfigureSubplotsBase.__init__(self, *args, **kwargs)
self.window = None
def trigger(self, *args):
self.init_window()
self.window.lift()
def init_window(self):
if self.window:
return
toolfig = Figure(figsize=(6, 3))
self.window = tk.Tk()
canvas = type(self.canvas)(toolfig, master=self.window)
toolfig.subplots_adjust(top=0.9)
SubplotTool(self.figure, toolfig)
canvas.draw()
canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.window.protocol("WM_DELETE_WINDOW", self.destroy)
def destroy(self, *args, **kwargs):
self.window.destroy()
self.window = None
class HelpTk(backend_tools.ToolHelpBase):
def trigger(self, *args):
dialog = SimpleDialog(
self.figure.canvas._tkcanvas, self._get_help_text(), ["OK"])
dialog.done = lambda num: dialog.frame.master.withdraw()
backend_tools.ToolSaveFigure = SaveFigureTk
backend_tools.ToolConfigureSubplots = ConfigureSubplotsTk
backend_tools.ToolSetCursor = SetCursorTk
backend_tools.ToolRubberband = RubberbandTk
backend_tools.ToolHelp = HelpTk
backend_tools.ToolCopyToClipboard = backend_tools.ToolCopyToClipboardBase
Toolbar = ToolbarTk
@_Backend.export
class _BackendTk(_Backend):
required_interactive_framework = "tk"
FigureManager = FigureManagerTk
@classmethod
def new_figure_manager_given_figure(cls, num, figure):
"""
Create a new figure manager instance for the given figure.
"""
with _restore_foreground_window_at_end():
window = tk.Tk(className="matplotlib")
window.withdraw()
# Put a mpl icon on the window rather than the default tk icon.
            # Tkinter doesn't allow colour icons on Linux systems, but tk>=8.5
            # has an iconphoto command which we call directly. Source:
# http://mail.python.org/pipermail/tkinter-discuss/2006-November/000954.html
icon_fname = os.path.join(
rcParams['datapath'], 'images', 'matplotlib.ppm')
icon_img = tk.PhotoImage(file=icon_fname, master=window)
try:
window.iconphoto(False, icon_img)
except Exception as exc:
# log the failure (due e.g. to Tk version), but carry on
_log.info('Could not load matplotlib icon: %s', exc)
canvas = cls.FigureCanvas(figure, master=window)
manager = cls.FigureManager(canvas, num, window)
if matplotlib.is_interactive():
manager.show()
canvas.draw_idle()
return manager
@staticmethod
def trigger_manager_draw(manager):
manager.show()
@staticmethod
def mainloop():
managers = Gcf.get_all_fig_managers()
if managers:
managers[0].window.mainloop()
|
58cb252d6626b7196601a047e2dc781e39910d630a89257dd4a1c5f7278c4678
|
from . import _backend_tk
from .backend_agg import FigureCanvasAgg
from ._backend_tk import (
_BackendTk, FigureCanvasTk, FigureManagerTk, NavigationToolbar2Tk)
class FigureCanvasTkAgg(FigureCanvasAgg, FigureCanvasTk):
def draw(self):
super(FigureCanvasTkAgg, self).draw()
_backend_tk.blit(self._tkphoto, self.renderer._renderer, (0, 1, 2, 3))
self._master.update_idletasks()
def blit(self, bbox=None):
_backend_tk.blit(
self._tkphoto, self.renderer._renderer, (0, 1, 2, 3), bbox=bbox)
self._master.update_idletasks()
@_BackendTk.export
class _BackendTkAgg(_BackendTk):
FigureCanvas = FigureCanvasTkAgg
|
7550a1ed90ffd8e4740c143437789c0fc70357549e1330ac4636b8e3ac1684fc
|
import sys
import numpy as np
from . import _backend_tk
from .backend_cairo import cairo, FigureCanvasCairo, RendererCairo
from ._backend_tk import _BackendTk, FigureCanvasTk
class FigureCanvasTkCairo(FigureCanvasCairo, FigureCanvasTk):
def __init__(self, *args, **kwargs):
super(FigureCanvasTkCairo, self).__init__(*args, **kwargs)
self._renderer = RendererCairo(self.figure.dpi)
def draw(self):
width = int(self.figure.bbox.width)
height = int(self.figure.bbox.height)
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
self._renderer.set_ctx_from_surface(surface)
self._renderer.set_width_height(width, height)
self.figure.draw(self._renderer)
buf = np.reshape(surface.get_data(), (height, width, 4))
_backend_tk.blit(
self._tkphoto, buf,
(2, 1, 0, 3) if sys.byteorder == "little" else (1, 2, 3, 0))
self._master.update_idletasks()
@_BackendTk.export
class _BackendTkCairo(_BackendTk):
FigureCanvas = FigureCanvasTkCairo
|
6335cdd4292be892d2dabe742cfc5eca96a375671c5da1e85c2e50678556ac46
|
import ctypes
from .backend_cairo import cairo, FigureCanvasCairo, RendererCairo
from .backend_qt5 import QtCore, QtGui, _BackendQT5, FigureCanvasQT
from .qt_compat import QT_API
class FigureCanvasQTCairo(FigureCanvasQT, FigureCanvasCairo):
def __init__(self, figure):
super().__init__(figure=figure)
self._renderer = RendererCairo(self.figure.dpi)
self._renderer.set_width_height(-1, -1) # Invalid values.
def draw(self):
if hasattr(self._renderer.gc, "ctx"):
self.figure.draw(self._renderer)
super().draw()
def paintEvent(self, event):
self._update_dpi()
dpi_ratio = self._dpi_ratio
width = dpi_ratio * self.width()
height = dpi_ratio * self.height()
if (width, height) != self._renderer.get_canvas_width_height():
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
self._renderer.set_ctx_from_surface(surface)
self._renderer.set_width_height(width, height)
self.figure.draw(self._renderer)
buf = self._renderer.gc.ctx.get_target().get_data()
qimage = QtGui.QImage(buf, width, height,
QtGui.QImage.Format_ARGB32_Premultiplied)
# Adjust the buf reference count to work around a memory leak bug in
# QImage under PySide on Python 3.
if QT_API == 'PySide':
ctypes.c_long.from_address(id(buf)).value = 1
if hasattr(qimage, 'setDevicePixelRatio'):
# Not available on Qt4 or some older Qt5.
qimage.setDevicePixelRatio(dpi_ratio)
painter = QtGui.QPainter(self)
painter.eraseRect(event.rect())
painter.drawImage(0, 0, qimage)
self._draw_rect_callback(painter)
painter.end()
@_BackendQT5.export
class _BackendQT5Cairo(_BackendQT5):
FigureCanvas = FigureCanvasQTCairo
|
c5364c50bd7ad61a9cb0e292794e2c12cf64625a10ac2eff3898dfcae614029d
|
"""
Render to qt from agg
"""
from .backend_qt5agg import (
_BackendQT5Agg, FigureCanvasQTAgg, FigureManagerQT, NavigationToolbar2QT)
@_BackendQT5Agg.export
class _BackendQT4Agg(_BackendQT5Agg):
required_interactive_framework = "qt4"
|
fe9e6495b57837e7a703d73afe5fca937460f342387b80cc5252307206c4aae5
|
from collections import OrderedDict
import base64
import gzip
import hashlib
import io
import logging
import re
import uuid
import numpy as np
from matplotlib import cbook, __version__, rcParams
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, RendererBase)
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.colors import rgb2hex
from matplotlib.font_manager import findfont, get_font
from matplotlib.ft2font import LOAD_NO_HINTING
from matplotlib.mathtext import MathTextParser
from matplotlib.path import Path
from matplotlib import _path
from matplotlib.transforms import Affine2D, Affine2DBase
from matplotlib import _png
_log = logging.getLogger(__name__)
backend_version = __version__
# ----------------------------------------------------------------------
# SimpleXMLWriter class
#
# Based on an original by Fredrik Lundh, but modified here to:
# 1. Support modern Python idioms
# 2. Remove encoding support (it's handled by the file writer instead)
# 3. Support proper indentation
# 4. Minify things a little bit
# --------------------------------------------------------------------
# The SimpleXMLWriter module is
#
# Copyright (c) 2001-2004 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
def escape_cdata(s):
s = s.replace("&", "&")
s = s.replace("<", "<")
s = s.replace(">", ">")
return s
_escape_xml_comment = re.compile(r'-(?=-)')
def escape_comment(s):
s = escape_cdata(s)
return _escape_xml_comment.sub('- ', s)
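# Note (illustrative): XML comments may not contain the sequence "--", so,
# e.g., escape_comment("a--b") returns "a- -b".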
def escape_attrib(s):
s = s.replace("&", "&")
s = s.replace("'", "'")
s = s.replace("\"", """)
s = s.replace("<", "<")
s = s.replace(">", ">")
return s
def short_float_fmt(x):
"""
Create a short string representation of a float, which is %f
formatting with trailing zeros and the decimal point removed.
"""
return '{0:f}'.format(x).rstrip('0').rstrip('.')
class XMLWriter(object):
"""
Parameters
----------
file : writable text file-like object
"""
def __init__(self, file):
self.__write = file.write
if hasattr(file, "flush"):
self.flush = file.flush
self.__open = 0 # true if start tag is open
self.__tags = []
self.__data = []
self.__indentation = " " * 64
def __flush(self, indent=True):
# flush internal buffers
if self.__open:
if indent:
self.__write(">\n")
else:
self.__write(">")
self.__open = 0
if self.__data:
data = ''.join(self.__data)
self.__write(escape_cdata(data))
self.__data = []
def start(self, tag, attrib={}, **extra):
"""
Opens a new element. Attributes can be given as keyword
arguments, or as a string/string dictionary. The method returns
an opaque identifier that can be passed to the :meth:`close`
method, to close all open elements up to and including this one.
Parameters
----------
tag
Element tag.
attrib
Attribute dictionary. Alternatively, attributes can be given as
keyword arguments.
Returns
-------
An element identifier.
"""
self.__flush()
tag = escape_cdata(tag)
self.__data = []
self.__tags.append(tag)
self.__write(self.__indentation[:len(self.__tags) - 1])
self.__write("<%s" % tag)
for k, v in sorted({**attrib, **extra}.items()):
if not v == '':
k = escape_cdata(k)
v = escape_attrib(v)
self.__write(' %s="%s"' % (k, v))
self.__open = 1
return len(self.__tags)-1
def comment(self, comment):
"""
Adds a comment to the output stream.
Parameters
----------
comment : str
Comment text.
"""
self.__flush()
self.__write(self.__indentation[:len(self.__tags)])
self.__write("<!-- %s -->\n" % escape_comment(comment))
def data(self, text):
"""
Adds character data to the output stream.
Parameters
----------
text : str
Character data.
"""
self.__data.append(text)
def end(self, tag=None, indent=True):
"""
Closes the current element (opened by the most recent call to
:meth:`start`).
Parameters
----------
tag
Element tag. If given, the tag must match the start tag. If
omitted, the current element is closed.
"""
if tag:
assert self.__tags, "unbalanced end(%s)" % tag
assert escape_cdata(tag) == self.__tags[-1], \
"expected end(%s), got %s" % (self.__tags[-1], tag)
else:
assert self.__tags, "unbalanced end()"
tag = self.__tags.pop()
if self.__data:
self.__flush(indent)
elif self.__open:
self.__open = 0
self.__write("/>\n")
return
if indent:
self.__write(self.__indentation[:len(self.__tags)])
self.__write("</%s>\n" % tag)
def close(self, id):
"""
Closes open elements, up to (and including) the element identified
by the given identifier.
Parameters
----------
id
Element identifier, as returned by the :meth:`start` method.
"""
while len(self.__tags) > id:
self.end()
def element(self, tag, text=None, attrib={}, **extra):
"""
Adds an entire element. This is the same as calling :meth:`start`,
:meth:`data`, and :meth:`end` in sequence. The *text* argument can be
omitted.
"""
self.start(*(tag, attrib), **extra)
if text:
self.data(text)
self.end(indent=False)
def flush(self):
"""Flushes the output stream."""
pass # replaced by the constructor
def generate_transform(transform_list=[]):
if len(transform_list):
output = io.StringIO()
for type, value in transform_list:
if (type == 'scale' and (value == (1,) or value == (1, 1))
or type == 'translate' and value == (0, 0)
or type == 'rotate' and value == (0,)):
continue
if type == 'matrix' and isinstance(value, Affine2DBase):
value = value.to_values()
output.write('%s(%s)' % (
type, ' '.join(short_float_fmt(x) for x in value)))
return output.getvalue()
return ''
def generate_css(attrib={}):
if attrib:
output = io.StringIO()
attrib = sorted(attrib.items())
for k, v in attrib:
k = escape_attrib(k)
v = escape_attrib(v)
output.write("%s:%s;" % (k, v))
return output.getvalue()
return ''
_capstyle_d = {'projecting': 'square', 'butt': 'butt', 'round': 'round'}
class RendererSVG(RendererBase):
def __init__(self, width, height, svgwriter, basename=None, image_dpi=72):
self.width = width
self.height = height
self.writer = XMLWriter(svgwriter)
self.image_dpi = image_dpi # actual dpi at which we rasterize stuff
self._groupd = {}
if not rcParams['svg.image_inline']:
assert basename is not None
self.basename = basename
self._imaged = {}
self._clipd = OrderedDict()
self._markers = {}
self._path_collection_id = 0
self._imaged = {}
self._hatchd = OrderedDict()
self._has_gouraud = False
self._n_gradients = 0
self._fonts = OrderedDict()
self.mathtext_parser = MathTextParser('SVG')
RendererBase.__init__(self)
self._glyph_map = dict()
str_height = short_float_fmt(height)
str_width = short_float_fmt(width)
svgwriter.write(svgProlog)
self._start_id = self.writer.start(
'svg',
width='%spt' % str_width,
height='%spt' % str_height,
viewBox='0 0 %s %s' % (str_width, str_height),
xmlns="http://www.w3.org/2000/svg",
version="1.1",
attrib={'xmlns:xlink': "http://www.w3.org/1999/xlink"})
self._write_default_style()
def finalize(self):
self._write_clips()
self._write_hatches()
self.writer.close(self._start_id)
self.writer.flush()
def _write_default_style(self):
writer = self.writer
default_style = generate_css({
'stroke-linejoin': 'round',
'stroke-linecap': 'butt',
'white-space': 'pre',
})
writer.start('defs')
writer.start('style', type='text/css')
writer.data('*{%s}\n' % default_style)
writer.end('style')
writer.end('defs')
def _make_id(self, type, content):
salt = rcParams['svg.hashsalt']
if salt is None:
salt = str(uuid.uuid4())
m = hashlib.md5()
m.update(salt.encode('utf8'))
m.update(str(content).encode('utf8'))
return '%s%s' % (type, m.hexdigest()[:10])
def _make_flip_transform(self, transform):
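        # SVG's y axis points down while matplotlib's points up, so flip y
        # and shift by the figure height.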
return (transform +
Affine2D()
.scale(1.0, -1.0)
.translate(0.0, self.height))
def _get_font(self, prop):
fname = findfont(prop)
font = get_font(fname)
font.clear()
size = prop.get_size_in_points()
font.set_size(size, 72.0)
return font
def _get_hatch(self, gc, rgbFace):
"""
Create a new hatch pattern
"""
if rgbFace is not None:
rgbFace = tuple(rgbFace)
edge = gc.get_hatch_color()
if edge is not None:
edge = tuple(edge)
dictkey = (gc.get_hatch(), rgbFace, edge)
oid = self._hatchd.get(dictkey)
if oid is None:
oid = self._make_id('h', dictkey)
self._hatchd[dictkey] = ((gc.get_hatch_path(), rgbFace, edge), oid)
else:
_, oid = oid
return oid
def _write_hatches(self):
if not len(self._hatchd):
return
HATCH_SIZE = 72
writer = self.writer
writer.start('defs')
for (path, face, stroke), oid in self._hatchd.values():
writer.start(
'pattern',
id=oid,
patternUnits="userSpaceOnUse",
x="0", y="0", width=str(HATCH_SIZE),
height=str(HATCH_SIZE))
path_data = self._convert_path(
path,
Affine2D()
.scale(HATCH_SIZE).scale(1.0, -1.0).translate(0, HATCH_SIZE),
simplify=False)
if face is None:
fill = 'none'
else:
fill = rgb2hex(face)
writer.element(
'rect',
x="0", y="0", width=str(HATCH_SIZE+1),
height=str(HATCH_SIZE+1),
fill=fill)
writer.element(
'path',
d=path_data,
style=generate_css({
'fill': rgb2hex(stroke),
'stroke': rgb2hex(stroke),
'stroke-width': str(rcParams['hatch.linewidth']),
'stroke-linecap': 'butt',
'stroke-linejoin': 'miter'
})
)
writer.end('pattern')
writer.end('defs')
def _get_style_dict(self, gc, rgbFace):
"""Generate a style string from the GraphicsContext and rgbFace."""
attrib = {}
forced_alpha = gc.get_forced_alpha()
if gc.get_hatch() is not None:
attrib['fill'] = "url(#%s)" % self._get_hatch(gc, rgbFace)
if (rgbFace is not None and len(rgbFace) == 4 and rgbFace[3] != 1.0
and not forced_alpha):
attrib['fill-opacity'] = short_float_fmt(rgbFace[3])
else:
if rgbFace is None:
attrib['fill'] = 'none'
else:
if tuple(rgbFace[:3]) != (0, 0, 0):
attrib['fill'] = rgb2hex(rgbFace)
if (len(rgbFace) == 4 and rgbFace[3] != 1.0
and not forced_alpha):
attrib['fill-opacity'] = short_float_fmt(rgbFace[3])
if forced_alpha and gc.get_alpha() != 1.0:
attrib['opacity'] = short_float_fmt(gc.get_alpha())
offset, seq = gc.get_dashes()
if seq is not None:
attrib['stroke-dasharray'] = ','.join(
short_float_fmt(val) for val in seq)
attrib['stroke-dashoffset'] = short_float_fmt(float(offset))
linewidth = gc.get_linewidth()
if linewidth:
rgb = gc.get_rgb()
attrib['stroke'] = rgb2hex(rgb)
if not forced_alpha and rgb[3] != 1.0:
attrib['stroke-opacity'] = short_float_fmt(rgb[3])
if linewidth != 1.0:
attrib['stroke-width'] = short_float_fmt(linewidth)
if gc.get_joinstyle() != 'round':
attrib['stroke-linejoin'] = gc.get_joinstyle()
if gc.get_capstyle() != 'butt':
attrib['stroke-linecap'] = _capstyle_d[gc.get_capstyle()]
return attrib
def _get_style(self, gc, rgbFace):
return generate_css(self._get_style_dict(gc, rgbFace))
def _get_clip(self, gc):
cliprect = gc.get_clip_rectangle()
clippath, clippath_trans = gc.get_clip_path()
if clippath is not None:
clippath_trans = self._make_flip_transform(clippath_trans)
dictkey = (id(clippath), str(clippath_trans))
elif cliprect is not None:
x, y, w, h = cliprect.bounds
y = self.height-(y+h)
dictkey = (x, y, w, h)
else:
return None
clip = self._clipd.get(dictkey)
if clip is None:
oid = self._make_id('p', dictkey)
if clippath is not None:
self._clipd[dictkey] = ((clippath, clippath_trans), oid)
else:
self._clipd[dictkey] = (dictkey, oid)
else:
clip, oid = clip
return oid
def _write_clips(self):
if not len(self._clipd):
return
writer = self.writer
writer.start('defs')
for clip, oid in self._clipd.values():
writer.start('clipPath', id=oid)
if len(clip) == 2:
clippath, clippath_trans = clip
path_data = self._convert_path(
clippath, clippath_trans, simplify=False)
writer.element('path', d=path_data)
else:
x, y, w, h = clip
writer.element(
'rect',
x=short_float_fmt(x),
y=short_float_fmt(y),
width=short_float_fmt(w),
height=short_float_fmt(h))
writer.end('clipPath')
writer.end('defs')
def open_group(self, s, gid=None):
# docstring inherited
if gid:
self.writer.start('g', id=gid)
else:
self._groupd[s] = self._groupd.get(s, 0) + 1
self.writer.start('g', id="%s_%d" % (s, self._groupd[s]))
def close_group(self, s):
# docstring inherited
self.writer.end('g')
def option_image_nocomposite(self):
# docstring inherited
return not rcParams['image.composite_image']
def _convert_path(self, path, transform=None, clip=None, simplify=None,
sketch=None):
if clip:
clip = (0.0, 0.0, self.width, self.height)
else:
clip = None
return _path.convert_to_string(
path, transform, clip, simplify, sketch, 6,
[b'M', b'L', b'Q', b'C', b'z'], False).decode('ascii')
def draw_path(self, gc, path, transform, rgbFace=None):
# docstring inherited
trans_and_flip = self._make_flip_transform(transform)
clip = (rgbFace is None and gc.get_hatch_path() is None)
simplify = path.should_simplify and clip
path_data = self._convert_path(
path, trans_and_flip, clip=clip, simplify=simplify,
sketch=gc.get_sketch_params())
attrib = {}
attrib['style'] = self._get_style(gc, rgbFace)
clipid = self._get_clip(gc)
if clipid is not None:
attrib['clip-path'] = 'url(#%s)' % clipid
if gc.get_url() is not None:
self.writer.start('a', {'xlink:href': gc.get_url()})
self.writer.element('path', d=path_data, attrib=attrib)
if gc.get_url() is not None:
self.writer.end('a')
def draw_markers(
self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
# docstring inherited
if not len(path.vertices):
return
writer = self.writer
path_data = self._convert_path(
marker_path,
marker_trans + Affine2D().scale(1.0, -1.0),
simplify=False)
style = self._get_style_dict(gc, rgbFace)
dictkey = (path_data, generate_css(style))
oid = self._markers.get(dictkey)
style = generate_css({k: v for k, v in style.items()
if k.startswith('stroke')})
if oid is None:
oid = self._make_id('m', dictkey)
writer.start('defs')
writer.element('path', id=oid, d=path_data, style=style)
writer.end('defs')
self._markers[dictkey] = oid
attrib = {}
clipid = self._get_clip(gc)
if clipid is not None:
attrib['clip-path'] = 'url(#%s)' % clipid
writer.start('g', attrib=attrib)
trans_and_flip = self._make_flip_transform(trans)
attrib = {'xlink:href': '#%s' % oid}
clip = (0, 0, self.width*72, self.height*72)
for vertices, code in path.iter_segments(
trans_and_flip, clip=clip, simplify=False):
if len(vertices):
x, y = vertices[-2:]
attrib['x'] = short_float_fmt(x)
attrib['y'] = short_float_fmt(y)
attrib['style'] = self._get_style(gc, rgbFace)
writer.element('use', attrib=attrib)
writer.end('g')
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position):
# Is the optimization worth it? Rough calculation:
# cost of emitting a path in-line is
# (len_path + 5) * uses_per_path
# cost of definition+use is
# (len_path + 3) + 9 * uses_per_path
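        # Worked example with illustrative numbers: for len_path = 10 and
        # uses_per_path = 3, inlining costs (10 + 5) * 3 = 45 while defs+use
        # costs 10 + 3 + 9 * 3 = 40, so the optimization below pays off; for
        # a single use it does not (22 vs 15).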
len_path = len(paths[0].vertices) if len(paths) > 0 else 0
uses_per_path = self._iter_collection_uses_per_path(
paths, all_transforms, offsets, facecolors, edgecolors)
should_do_optimization = \
len_path + 9 * uses_per_path + 3 < (len_path + 5) * uses_per_path
if not should_do_optimization:
return RendererBase.draw_path_collection(
self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position)
writer = self.writer
path_codes = []
writer.start('defs')
for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
master_transform, paths, all_transforms)):
transform = Affine2D(transform.get_matrix()).scale(1.0, -1.0)
d = self._convert_path(path, transform, simplify=False)
oid = 'C%x_%x_%s' % (
self._path_collection_id, i, self._make_id('', d))
writer.element('path', id=oid, d=d)
path_codes.append(oid)
writer.end('defs')
for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
gc, master_transform, all_transforms, path_codes, offsets,
offsetTrans, facecolors, edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
clipid = self._get_clip(gc0)
url = gc0.get_url()
if url is not None:
writer.start('a', attrib={'xlink:href': url})
if clipid is not None:
writer.start('g', attrib={'clip-path': 'url(#%s)' % clipid})
attrib = {
'xlink:href': '#%s' % path_id,
'x': short_float_fmt(xo),
'y': short_float_fmt(self.height - yo),
'style': self._get_style(gc0, rgbFace)
}
writer.element('use', attrib=attrib)
if clipid is not None:
writer.end('g')
if url is not None:
writer.end('a')
self._path_collection_id += 1
def draw_gouraud_triangle(self, gc, points, colors, trans):
# This uses a method described here:
#
# http://www.svgopen.org/2005/papers/Converting3DFaceToSVG/index.html
#
# that uses three overlapping linear gradients to simulate a
# Gouraud triangle. Each gradient goes from fully opaque in
# one corner to fully transparent along the opposite edge.
# The line between the stop points is perpendicular to the
# opposite edge. Underlying these three gradients is a solid
# triangle whose color is the average of all three points.
writer = self.writer
if not self._has_gouraud:
self._has_gouraud = True
writer.start(
'filter',
id='colorAdd')
writer.element(
'feComposite',
attrib={'in': 'SourceGraphic'},
in2='BackgroundImage',
operator='arithmetic',
k2="1", k3="1")
writer.end('filter')
# feColorMatrix filter to correct opacity
writer.start(
'filter',
id='colorMat')
writer.element(
'feColorMatrix',
attrib={'type': 'matrix'},
values='1 0 0 0 0 \n0 1 0 0 0 \n0 0 1 0 0' +
' \n1 1 1 1 0 \n0 0 0 0 1 ')
writer.end('filter')
avg_color = np.sum(colors[:, :], axis=0) / 3.0
# Just skip fully-transparent triangles
if avg_color[-1] == 0.0:
return
trans_and_flip = self._make_flip_transform(trans)
tpoints = trans_and_flip.transform(points)
writer.start('defs')
for i in range(3):
x1, y1 = tpoints[i]
x2, y2 = tpoints[(i + 1) % 3]
x3, y3 = tpoints[(i + 2) % 3]
c = colors[i][:]
if x2 == x3:
xb = x2
yb = y1
elif y2 == y3:
xb = x1
yb = y2
else:
m1 = (y2 - y3) / (x2 - x3)
b1 = y2 - (m1 * x2)
m2 = -(1.0 / m1)
b2 = y1 - (m2 * x1)
xb = (-b1 + b2) / (m1 - m2)
yb = m2 * xb + b2
writer.start(
'linearGradient',
id="GR%x_%d" % (self._n_gradients, i),
gradientUnits="userSpaceOnUse",
x1=short_float_fmt(x1), y1=short_float_fmt(y1),
x2=short_float_fmt(xb), y2=short_float_fmt(yb))
writer.element(
'stop',
offset='1',
style=generate_css({'stop-color': rgb2hex(avg_color),
'stop-opacity': short_float_fmt(c[-1])}))
writer.element(
'stop',
offset='0',
style=generate_css({'stop-color': rgb2hex(c),
'stop-opacity': "0"}))
writer.end('linearGradient')
writer.end('defs')
# triangle formation using "path"
dpath = "M " + short_float_fmt(x1)+',' + short_float_fmt(y1)
dpath += " L " + short_float_fmt(x2) + ',' + short_float_fmt(y2)
dpath += " " + short_float_fmt(x3) + ',' + short_float_fmt(y3) + " Z"
writer.element(
'path',
attrib={'d': dpath,
'fill': rgb2hex(avg_color),
'fill-opacity': '1',
'shape-rendering': "crispEdges"})
writer.start(
'g',
attrib={'stroke': "none",
'stroke-width': "0",
'shape-rendering': "crispEdges",
'filter': "url(#colorMat)"})
writer.element(
'path',
attrib={'d': dpath,
'fill': 'url(#GR%x_0)' % self._n_gradients,
'shape-rendering': "crispEdges"})
writer.element(
'path',
attrib={'d': dpath,
'fill': 'url(#GR%x_1)' % self._n_gradients,
'filter': 'url(#colorAdd)',
'shape-rendering': "crispEdges"})
writer.element(
'path',
attrib={'d': dpath,
'fill': 'url(#GR%x_2)' % self._n_gradients,
'filter': 'url(#colorAdd)',
'shape-rendering': "crispEdges"})
writer.end('g')
self._n_gradients += 1
def draw_gouraud_triangles(self, gc, triangles_array, colors_array,
transform):
attrib = {}
clipid = self._get_clip(gc)
if clipid is not None:
attrib['clip-path'] = 'url(#%s)' % clipid
self.writer.start('g', attrib=attrib)
transform = transform.frozen()
for tri, col in zip(triangles_array, colors_array):
self.draw_gouraud_triangle(gc, tri, col, transform)
self.writer.end('g')
def option_scale_image(self):
# docstring inherited
return True
def get_image_magnification(self):
return self.image_dpi / 72.0
def draw_image(self, gc, x, y, im, transform=None):
# docstring inherited
h, w = im.shape[:2]
if w == 0 or h == 0:
return
attrib = {}
clipid = self._get_clip(gc)
if clipid is not None:
# Can't apply clip-path directly to the image because the
# image has a transformation, which would also be applied
# to the clip-path
self.writer.start('g', attrib={'clip-path': 'url(#%s)' % clipid})
oid = gc.get_gid()
url = gc.get_url()
if url is not None:
self.writer.start('a', attrib={'xlink:href': url})
if rcParams['svg.image_inline']:
bytesio = io.BytesIO()
_png.write_png(im, bytesio)
oid = oid or self._make_id('image', bytesio.getvalue())
attrib['xlink:href'] = (
"data:image/png;base64,\n" +
base64.b64encode(bytesio.getvalue()).decode('ascii'))
else:
self._imaged[self.basename] = (
self._imaged.get(self.basename, 0) + 1)
filename = '%s.image%d.png' % (
self.basename, self._imaged[self.basename])
_log.info('Writing image file for inclusion: %s', filename)
_png.write_png(im, filename)
oid = oid or 'Im_' + self._make_id('image', filename)
attrib['xlink:href'] = filename
attrib['id'] = oid
if transform is None:
w = 72.0 * w / self.image_dpi
h = 72.0 * h / self.image_dpi
self.writer.element(
'image',
transform=generate_transform([
('scale', (1, -1)), ('translate', (0, -h))]),
x=short_float_fmt(x),
y=short_float_fmt(-(self.height - y - h)),
width=short_float_fmt(w), height=short_float_fmt(h),
attrib=attrib)
else:
alpha = gc.get_alpha()
if alpha != 1.0:
attrib['opacity'] = short_float_fmt(alpha)
flipped = (
Affine2D().scale(1.0 / w, 1.0 / h) +
transform +
Affine2D()
.translate(x, y)
.scale(1.0, -1.0)
.translate(0.0, self.height))
attrib['transform'] = generate_transform(
[('matrix', flipped.frozen())])
self.writer.element(
'image',
width=short_float_fmt(w), height=short_float_fmt(h),
attrib=attrib)
if url is not None:
self.writer.end('a')
if clipid is not None:
self.writer.end('g')
def _adjust_char_id(self, char_id):
return char_id.replace("%20", "_")
def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath, mtext=None):
"""
draw the text by converting them to paths using textpath module.
Parameters
----------
prop : `matplotlib.font_manager.FontProperties`
font property
s : str
text to be converted
usetex : bool
If True, use matplotlib usetex mode.
ismath : bool
If True, use mathtext parser. If "TeX", use *usetex* mode.
"""
writer = self.writer
writer.comment(s)
glyph_map = self._glyph_map
text2path = self._text2path
color = rgb2hex(gc.get_rgb())
fontsize = prop.get_size_in_points()
style = {}
if color != '#000000':
style['fill'] = color
alpha = gc.get_alpha() if gc.get_forced_alpha() else gc.get_rgb()[3]
if alpha != 1:
style['opacity'] = short_float_fmt(alpha)
if not ismath:
font = text2path._get_font(prop)
_glyphs = text2path.get_glyphs_with_font(
font, s, glyph_map=glyph_map, return_new_glyphs_only=True)
glyph_info, glyph_map_new, rects = _glyphs
if glyph_map_new:
writer.start('defs')
for char_id, glyph_path in glyph_map_new.items():
path = Path(*glyph_path)
path_data = self._convert_path(path, simplify=False)
writer.element('path', id=char_id, d=path_data)
writer.end('defs')
glyph_map.update(glyph_map_new)
attrib = {}
attrib['style'] = generate_css(style)
font_scale = fontsize / text2path.FONT_SCALE
attrib['transform'] = generate_transform([
('translate', (x, y)),
('rotate', (-angle,)),
('scale', (font_scale, -font_scale))])
writer.start('g', attrib=attrib)
for glyph_id, xposition, yposition, scale in glyph_info:
attrib = {'xlink:href': '#%s' % glyph_id}
if xposition != 0.0:
attrib['x'] = short_float_fmt(xposition)
if yposition != 0.0:
attrib['y'] = short_float_fmt(yposition)
writer.element(
'use',
attrib=attrib)
writer.end('g')
else:
if ismath == "TeX":
_glyphs = text2path.get_glyphs_tex(
prop, s, glyph_map=glyph_map, return_new_glyphs_only=True)
else:
_glyphs = text2path.get_glyphs_mathtext(
prop, s, glyph_map=glyph_map, return_new_glyphs_only=True)
glyph_info, glyph_map_new, rects = _glyphs
# We store the character glyphs w/o flipping. Instead, the
# coordinate will be flipped when these characters are used.
if glyph_map_new:
writer.start('defs')
for char_id, glyph_path in glyph_map_new.items():
char_id = self._adjust_char_id(char_id)
# Some characters are blank
if not len(glyph_path[0]):
path_data = ""
else:
path = Path(*glyph_path)
path_data = self._convert_path(path, simplify=False)
writer.element('path', id=char_id, d=path_data)
writer.end('defs')
glyph_map.update(glyph_map_new)
attrib = {}
font_scale = fontsize / text2path.FONT_SCALE
attrib['style'] = generate_css(style)
attrib['transform'] = generate_transform([
('translate', (x, y)),
('rotate', (-angle,)),
('scale', (font_scale, -font_scale))])
writer.start('g', attrib=attrib)
for char_id, xposition, yposition, scale in glyph_info:
char_id = self._adjust_char_id(char_id)
writer.element(
'use',
transform=generate_transform([
('translate', (xposition, yposition)),
('scale', (scale,)),
]),
attrib={'xlink:href': '#%s' % char_id})
for verts, codes in rects:
path = Path(verts, codes)
path_data = self._convert_path(path, simplify=False)
writer.element('path', d=path_data)
writer.end('g')
def _draw_text_as_text(self, gc, x, y, s, prop, angle, ismath, mtext=None):
writer = self.writer
color = rgb2hex(gc.get_rgb())
style = {}
if color != '#000000':
style['fill'] = color
alpha = gc.get_alpha() if gc.get_forced_alpha() else gc.get_rgb()[3]
if alpha != 1:
style['opacity'] = short_float_fmt(alpha)
if not ismath:
font = self._get_font(prop)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
fontsize = prop.get_size_in_points()
fontfamily = font.family_name
fontstyle = prop.get_style()
attrib = {}
# Must add "px" to workaround a Firefox bug
style['font-size'] = short_float_fmt(fontsize) + 'px'
style['font-family'] = str(fontfamily)
style['font-style'] = prop.get_style().lower()
style['font-weight'] = str(prop.get_weight()).lower()
attrib['style'] = generate_css(style)
if mtext and (angle == 0 or mtext.get_rotation_mode() == "anchor"):
# If text anchoring can be supported, get the original
# coordinates and add alignment information.
# Get anchor coordinates.
transform = mtext.get_transform()
ax, ay = transform.transform_point(mtext.get_position())
ay = self.height - ay
# Don't do vertical anchor alignment. Most applications do not
# support 'alignment-baseline' yet. Apply the vertical layout
# to the anchor point manually for now.
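                # The offset of the drawn position from the anchor is
                # projected onto the text's vertical direction (rotated by
                # `angle`) and the anchor is shifted by that amount.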
angle_rad = np.deg2rad(angle)
dir_vert = np.array([np.sin(angle_rad), np.cos(angle_rad)])
v_offset = np.dot(dir_vert, [(x - ax), (y - ay)])
ax = ax + v_offset * dir_vert[0]
ay = ay + v_offset * dir_vert[1]
ha_mpl_to_svg = {'left': 'start', 'right': 'end',
'center': 'middle'}
style['text-anchor'] = ha_mpl_to_svg[mtext.get_ha()]
attrib['x'] = short_float_fmt(ax)
attrib['y'] = short_float_fmt(ay)
attrib['style'] = generate_css(style)
attrib['transform'] = "rotate(%s, %s, %s)" % (
short_float_fmt(-angle),
short_float_fmt(ax),
short_float_fmt(ay))
writer.element('text', s, attrib=attrib)
else:
attrib['transform'] = generate_transform([
('translate', (x, y)),
('rotate', (-angle,))])
writer.element('text', s, attrib=attrib)
else:
writer.comment(s)
width, height, descent, svg_elements, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
svg_glyphs = svg_elements.svg_glyphs
svg_rects = svg_elements.svg_rects
attrib = {}
attrib['style'] = generate_css(style)
attrib['transform'] = generate_transform([
('translate', (x, y)),
('rotate', (-angle,))])
# Apply attributes to 'g', not 'text', because we likely have some
# rectangles as well with the same style and transformation.
writer.start('g', attrib=attrib)
writer.start('text')
# Sort the characters by font, and output one tspan for each.
spans = OrderedDict()
for font, fontsize, thetext, new_x, new_y, metrics in svg_glyphs:
style = generate_css({
'font-size': short_float_fmt(fontsize) + 'px',
'font-family': font.family_name,
'font-style': font.style_name.lower(),
'font-weight': font.style_name.lower()})
if thetext == 32:
thetext = 0xa0 # non-breaking space
spans.setdefault(style, []).append((new_x, -new_y, thetext))
for style, chars in spans.items():
chars.sort()
if len({y for x, y, t in chars}) == 1: # Are all y's the same?
ys = str(chars[0][1])
else:
ys = ' '.join(str(c[1]) for c in chars)
attrib = {
'style': style,
'x': ' '.join(short_float_fmt(c[0]) for c in chars),
'y': ys
}
writer.element(
'tspan',
''.join(chr(c[2]) for c in chars),
attrib=attrib)
writer.end('text')
if len(svg_rects):
for x, y, width, height in svg_rects:
writer.element(
'rect',
x=short_float_fmt(x),
y=short_float_fmt(-y + height),
width=short_float_fmt(width),
height=short_float_fmt(height)
)
writer.end('g')
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
# docstring inherited
self._draw_text_as_path(gc, x, y, s, prop, angle, ismath="TeX")
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
# docstring inherited
clipid = self._get_clip(gc)
if clipid is not None:
# Cannot apply clip-path directly to the text, because
            # it has a transformation
self.writer.start(
'g', attrib={'clip-path': 'url(#%s)' % clipid})
if gc.get_url() is not None:
self.writer.start('a', {'xlink:href': gc.get_url()})
if rcParams['svg.fonttype'] == 'path':
self._draw_text_as_path(gc, x, y, s, prop, angle, ismath, mtext)
else:
self._draw_text_as_text(gc, x, y, s, prop, angle, ismath, mtext)
if gc.get_url() is not None:
self.writer.end('a')
if clipid is not None:
self.writer.end('g')
def flipy(self):
# docstring inherited
return True
def get_canvas_width_height(self):
# docstring inherited
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
# docstring inherited
return self._text2path.get_text_width_height_descent(s, prop, ismath)
class FigureCanvasSVG(FigureCanvasBase):
filetypes = {'svg': 'Scalable Vector Graphics',
'svgz': 'Scalable Vector Graphics'}
fixed_dpi = 72
def print_svg(self, filename, *args, **kwargs):
with cbook.open_file_cm(filename, "w", encoding="utf-8") as fh:
filename = getattr(fh, 'name', '')
if not isinstance(filename, str):
filename = ''
if cbook.file_requires_unicode(fh):
detach = False
else:
fh = io.TextIOWrapper(fh, 'utf-8')
detach = True
result = self._print_svg(filename, fh, **kwargs)
# Detach underlying stream from wrapper so that it remains open in
# the caller.
if detach:
fh.detach()
return result
def print_svgz(self, filename, *args, **kwargs):
with cbook.open_file_cm(filename, "wb") as fh, \
gzip.GzipFile(mode='w', fileobj=fh) as gzipwriter:
return self.print_svg(gzipwriter)
def _print_svg(
self, filename, fh, *, dpi=72, bbox_inches_restore=None, **kwargs):
self.figure.set_dpi(72.0)
width, height = self.figure.get_size_inches()
w, h = width * 72, height * 72
renderer = MixedModeRenderer(
self.figure, width, height, dpi,
RendererSVG(w, h, fh, filename, dpi),
bbox_inches_restore=bbox_inches_restore)
self.figure.draw(renderer)
renderer.finalize()
def get_default_filetype(self):
return 'svg'
FigureManagerSVG = FigureManagerBase
svgProlog = """\
<?xml version="1.0" encoding="utf-8" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<!-- Created with matplotlib (https://matplotlib.org/) -->
"""
@_Backend.export
class _BackendSVG(_Backend):
FigureCanvas = FigureCanvasSVG
|
c32c8701ee596995f5a2a7086de4b519977c0c3526ded05ece271719c14413db
|
import numpy as np
from .. import cbook
from . import backend_agg, backend_cairo, backend_gtk3
from .backend_cairo import cairo
from .backend_gtk3 import Gtk, _BackendGTK3
from matplotlib import transforms
class FigureCanvasGTK3Agg(backend_gtk3.FigureCanvasGTK3,
backend_agg.FigureCanvasAgg):
def __init__(self, figure):
backend_gtk3.FigureCanvasGTK3.__init__(self, figure)
self._bbox_queue = []
def _renderer_init(self):
pass
def _render_figure(self, width, height):
backend_agg.FigureCanvasAgg.draw(self)
def on_draw_event(self, widget, ctx):
"""GtkDrawable draw event, like expose_event in GTK 2.X.
"""
allocation = self.get_allocation()
w, h = allocation.width, allocation.height
if not len(self._bbox_queue):
Gtk.render_background(
self.get_style_context(), ctx,
allocation.x, allocation.y,
allocation.width, allocation.height)
bbox_queue = [transforms.Bbox([[0, 0], [w, h]])]
else:
bbox_queue = self._bbox_queue
ctx = backend_cairo._to_context(ctx)
for bbox in bbox_queue:
x = int(bbox.x0)
y = h - int(bbox.y1)
width = int(bbox.x1) - int(bbox.x0)
height = int(bbox.y1) - int(bbox.y0)
buf = cbook._unmultiplied_rgba8888_to_premultiplied_argb32(
np.asarray(self.copy_from_bbox(bbox)))
image = cairo.ImageSurface.create_for_data(
buf.ravel().data, cairo.FORMAT_ARGB32, width, height)
ctx.set_source_surface(image, x, y)
ctx.paint()
if len(self._bbox_queue):
self._bbox_queue = []
return False
def blit(self, bbox=None):
# If bbox is None, blit the entire canvas to gtk. Otherwise
# blit only the area defined by the bbox.
if bbox is None:
bbox = self.figure.bbox
allocation = self.get_allocation()
w, h = allocation.width, allocation.height
x = int(bbox.x0)
y = h - int(bbox.y1)
width = int(bbox.x1) - int(bbox.x0)
height = int(bbox.y1) - int(bbox.y0)
self._bbox_queue.append(bbox)
self.queue_draw_area(x, y, width, height)
def draw(self):
if self.get_visible() and self.get_mapped():
allocation = self.get_allocation()
self._render_figure(allocation.width, allocation.height)
super().draw()
def print_png(self, filename, *args, **kwargs):
# Do this so we can save the resolution of figure in the PNG file
agg = self.switch_backends(backend_agg.FigureCanvasAgg)
return agg.print_png(filename, *args, **kwargs)
class FigureManagerGTK3Agg(backend_gtk3.FigureManagerGTK3):
pass
@_BackendGTK3.export
class _BackendGTK3Agg(_BackendGTK3):
FigureCanvas = FigureCanvasGTK3Agg
FigureManager = FigureManagerGTK3Agg
|
bfc60b6db1c194b20007d65fde413e7387695eebab33adc00305db3f5ee4cb99
|
"""
Qt binding and backend selector.
The selection logic is as follows:
- if any of PyQt5, PySide2, PyQt4 or PySide have already been imported
(checked in that order), use it;
- otherwise, if the QT_API environment variable (used by Enthought) is set, use
it to determine which binding to use (but do not change the backend based on
it; i.e. if the Qt5Agg backend is requested but QT_API is set to "pyqt4",
  then actually use Qt5 with PyQt5 or PySide2 (whichever can be imported));
- otherwise, use whatever the rcParams indicate.
"""
from distutils.version import LooseVersion
import os
import sys
from matplotlib import rcParams
QT_API_PYQT5 = "PyQt5"
QT_API_PYSIDE2 = "PySide2"
QT_API_PYQTv2 = "PyQt4v2"
QT_API_PYSIDE = "PySide"
QT_API_PYQT = "PyQt4" # Use the old sip v1 API (Py3 defaults to v2).
QT_API_ENV = os.environ.get("QT_API")
# Mapping of QT_API_ENV to requested binding. ETS does not support PyQt4v1.
# (https://github.com/enthought/pyface/blob/master/pyface/qt/__init__.py)
_ETS = {"pyqt5": QT_API_PYQT5, "pyside2": QT_API_PYSIDE2,
"pyqt": QT_API_PYQTv2, "pyside": QT_API_PYSIDE,
None: None}
# First, check if anything is already imported.
if "PyQt5" in sys.modules:
QT_API = QT_API_PYQT5
elif "PySide2" in sys.modules:
QT_API = QT_API_PYSIDE2
elif "PyQt4" in sys.modules:
QT_API = QT_API_PYQTv2
elif "PySide" in sys.modules:
QT_API = QT_API_PYSIDE
# Otherwise, check the QT_API environment variable (from Enthought). This can
# only override the binding, not the backend (in other words, we check that the
# requested backend actually matches).
elif rcParams["backend"] in ["Qt5Agg", "Qt5Cairo"]:
if QT_API_ENV in ["pyqt5", "pyside2"]:
QT_API = _ETS[QT_API_ENV]
else:
QT_API = None
elif rcParams["backend"] in ["Qt4Agg", "Qt4Cairo"]:
if QT_API_ENV in ["pyqt4", "pyside"]:
QT_API = _ETS[QT_API_ENV]
else:
QT_API = None
# A non-Qt backend was selected but we still got there (possible, e.g., when
# fully manually embedding Matplotlib in a Qt app without using pyplot).
else:
try:
QT_API = _ETS[QT_API_ENV]
except KeyError:
        raise RuntimeError(
            "The environment variable QT_API has the unrecognized value {!r}; "
            "valid values are 'pyqt5', 'pyside2', 'pyqt', and 'pyside'"
            .format(QT_API_ENV))
def _setup_pyqt5():
global QtCore, QtGui, QtWidgets, __version__, is_pyqt5, _getSaveFileName
if QT_API == QT_API_PYQT5:
from PyQt5 import QtCore, QtGui, QtWidgets
__version__ = QtCore.PYQT_VERSION_STR
QtCore.Signal = QtCore.pyqtSignal
QtCore.Slot = QtCore.pyqtSlot
QtCore.Property = QtCore.pyqtProperty
elif QT_API == QT_API_PYSIDE2:
from PySide2 import QtCore, QtGui, QtWidgets, __version__
else:
raise ValueError("Unexpected value for the 'backend.qt5' rcparam")
_getSaveFileName = QtWidgets.QFileDialog.getSaveFileName
def is_pyqt5():
return True
def _setup_pyqt4():
global QtCore, QtGui, QtWidgets, __version__, is_pyqt5, _getSaveFileName
def _setup_pyqt4_internal(api):
global QtCore, QtGui, QtWidgets, \
__version__, is_pyqt5, _getSaveFileName
# List of incompatible APIs:
# http://pyqt.sourceforge.net/Docs/PyQt4/incompatible_apis.html
_sip_apis = ["QDate", "QDateTime", "QString", "QTextStream", "QTime",
"QUrl", "QVariant"]
try:
import sip
except ImportError:
pass
else:
for _sip_api in _sip_apis:
try:
sip.setapi(_sip_api, api)
except ValueError:
pass
from PyQt4 import QtCore, QtGui
__version__ = QtCore.PYQT_VERSION_STR
# PyQt 4.6 introduced getSaveFileNameAndFilter:
# https://riverbankcomputing.com/news/pyqt-46
if __version__ < LooseVersion("4.6"):
raise ImportError("PyQt<4.6 is not supported")
QtCore.Signal = QtCore.pyqtSignal
QtCore.Slot = QtCore.pyqtSlot
QtCore.Property = QtCore.pyqtProperty
_getSaveFileName = QtGui.QFileDialog.getSaveFileNameAndFilter
if QT_API == QT_API_PYQTv2:
_setup_pyqt4_internal(api=2)
elif QT_API == QT_API_PYSIDE:
from PySide import QtCore, QtGui, __version__, __version_info__
# PySide 1.0.3 fixed the following:
# https://srinikom.github.io/pyside-bz-archive/809.html
if __version_info__ < (1, 0, 3):
raise ImportError("PySide<1.0.3 is not supported")
_getSaveFileName = QtGui.QFileDialog.getSaveFileName
elif QT_API == QT_API_PYQT:
_setup_pyqt4_internal(api=1)
else:
raise ValueError("Unexpected value for the 'backend.qt4' rcparam")
QtWidgets = QtGui
def is_pyqt5():
return False
if QT_API in [QT_API_PYQT5, QT_API_PYSIDE2]:
_setup_pyqt5()
elif QT_API in [QT_API_PYQTv2, QT_API_PYSIDE, QT_API_PYQT]:
_setup_pyqt4()
elif QT_API is None:
if rcParams["backend"] == "Qt4Agg":
_candidates = [(_setup_pyqt4, QT_API_PYQTv2),
(_setup_pyqt4, QT_API_PYSIDE),
(_setup_pyqt4, QT_API_PYQT),
(_setup_pyqt5, QT_API_PYQT5),
(_setup_pyqt5, QT_API_PYSIDE2)]
else:
_candidates = [(_setup_pyqt5, QT_API_PYQT5),
(_setup_pyqt5, QT_API_PYSIDE2),
(_setup_pyqt4, QT_API_PYQTv2),
(_setup_pyqt4, QT_API_PYSIDE),
(_setup_pyqt4, QT_API_PYQT)]
for _setup, QT_API in _candidates:
try:
_setup()
except ImportError:
continue
break
else:
raise ImportError("Failed to import any qt binding")
else: # We should not get there.
raise AssertionError("Unexpected QT_API: {}".format(QT_API))
# These globals are only defined for backcompatibility purposes.
ETS = dict(pyqt=(QT_API_PYQTv2, 4), pyside=(QT_API_PYSIDE, 4),
pyqt5=(QT_API_PYQT5, 5), pyside2=(QT_API_PYSIDE2, 5))
QT_RC_MAJOR_VERSION = 5 if is_pyqt5() else 4
|
ebe26d081a7d44a92ff7112e9ae5a1f8831a48a9f2d40242444dcaec335c1ac0
|
import tkinter as tk
import numpy as np
from matplotlib import cbook
from matplotlib.backends import _tkagg
cbook.warn_deprecated("3.0", name=__name__, obj_type="module")
def blit(photoimage, aggimage, bbox=None, colormode=1):
tk = photoimage.tk
if bbox is not None:
bbox_array = bbox.__array__()
# x1, x2, y1, y2
bboxptr = (bbox_array[0, 0], bbox_array[1, 0],
bbox_array[0, 1], bbox_array[1, 1])
else:
bboxptr = 0
data = np.asarray(aggimage)
dataptr = (data.shape[0], data.shape[1], data.ctypes.data)
try:
tk.call(
"PyAggImagePhoto", photoimage,
dataptr, colormode, bboxptr)
except tk.TclError:
if hasattr(tk, 'interpaddr'):
_tkagg.tkinit(tk.interpaddr(), 1)
else:
# very old python?
_tkagg.tkinit(tk, 0)
tk.call("PyAggImagePhoto", photoimage,
dataptr, colormode, bboxptr)
def test(aggimage):
r = tk.Tk()
c = tk.Canvas(r, width=aggimage.width, height=aggimage.height)
c.pack()
p = tk.PhotoImage(width=aggimage.width, height=aggimage.height)
blit(p, aggimage)
c.create_image(aggimage.width, aggimage.height, image=p)
blit(p, aggimage)
while True:
r.update_idletasks()
|
c26a657239f4fb4e96b0224f4e91582a1652cd6dda7c65ef845a7cf43c095553
|
"""
A Cairo backend for matplotlib
==============================
:Author: Steve Chaplin and others
This backend depends on cairocffi or pycairo.
"""
import gzip
import numpy as np
try:
import cairo
if cairo.version_info < (1, 11, 0):
# Introduced create_for_data for Py3.
raise ImportError
except ImportError:
try:
import cairocffi as cairo
except ImportError:
        raise ImportError(
            "cairo backend requires that pycairo>=1.11.0 or cairocffi "
            "is installed")
backend_version = cairo.version
from .. import cbook
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, GraphicsContextBase,
RendererBase)
from matplotlib.font_manager import ttfFontProperty
from matplotlib.mathtext import MathTextParser
from matplotlib.path import Path
from matplotlib.transforms import Affine2D
if cairo.__name__ == "cairocffi":
# Convert a pycairo context to a cairocffi one.
def _to_context(ctx):
if not isinstance(ctx, cairo.Context):
ctx = cairo.Context._from_pointer(
cairo.ffi.cast(
'cairo_t **',
id(ctx) + object.__basicsize__)[0],
incref=True)
return ctx
else:
# Pass-through a pycairo context.
def _to_context(ctx):
return ctx
@cbook.deprecated("3.0")
class ArrayWrapper:
"""Thin wrapper around numpy ndarray to expose the interface
expected by cairocffi. Basically replicates the
array.array interface.
"""
def __init__(self, myarray):
self.__array = myarray
self.__data = myarray.ctypes.data
self.__size = len(myarray.flatten())
self.itemsize = myarray.itemsize
def buffer_info(self):
return (self.__data, self.__size)
def _append_path(ctx, path, transform, clip=None):
for points, code in path.iter_segments(
transform, remove_nans=True, clip=clip):
if code == Path.MOVETO:
ctx.move_to(*points)
elif code == Path.CLOSEPOLY:
ctx.close_path()
elif code == Path.LINETO:
ctx.line_to(*points)
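        # The CURVE3 branch below elevates a quadratic Bezier (current point
        # p0, control q, endpoint p1) to a cubic, since cairo has no
        # quadratic primitive: control points p0/3 + 2*q/3 and 2*q/3 + p1/3.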
elif code == Path.CURVE3:
cur = np.asarray(ctx.get_current_point())
a = points[:2]
b = points[-2:]
ctx.curve_to(*(cur / 3 + a * 2 / 3), *(a * 2 / 3 + b / 3), *b)
elif code == Path.CURVE4:
ctx.curve_to(*points)
class RendererCairo(RendererBase):
fontweights = {
100 : cairo.FONT_WEIGHT_NORMAL,
200 : cairo.FONT_WEIGHT_NORMAL,
300 : cairo.FONT_WEIGHT_NORMAL,
400 : cairo.FONT_WEIGHT_NORMAL,
500 : cairo.FONT_WEIGHT_NORMAL,
600 : cairo.FONT_WEIGHT_BOLD,
700 : cairo.FONT_WEIGHT_BOLD,
800 : cairo.FONT_WEIGHT_BOLD,
900 : cairo.FONT_WEIGHT_BOLD,
'ultralight' : cairo.FONT_WEIGHT_NORMAL,
'light' : cairo.FONT_WEIGHT_NORMAL,
'normal' : cairo.FONT_WEIGHT_NORMAL,
'medium' : cairo.FONT_WEIGHT_NORMAL,
'regular' : cairo.FONT_WEIGHT_NORMAL,
'semibold' : cairo.FONT_WEIGHT_BOLD,
'bold' : cairo.FONT_WEIGHT_BOLD,
'heavy' : cairo.FONT_WEIGHT_BOLD,
'ultrabold' : cairo.FONT_WEIGHT_BOLD,
'black' : cairo.FONT_WEIGHT_BOLD,
}
fontangles = {
'italic' : cairo.FONT_SLANT_ITALIC,
'normal' : cairo.FONT_SLANT_NORMAL,
'oblique' : cairo.FONT_SLANT_OBLIQUE,
}
def __init__(self, dpi):
self.dpi = dpi
self.gc = GraphicsContextCairo(renderer=self)
self.text_ctx = cairo.Context(
cairo.ImageSurface(cairo.FORMAT_ARGB32, 1, 1))
self.mathtext_parser = MathTextParser('Cairo')
RendererBase.__init__(self)
def set_ctx_from_surface(self, surface):
self.gc.ctx = cairo.Context(surface)
# Although it may appear natural to automatically call
# `self.set_width_height(surface.get_width(), surface.get_height())`
# here (instead of having the caller do so separately), this would fail
# for PDF/PS/SVG surfaces, which have no way to report their extents.
def set_width_height(self, width, height):
self.width = width
self.height = height
def _fill_and_stroke(self, ctx, fill_c, alpha, alpha_overrides):
if fill_c is not None:
ctx.save()
if len(fill_c) == 3 or alpha_overrides:
ctx.set_source_rgba(fill_c[0], fill_c[1], fill_c[2], alpha)
else:
ctx.set_source_rgba(fill_c[0], fill_c[1], fill_c[2], fill_c[3])
ctx.fill_preserve()
ctx.restore()
ctx.stroke()
@staticmethod
@cbook.deprecated("3.0")
def convert_path(ctx, path, transform, clip=None):
_append_path(ctx, path, transform, clip)
def draw_path(self, gc, path, transform, rgbFace=None):
# docstring inherited
ctx = gc.ctx
# Clip the path to the actual rendering extents if it isn't filled.
clip = (ctx.clip_extents()
if rgbFace is None and gc.get_hatch() is None
else None)
transform = (transform
+ Affine2D().scale(1, -1).translate(0, self.height))
ctx.new_path()
_append_path(ctx, path, transform, clip)
self._fill_and_stroke(
ctx, rgbFace, gc.get_alpha(), gc.get_forced_alpha())
def draw_markers(self, gc, marker_path, marker_trans, path, transform,
rgbFace=None):
# docstring inherited
ctx = gc.ctx
ctx.new_path()
# Create the path for the marker; it needs to be flipped here already!
_append_path(ctx, marker_path, marker_trans + Affine2D().scale(1, -1))
marker_path = ctx.copy_path_flat()
# Figure out whether the path has a fill
x1, y1, x2, y2 = ctx.fill_extents()
if x1 == 0 and y1 == 0 and x2 == 0 and y2 == 0:
filled = False
# No fill, just unset this (so we don't try to fill it later on)
rgbFace = None
else:
filled = True
transform = (transform
+ Affine2D().scale(1, -1).translate(0, self.height))
ctx.new_path()
for i, (vertices, codes) in enumerate(
path.iter_segments(transform, simplify=False)):
if len(vertices):
x, y = vertices[-2:]
ctx.save()
# Translate and apply path
ctx.translate(x, y)
ctx.append_path(marker_path)
ctx.restore()
# Slower code path if there is a fill; we need to draw
# the fill and stroke for each marker at the same time.
# Also flush out the drawing every once in a while to
# prevent the paths from getting way too long.
if filled or i % 1000 == 0:
self._fill_and_stroke(
ctx, rgbFace, gc.get_alpha(), gc.get_forced_alpha())
# Fast path, if there is no fill, draw everything in one step
if not filled:
self._fill_and_stroke(
ctx, rgbFace, gc.get_alpha(), gc.get_forced_alpha())
def draw_image(self, gc, x, y, im):
im = cbook._unmultiplied_rgba8888_to_premultiplied_argb32(im[::-1])
surface = cairo.ImageSurface.create_for_data(
im.ravel().data, cairo.FORMAT_ARGB32,
im.shape[1], im.shape[0], im.shape[1] * 4)
ctx = gc.ctx
y = self.height - y - im.shape[0]
ctx.save()
ctx.set_source_surface(surface, float(x), float(y))
ctx.paint()
ctx.restore()
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
# docstring inherited
# Note: x,y are device/display coords, not user-coords, unlike other
# draw_* methods
if ismath:
self._draw_mathtext(gc, x, y, s, prop, angle)
else:
ctx = gc.ctx
ctx.new_path()
ctx.move_to(x, y)
ctx.select_font_face(prop.get_name(),
self.fontangles[prop.get_style()],
self.fontweights[prop.get_weight()])
size = prop.get_size_in_points() * self.dpi / 72.0
ctx.save()
if angle:
ctx.rotate(np.deg2rad(-angle))
ctx.set_font_size(size)
ctx.show_text(s)
ctx.restore()
def _draw_mathtext(self, gc, x, y, s, prop, angle):
ctx = gc.ctx
width, height, descent, glyphs, rects = self.mathtext_parser.parse(
s, self.dpi, prop)
ctx.save()
ctx.translate(x, y)
if angle:
ctx.rotate(np.deg2rad(-angle))
for font, fontsize, s, ox, oy in glyphs:
ctx.new_path()
ctx.move_to(ox, oy)
fontProp = ttfFontProperty(font)
ctx.select_font_face(fontProp.name,
self.fontangles[fontProp.style],
self.fontweights[fontProp.weight])
size = fontsize * self.dpi / 72.0
ctx.set_font_size(size)
ctx.show_text(s)
for ox, oy, w, h in rects:
ctx.new_path()
ctx.rectangle(ox, oy, w, h)
ctx.set_source_rgb(0, 0, 0)
ctx.fill_preserve()
ctx.restore()
def get_canvas_width_height(self):
# docstring inherited
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
# docstring inherited
if ismath:
width, height, descent, fonts, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
return width, height, descent
ctx = self.text_ctx
ctx.save()
ctx.select_font_face(prop.get_name(),
self.fontangles[prop.get_style()],
self.fontweights[prop.get_weight()])
# Cairo (says it) uses 1/96 inch user space units, ref: cairo_gstate.c
# but if /96.0 is used the font is too small
size = prop.get_size_in_points() * self.dpi / 72
# problem - scale remembers last setting and font can become
# enormous causing program to crash
# save/restore prevents the problem
ctx.set_font_size(size)
y_bearing, w, h = ctx.text_extents(s)[1:4]
ctx.restore()
return w, h, h + y_bearing
def new_gc(self):
# docstring inherited
self.gc.ctx.save()
self.gc._alpha = 1
self.gc._forced_alpha = False # if True, _alpha overrides A from RGBA
return self.gc
def points_to_pixels(self, points):
# docstring inherited
return points / 72 * self.dpi
class GraphicsContextCairo(GraphicsContextBase):
_joind = {
'bevel' : cairo.LINE_JOIN_BEVEL,
'miter' : cairo.LINE_JOIN_MITER,
'round' : cairo.LINE_JOIN_ROUND,
}
_capd = {
'butt' : cairo.LINE_CAP_BUTT,
'projecting' : cairo.LINE_CAP_SQUARE,
'round' : cairo.LINE_CAP_ROUND,
}
def __init__(self, renderer):
GraphicsContextBase.__init__(self)
self.renderer = renderer
def restore(self):
self.ctx.restore()
def set_alpha(self, alpha):
GraphicsContextBase.set_alpha(self, alpha)
_alpha = self.get_alpha()
rgb = self._rgb
if self.get_forced_alpha():
self.ctx.set_source_rgba(rgb[0], rgb[1], rgb[2], _alpha)
else:
self.ctx.set_source_rgba(rgb[0], rgb[1], rgb[2], rgb[3])
# def set_antialiased(self, b):
# cairo has many antialiasing modes, we need to pick one for True and
# one for False.
def set_capstyle(self, cs):
if cs in ('butt', 'round', 'projecting'):
self._capstyle = cs
self.ctx.set_line_cap(self._capd[cs])
else:
raise ValueError('Unrecognized cap style. Found %s' % cs)
def set_clip_rectangle(self, rectangle):
if not rectangle:
return
x, y, w, h = np.round(rectangle.bounds)
ctx = self.ctx
ctx.new_path()
ctx.rectangle(x, self.renderer.height - h - y, w, h)
ctx.clip()
def set_clip_path(self, path):
if not path:
return
tpath, affine = path.get_transformed_path_and_affine()
ctx = self.ctx
ctx.new_path()
affine = (affine
+ Affine2D().scale(1, -1).translate(0, self.renderer.height))
_append_path(ctx, tpath, affine)
ctx.clip()
def set_dashes(self, offset, dashes):
self._dashes = offset, dashes
if dashes is None:
self.ctx.set_dash([], 0) # switch dashes off
else:
self.ctx.set_dash(
list(self.renderer.points_to_pixels(np.asarray(dashes))),
offset)
def set_foreground(self, fg, isRGBA=None):
GraphicsContextBase.set_foreground(self, fg, isRGBA)
if len(self._rgb) == 3:
self.ctx.set_source_rgb(*self._rgb)
else:
self.ctx.set_source_rgba(*self._rgb)
def get_rgb(self):
return self.ctx.get_source().get_rgba()[:3]
def set_joinstyle(self, js):
if js in ('miter', 'round', 'bevel'):
self._joinstyle = js
self.ctx.set_line_join(self._joind[js])
else:
raise ValueError('Unrecognized join style. Found %s' % js)
def set_linewidth(self, w):
self._linewidth = float(w)
self.ctx.set_line_width(self.renderer.points_to_pixels(w))
class FigureCanvasCairo(FigureCanvasBase):
supports_blit = False
def print_png(self, fobj, *args, **kwargs):
self._get_printed_image_surface().write_to_png(fobj)
def print_rgba(self, fobj, *args, **kwargs):
width, height = self.get_width_height()
buf = self._get_printed_image_surface().get_data()
fobj.write(cbook._premultiplied_argb32_to_unmultiplied_rgba8888(
np.asarray(buf).reshape((width, height, 4))))
print_raw = print_rgba
def _get_printed_image_surface(self):
width, height = self.get_width_height()
renderer = RendererCairo(self.figure.dpi)
renderer.set_width_height(width, height)
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
renderer.set_ctx_from_surface(surface)
self.figure.draw(renderer)
return surface
def print_pdf(self, fobj, *args, **kwargs):
return self._save(fobj, 'pdf', *args, **kwargs)
def print_ps(self, fobj, *args, **kwargs):
return self._save(fobj, 'ps', *args, **kwargs)
def print_svg(self, fobj, *args, **kwargs):
return self._save(fobj, 'svg', *args, **kwargs)
def print_svgz(self, fobj, *args, **kwargs):
return self._save(fobj, 'svgz', *args, **kwargs)
def _save(self, fo, fmt, **kwargs):
# save PDF/PS/SVG
orientation = kwargs.get('orientation', 'portrait')
dpi = 72
self.figure.dpi = dpi
w_in, h_in = self.figure.get_size_inches()
width_in_points, height_in_points = w_in * dpi, h_in * dpi
if orientation == 'landscape':
width_in_points, height_in_points = (
height_in_points, width_in_points)
if fmt == 'ps':
if not hasattr(cairo, 'PSSurface'):
raise RuntimeError('cairo has not been compiled with PS '
'support enabled')
surface = cairo.PSSurface(fo, width_in_points, height_in_points)
elif fmt == 'pdf':
if not hasattr(cairo, 'PDFSurface'):
raise RuntimeError('cairo has not been compiled with PDF '
'support enabled')
surface = cairo.PDFSurface(fo, width_in_points, height_in_points)
elif fmt in ('svg', 'svgz'):
if not hasattr(cairo, 'SVGSurface'):
raise RuntimeError('cairo has not been compiled with SVG '
'support enabled')
if fmt == 'svgz':
if isinstance(fo, str):
fo = gzip.GzipFile(fo, 'wb')
else:
fo = gzip.GzipFile(None, 'wb', fileobj=fo)
surface = cairo.SVGSurface(fo, width_in_points, height_in_points)
else:
raise ValueError("Unknown format: {!r}".format(fmt))
# surface.set_dpi() can be used
renderer = RendererCairo(self.figure.dpi)
renderer.set_width_height(width_in_points, height_in_points)
renderer.set_ctx_from_surface(surface)
ctx = renderer.gc.ctx
if orientation == 'landscape':
ctx.rotate(np.pi / 2)
ctx.translate(0, -height_in_points)
# Perhaps add an '%%Orientation: Landscape' comment?
self.figure.draw(renderer)
ctx.show_page()
surface.finish()
if fmt == 'svgz':
fo.close()
@_Backend.export
class _BackendCairo(_Backend):
FigureCanvas = FigureCanvasCairo
FigureManager = FigureManagerBase
|
21b57f331ad1cc9aab490fba4b6f7f91120759aeb6b419198a38babac0e3f58a
|
import atexit
import codecs
import functools
import logging
import math
import os
import pathlib
import re
import shutil
import subprocess
import sys
import tempfile
import weakref
import matplotlib as mpl
from matplotlib import _png, cbook, font_manager as fm, __version__, rcParams
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, GraphicsContextBase,
RendererBase)
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.path import Path
from matplotlib.figure import Figure
from matplotlib._pylab_helpers import Gcf
_log = logging.getLogger(__name__)
###############################################################################
@cbook.deprecated("3.0")
def get_texcommand():
"""Get chosen TeX system from rc."""
texsystem_options = ["xelatex", "lualatex", "pdflatex"]
texsystem = rcParams["pgf.texsystem"]
return texsystem if texsystem in texsystem_options else "xelatex"
def get_fontspec():
"""Build fontspec preamble from rc."""
latex_fontspec = []
texcommand = rcParams["pgf.texsystem"]
if texcommand != "pdflatex":
latex_fontspec.append("\\usepackage{fontspec}")
if texcommand != "pdflatex" and rcParams["pgf.rcfonts"]:
families = ["serif", "sans\\-serif", "monospace"]
commands = ["setmainfont", "setsansfont", "setmonofont"]
for family, command in zip(families, commands):
# 1) Forward slashes also work on Windows, so don't mess with
# backslashes. 2) The dirname needs to include a separator.
path = pathlib.Path(fm.findfont(family))
latex_fontspec.append(r"\%s{%s}[Path=%s]" % (
command, path.name, path.parent.as_posix() + "/"))
return "\n".join(latex_fontspec)
def get_preamble():
"""Get LaTeX preamble from rc."""
return rcParams["pgf.preamble"]
###############################################################################
# This almost made me cry!!!
# In the end, it's better to use only one unit for all coordinates, since the
# arithmetic in latex seems to produce inaccurate conversions.
latex_pt_to_in = 1. / 72.27
latex_in_to_pt = 1. / latex_pt_to_in
mpl_pt_to_in = 1. / 72.
mpl_in_to_pt = 1. / mpl_pt_to_in
###############################################################################
# helper functions
NO_ESCAPE = r"(?<!\\)(?:\\\\)*"
re_mathsep = re.compile(NO_ESCAPE + r"\$")
re_escapetext = re.compile(NO_ESCAPE + "([_^$%])")
repl_escapetext = lambda m: "\\" + m.group(1)
re_mathdefault = re.compile(NO_ESCAPE + r"(\\mathdefault)")
repl_mathdefault = lambda m: m.group(0)[:-len(m.group(1))]
def common_texification(text):
"""
Do some necessary and/or useful substitutions for texts to be included in
LaTeX documents.
"""
# Sometimes, matplotlib adds the unknown command \mathdefault.
# Not using \mathnormal instead since this looks odd for the latex cm font.
text = re_mathdefault.sub(repl_mathdefault, text)
# split text into normaltext and inline math parts
parts = re_mathsep.split(text)
for i, s in enumerate(parts):
if not i % 2:
# textmode replacements
s = re_escapetext.sub(repl_escapetext, s)
else:
# mathmode replacements
s = r"\(\displaystyle %s\)" % s
parts[i] = s
return "".join(parts)
def writeln(fh, line):
    # Every line of a file included with \\input must be terminated with %;
    # otherwise, LaTeX inserts additional vertical space.
fh.write(line)
fh.write("%\n")
def _font_properties_str(prop):
# translate font properties to latex commands, return as string
commands = []
families = {"serif": r"\rmfamily", "sans": r"\sffamily",
"sans-serif": r"\sffamily", "monospace": r"\ttfamily"}
family = prop.get_family()[0]
if family in families:
commands.append(families[family])
elif (any(font.name == family for font in fm.fontManager.ttflist)
and rcParams["pgf.texsystem"] != "pdflatex"):
commands.append(r"\setmainfont{%s}\rmfamily" % family)
else:
pass # print warning?
size = prop.get_size_in_points()
commands.append(r"\fontsize{%f}{%f}" % (size, size * 1.2))
styles = {"normal": r"", "italic": r"\itshape", "oblique": r"\slshape"}
commands.append(styles[prop.get_style()])
boldstyles = ["semibold", "demibold", "demi", "bold", "heavy",
"extra bold", "black"]
if prop.get_weight() in boldstyles:
commands.append(r"\bfseries")
commands.append(r"\selectfont")
return "".join(commands)
def make_pdf_to_png_converter():
"""Returns a function that converts a pdf file to a png file."""
if shutil.which("pdftocairo"):
def cairo_convert(pdffile, pngfile, dpi):
cmd = ["pdftocairo", "-singlefile", "-png", "-r", "%d" % dpi,
pdffile, os.path.splitext(pngfile)[0]]
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
return cairo_convert
try:
gs_info = mpl._get_executable_info("gs")
except FileNotFoundError:
pass
else:
def gs_convert(pdffile, pngfile, dpi):
cmd = [gs_info.executable,
'-dQUIET', '-dSAFER', '-dBATCH', '-dNOPAUSE', '-dNOPROMPT',
'-dUseCIEColor', '-dTextAlphaBits=4',
'-dGraphicsAlphaBits=4', '-dDOINTERPOLATE',
'-sDEVICE=png16m', '-sOutputFile=%s' % pngfile,
'-r%d' % dpi, pdffile]
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
return gs_convert
raise RuntimeError("No suitable pdf to png renderer found.")
class LatexError(Exception):
def __init__(self, message, latex_output=""):
super().__init__(message)
self.latex_output = latex_output
@cbook.deprecated("3.1")
class LatexManagerFactory:
previous_instance = None
@staticmethod
def get_latex_manager():
texcommand = rcParams["pgf.texsystem"]
latex_header = LatexManager._build_latex_header()
prev = LatexManagerFactory.previous_instance
# Check if the previous instance of LatexManager can be reused.
if (prev and prev.latex_header == latex_header
and prev.texcommand == texcommand):
_log.debug("reusing LatexManager")
return prev
else:
_log.debug("creating LatexManager")
new_inst = LatexManager()
LatexManagerFactory.previous_instance = new_inst
return new_inst
class LatexManager:
"""
The LatexManager opens an instance of the LaTeX application for
determining the metrics of text elements. The LaTeX environment can be
modified by setting fonts and/or a custom preamble in the rc parameters.
"""
_unclean_instances = weakref.WeakSet()
@staticmethod
def _build_latex_header():
latex_preamble = get_preamble()
latex_fontspec = get_fontspec()
# Create LaTeX header with some content, else LaTeX will load some math
# fonts later when we don't expect the additional output on stdout.
# TODO: is this sufficient?
latex_header = [
# Include TeX program name as a comment for cache invalidation.
r"% !TeX program = {}".format(rcParams["pgf.texsystem"]),
r"\documentclass{minimal}",
latex_preamble,
latex_fontspec,
r"\begin{document}",
r"text $math \mu$", # force latex to load fonts now
r"\typeout{pgf_backend_query_start}",
]
return "\n".join(latex_header)
@classmethod
def _get_cached_or_new(cls):
"""
Return the previous LatexManager if the header and tex system did not
change, or a new instance otherwise.
"""
return cls._get_cached_or_new_impl(cls._build_latex_header())
@classmethod
@functools.lru_cache(1)
def _get_cached_or_new_impl(cls, header): # Helper for _get_cached_or_new.
return cls()
@staticmethod
def _cleanup_remaining_instances():
unclean_instances = list(LatexManager._unclean_instances)
for latex_manager in unclean_instances:
latex_manager._cleanup()
def _stdin_writeln(self, s):
self.latex_stdin_utf8.write(s)
self.latex_stdin_utf8.write("\n")
self.latex_stdin_utf8.flush()
def _expect(self, s):
exp = s.encode("utf8")
buf = bytearray()
while True:
b = self.latex.stdout.read(1)
buf += b
if buf[-len(exp):] == exp:
break
if not len(b):
raise LatexError("LaTeX process halted", buf.decode("utf8"))
return buf.decode("utf8")
def _expect_prompt(self):
return self._expect("\n*")
def __init__(self):
# store references for __del__
self._os_path = os.path
self._shutil = shutil
# create a tmp directory for running latex, remember to cleanup
self.tmpdir = tempfile.mkdtemp(prefix="mpl_pgf_lm_")
LatexManager._unclean_instances.add(self)
# test the LaTeX setup to ensure a clean startup of the subprocess
self.texcommand = rcParams["pgf.texsystem"]
self.latex_header = LatexManager._build_latex_header()
latex_end = "\n\\makeatletter\n\\@@end\n"
try:
latex = subprocess.Popen([self.texcommand, "-halt-on-error"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
cwd=self.tmpdir)
except FileNotFoundError:
raise RuntimeError(
"Latex command not found. Install %r or change "
"pgf.texsystem to the desired command." % self.texcommand)
except OSError:
raise RuntimeError("Error starting process %r" % self.texcommand)
test_input = self.latex_header + latex_end
stdout, stderr = latex.communicate(test_input.encode("utf-8"))
if latex.returncode != 0:
raise LatexError("LaTeX returned an error, probably missing font "
"or error in preamble:\n%s" % stdout)
# open LaTeX process for real work
latex = subprocess.Popen([self.texcommand, "-halt-on-error"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
cwd=self.tmpdir)
self.latex = latex
self.latex_stdin_utf8 = codecs.getwriter("utf8")(self.latex.stdin)
# write header with 'pgf_backend_query_start' token
self._stdin_writeln(self._build_latex_header())
# read all lines until our 'pgf_backend_query_start' token appears
self._expect("*pgf_backend_query_start")
self._expect_prompt()
# cache for strings already processed
self.str_cache = {}
def _cleanup(self):
if not self._os_path.isdir(self.tmpdir):
return
try:
self.latex.communicate()
self.latex_stdin_utf8.close()
self.latex.stdout.close()
except Exception:
pass
try:
self._shutil.rmtree(self.tmpdir)
LatexManager._unclean_instances.discard(self)
except Exception:
sys.stderr.write("error deleting tmp directory %s\n" % self.tmpdir)
def __del__(self):
_log.debug("deleting LatexManager")
self._cleanup()
def get_width_height_descent(self, text, prop):
"""
Get the width, total height and descent for a text typeset by the
current LaTeX environment.
"""
# apply font properties and define textbox
prop_cmds = _font_properties_str(prop)
textbox = "\\sbox0{%s %s}" % (prop_cmds, text)
# check cache
if textbox in self.str_cache:
return self.str_cache[textbox]
# send textbox to LaTeX and wait for prompt
self._stdin_writeln(textbox)
try:
self._expect_prompt()
except LatexError as e:
raise ValueError("Error processing '{}'\nLaTeX Output:\n{}"
.format(text, e.latex_output))
# typeout width, height and text offset of the last textbox
self._stdin_writeln(r"\typeout{\the\wd0,\the\ht0,\the\dp0}")
# read answer from latex and advance to the next prompt
try:
answer = self._expect_prompt()
except LatexError as e:
raise ValueError("Error processing '{}'\nLaTeX Output:\n{}"
.format(text, e.latex_output))
# parse metrics from the answer string
try:
width, height, offset = answer.splitlines()[0].split(",")
except Exception:
raise ValueError("Error processing '{}'\nLaTeX Output:\n{}"
.format(text, answer))
w, h, o = float(width[:-2]), float(height[:-2]), float(offset[:-2])
# the height returned from LaTeX goes from base to top.
# the height matplotlib expects goes from bottom to top.
self.str_cache[textbox] = (w, h + o, o)
return w, h + o, o
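# A minimal usage sketch (not part of the rendering pipeline): measure a
# string with the shared LatexManager. A working LaTeX installation matching
# rcParams["pgf.texsystem"] is assumed.
def _example_latex_metrics_sketch():
    from matplotlib.font_manager import FontProperties
    manager = LatexManager._get_cached_or_new()
    width, height, descent = manager.get_width_height_descent(
        r"$\mu$-metrics", FontProperties(size=10))
    print(width, height, descent)  # reported in TeX points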
class RendererPgf(RendererBase):
def __init__(self, figure, fh, dummy=False):
"""
        Create a new PGF renderer that translates any drawing instruction
        into text commands to be interpreted in a LaTeX pgfpicture
        environment.
        Parameters
        ----------
figure : `matplotlib.figure.Figure`
Matplotlib figure to initialize height, width and dpi from.
fh : file-like
File handle for the output of the drawing commands.
"""
RendererBase.__init__(self)
self.dpi = figure.dpi
self.fh = fh
self.figure = figure
self.image_counter = 0
# get LatexManager instance
self.latexManager = LatexManager._get_cached_or_new()
if dummy:
            # dummy == True deactivates all drawing (draw_*) methods
nop = lambda *args, **kwargs: None
for m in RendererPgf.__dict__:
if m.startswith("draw_"):
self.__dict__[m] = nop
else:
# if fh does not belong to a filename, deactivate draw_image
if not hasattr(fh, 'name') or not os.path.exists(fh.name):
cbook._warn_external("streamed pgf-code does not support "
"raster graphics, consider using the "
"pgf-to-pdf option", UserWarning)
self.__dict__["draw_image"] = lambda *args, **kwargs: None
def draw_markers(self, gc, marker_path, marker_trans, path, trans,
rgbFace=None):
# docstring inherited
writeln(self.fh, r"\begin{pgfscope}")
# convert from display units to in
f = 1. / self.dpi
# set style and clip
self._print_pgf_clip(gc)
self._print_pgf_path_styles(gc, rgbFace)
# build marker definition
bl, tr = marker_path.get_extents(marker_trans).get_points()
coords = bl[0] * f, bl[1] * f, tr[0] * f, tr[1] * f
writeln(self.fh,
r"\pgfsys@defobject{currentmarker}"
r"{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}{" % coords)
self._print_pgf_path(None, marker_path, marker_trans)
self._pgf_path_draw(stroke=gc.get_linewidth() != 0.0,
fill=rgbFace is not None)
writeln(self.fh, r"}")
# draw marker for each vertex
for point, code in path.iter_segments(trans, simplify=False):
x, y = point[0] * f, point[1] * f
writeln(self.fh, r"\begin{pgfscope}")
writeln(self.fh, r"\pgfsys@transformshift{%fin}{%fin}" % (x, y))
writeln(self.fh, r"\pgfsys@useobject{currentmarker}{}")
writeln(self.fh, r"\end{pgfscope}")
writeln(self.fh, r"\end{pgfscope}")
def draw_path(self, gc, path, transform, rgbFace=None):
# docstring inherited
writeln(self.fh, r"\begin{pgfscope}")
# draw the path
self._print_pgf_clip(gc)
self._print_pgf_path_styles(gc, rgbFace)
self._print_pgf_path(gc, path, transform, rgbFace)
self._pgf_path_draw(stroke=gc.get_linewidth() != 0.0,
fill=rgbFace is not None)
writeln(self.fh, r"\end{pgfscope}")
# if present, draw pattern on top
if gc.get_hatch():
writeln(self.fh, r"\begin{pgfscope}")
self._print_pgf_path_styles(gc, rgbFace)
# combine clip and path for clipping
self._print_pgf_clip(gc)
self._print_pgf_path(gc, path, transform, rgbFace)
writeln(self.fh, r"\pgfusepath{clip}")
# build pattern definition
writeln(self.fh,
r"\pgfsys@defobject{currentpattern}"
r"{\pgfqpoint{0in}{0in}}{\pgfqpoint{1in}{1in}}{")
writeln(self.fh, r"\begin{pgfscope}")
writeln(self.fh,
r"\pgfpathrectangle"
r"{\pgfqpoint{0in}{0in}}{\pgfqpoint{1in}{1in}}")
writeln(self.fh, r"\pgfusepath{clip}")
scale = mpl.transforms.Affine2D().scale(self.dpi)
self._print_pgf_path(None, gc.get_hatch_path(), scale)
self._pgf_path_draw(stroke=True)
writeln(self.fh, r"\end{pgfscope}")
writeln(self.fh, r"}")
# repeat pattern, filling the bounding rect of the path
f = 1. / self.dpi
(xmin, ymin), (xmax, ymax) = \
path.get_extents(transform).get_points()
xmin, xmax = f * xmin, f * xmax
ymin, ymax = f * ymin, f * ymax
repx, repy = math.ceil(xmax - xmin), math.ceil(ymax - ymin)
writeln(self.fh,
r"\pgfsys@transformshift{%fin}{%fin}" % (xmin, ymin))
for iy in range(repy):
for ix in range(repx):
writeln(self.fh, r"\pgfsys@useobject{currentpattern}{}")
writeln(self.fh, r"\pgfsys@transformshift{1in}{0in}")
writeln(self.fh, r"\pgfsys@transformshift{-%din}{0in}" % repx)
writeln(self.fh, r"\pgfsys@transformshift{0in}{1in}")
writeln(self.fh, r"\end{pgfscope}")
def _print_pgf_clip(self, gc):
f = 1. / self.dpi
# check for clip box
bbox = gc.get_clip_rectangle()
if bbox:
p1, p2 = bbox.get_points()
w, h = p2 - p1
coords = p1[0] * f, p1[1] * f, w * f, h * f
writeln(self.fh,
r"\pgfpathrectangle"
r"{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}"
% coords)
writeln(self.fh, r"\pgfusepath{clip}")
# check for clip path
clippath, clippath_trans = gc.get_clip_path()
if clippath is not None:
self._print_pgf_path(gc, clippath, clippath_trans)
writeln(self.fh, r"\pgfusepath{clip}")
def _print_pgf_path_styles(self, gc, rgbFace):
# cap style
capstyles = {"butt": r"\pgfsetbuttcap",
"round": r"\pgfsetroundcap",
"projecting": r"\pgfsetrectcap"}
writeln(self.fh, capstyles[gc.get_capstyle()])
# join style
joinstyles = {"miter": r"\pgfsetmiterjoin",
"round": r"\pgfsetroundjoin",
"bevel": r"\pgfsetbeveljoin"}
writeln(self.fh, joinstyles[gc.get_joinstyle()])
# filling
has_fill = rgbFace is not None
if gc.get_forced_alpha():
fillopacity = strokeopacity = gc.get_alpha()
else:
strokeopacity = gc.get_rgb()[3]
fillopacity = rgbFace[3] if has_fill and len(rgbFace) > 3 else 1.0
if has_fill:
writeln(self.fh,
r"\definecolor{currentfill}{rgb}{%f,%f,%f}"
% tuple(rgbFace[:3]))
writeln(self.fh, r"\pgfsetfillcolor{currentfill}")
if has_fill and fillopacity != 1.0:
writeln(self.fh, r"\pgfsetfillopacity{%f}" % fillopacity)
# linewidth and color
lw = gc.get_linewidth() * mpl_pt_to_in * latex_in_to_pt
stroke_rgba = gc.get_rgb()
writeln(self.fh, r"\pgfsetlinewidth{%fpt}" % lw)
writeln(self.fh,
r"\definecolor{currentstroke}{rgb}{%f,%f,%f}"
% stroke_rgba[:3])
writeln(self.fh, r"\pgfsetstrokecolor{currentstroke}")
if strokeopacity != 1.0:
writeln(self.fh, r"\pgfsetstrokeopacity{%f}" % strokeopacity)
# line style
dash_offset, dash_list = gc.get_dashes()
if dash_list is None:
writeln(self.fh, r"\pgfsetdash{}{0pt}")
else:
writeln(self.fh,
r"\pgfsetdash{%s}{%fpt}"
% ("".join(r"{%fpt}" % dash for dash in dash_list),
dash_offset))
def _print_pgf_path(self, gc, path, transform, rgbFace=None):
f = 1. / self.dpi
# check for clip box / ignore clip for filled paths
bbox = gc.get_clip_rectangle() if gc else None
if bbox and (rgbFace is None):
p1, p2 = bbox.get_points()
clip = (p1[0], p1[1], p2[0], p2[1])
else:
clip = None
# build path
for points, code in path.iter_segments(transform, clip=clip):
if code == Path.MOVETO:
x, y = tuple(points)
writeln(self.fh,
r"\pgfpathmoveto{\pgfqpoint{%fin}{%fin}}" %
(f * x, f * y))
elif code == Path.CLOSEPOLY:
writeln(self.fh, r"\pgfpathclose")
elif code == Path.LINETO:
x, y = tuple(points)
writeln(self.fh,
r"\pgfpathlineto{\pgfqpoint{%fin}{%fin}}" %
(f * x, f * y))
elif code == Path.CURVE3:
cx, cy, px, py = tuple(points)
coords = cx * f, cy * f, px * f, py * f
writeln(self.fh,
r"\pgfpathquadraticcurveto"
r"{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}"
% coords)
elif code == Path.CURVE4:
c1x, c1y, c2x, c2y, px, py = tuple(points)
coords = c1x * f, c1y * f, c2x * f, c2y * f, px * f, py * f
writeln(self.fh,
r"\pgfpathcurveto"
r"{\pgfqpoint{%fin}{%fin}}"
r"{\pgfqpoint{%fin}{%fin}}"
r"{\pgfqpoint{%fin}{%fin}}"
% coords)
def _pgf_path_draw(self, stroke=True, fill=False):
actions = []
if stroke:
actions.append("stroke")
if fill:
actions.append("fill")
writeln(self.fh, r"\pgfusepath{%s}" % ",".join(actions))
def option_scale_image(self):
# docstring inherited
return True
def option_image_nocomposite(self):
# docstring inherited
return not rcParams['image.composite_image']
def draw_image(self, gc, x, y, im, transform=None):
# docstring inherited
h, w = im.shape[:2]
if w == 0 or h == 0:
return
# save the images to png files
path = os.path.dirname(self.fh.name)
fname = os.path.splitext(os.path.basename(self.fh.name))[0]
fname_img = "%s-img%d.png" % (fname, self.image_counter)
self.image_counter += 1
_png.write_png(im[::-1], os.path.join(path, fname_img))
# reference the image in the pgf picture
writeln(self.fh, r"\begin{pgfscope}")
self._print_pgf_clip(gc)
f = 1. / self.dpi # from display coords to inch
if transform is None:
writeln(self.fh,
r"\pgfsys@transformshift{%fin}{%fin}" % (x * f, y * f))
w, h = w * f, h * f
else:
tr1, tr2, tr3, tr4, tr5, tr6 = transform.frozen().to_values()
writeln(self.fh,
r"\pgfsys@transformcm{%f}{%f}{%f}{%f}{%fin}{%fin}" %
(tr1 * f, tr2 * f, tr3 * f, tr4 * f,
(tr5 + x) * f, (tr6 + y) * f))
w = h = 1 # scale is already included in the transform
interp = str(transform is None).lower() # interpolation in PDF reader
writeln(self.fh,
r"\pgftext[left,bottom]"
r"{\pgfimage[interpolate=%s,width=%fin,height=%fin]{%s}}" %
(interp, w, h, fname_img))
writeln(self.fh, r"\end{pgfscope}")
def draw_tex(self, gc, x, y, s, prop, angle, ismath="TeX!", mtext=None):
# docstring inherited
self.draw_text(gc, x, y, s, prop, angle, ismath, mtext)
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
# docstring inherited
# prepare string for tex
s = common_texification(s)
prop_cmds = _font_properties_str(prop)
s = r"%s %s" % (prop_cmds, s)
writeln(self.fh, r"\begin{pgfscope}")
alpha = gc.get_alpha()
if alpha != 1.0:
writeln(self.fh, r"\pgfsetfillopacity{%f}" % alpha)
writeln(self.fh, r"\pgfsetstrokeopacity{%f}" % alpha)
rgb = tuple(gc.get_rgb())[:3]
writeln(self.fh, r"\definecolor{textcolor}{rgb}{%f,%f,%f}" % rgb)
writeln(self.fh, r"\pgfsetstrokecolor{textcolor}")
writeln(self.fh, r"\pgfsetfillcolor{textcolor}")
s = r"\color{textcolor}" + s
f = 1.0 / self.figure.dpi
text_args = []
if mtext and (
(angle == 0 or
mtext.get_rotation_mode() == "anchor") and
mtext.get_verticalalignment() != "center_baseline"):
# if text anchoring can be supported, get the original coordinates
# and add alignment information
pos = mtext.get_unitless_position()
x, y = mtext.get_transform().transform_point(pos)
text_args.append("x=%fin" % (x * f))
text_args.append("y=%fin" % (y * f))
halign = {"left": "left", "right": "right", "center": ""}
valign = {"top": "top", "bottom": "bottom",
"baseline": "base", "center": ""}
text_args.append(halign[mtext.get_horizontalalignment()])
text_args.append(valign[mtext.get_verticalalignment()])
else:
# if not, use the text layout provided by matplotlib
text_args.append("x=%fin" % (x * f))
text_args.append("y=%fin" % (y * f))
text_args.append("left")
text_args.append("base")
if angle != 0:
text_args.append("rotate=%f" % angle)
writeln(self.fh, r"\pgftext[%s]{%s}" % (",".join(text_args), s))
writeln(self.fh, r"\end{pgfscope}")
def get_text_width_height_descent(self, s, prop, ismath):
# docstring inherited
# check if the math is supposed to be displaystyled
s = common_texification(s)
# get text metrics in units of latex pt, convert to display units
w, h, d = self.latexManager.get_width_height_descent(s, prop)
# TODO: this should be latex_pt_to_in instead of mpl_pt_to_in
# but having a little bit more space around the text looks better,
# plus the bounding box reported by LaTeX is VERY narrow
f = mpl_pt_to_in * self.dpi
return w * f, h * f, d * f
def flipy(self):
# docstring inherited
return False
def get_canvas_width_height(self):
# docstring inherited
return self.figure.get_figwidth(), self.figure.get_figheight()
def points_to_pixels(self, points):
# docstring inherited
return points * mpl_pt_to_in * self.dpi
def new_gc(self):
# docstring inherited
return GraphicsContextPgf()
class GraphicsContextPgf(GraphicsContextBase):
pass
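# A minimal usage sketch (not part of the backend): drive RendererPgf directly
# and collect the generated pgf commands in memory. Streaming to a buffer
# disables raster images (see the warning in RendererPgf.__init__), and a
# working LaTeX installation is assumed for text measurement.
def _example_renderer_pgf_sketch():
    import io
    from matplotlib.figure import Figure
    fig = Figure(figsize=(2, 2))
    FigureCanvasBase(fig)  # attach a canvas so Figure.draw can fire draw_event
    fig.add_subplot(111).plot([0, 1], [0, 1])
    buf = io.StringIO()
    fig.draw(RendererPgf(fig, buf))
    return buf.getvalue()  # raw pgf drawing commands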
########################################################################
class TmpDirCleaner:
remaining_tmpdirs = set()
@staticmethod
def add(tmpdir):
TmpDirCleaner.remaining_tmpdirs.add(tmpdir)
@staticmethod
def cleanup_remaining_tmpdirs():
for tmpdir in TmpDirCleaner.remaining_tmpdirs:
error_message = "error deleting tmp directory {}".format(tmpdir)
shutil.rmtree(
tmpdir,
onerror=lambda *args: _log.error(error_message))
class FigureCanvasPgf(FigureCanvasBase):
filetypes = {"pgf": "LaTeX PGF picture",
"pdf": "LaTeX compiled PGF picture",
"png": "Portable Network Graphics", }
def get_default_filetype(self):
return 'pdf'
def _print_pgf_to_fh(self, fh, *args,
dryrun=False, bbox_inches_restore=None, **kwargs):
if dryrun:
renderer = RendererPgf(self.figure, None, dummy=True)
self.figure.draw(renderer)
return
header_text = """%% Creator: Matplotlib, PGF backend
%%
%% To include the figure in your LaTeX document, write
%% \\input{<filename>.pgf}
%%
%% Make sure the required packages are loaded in your preamble
%% \\usepackage{pgf}
%%
%% and, on pdftex
%% \\usepackage[utf8]{inputenc}\\DeclareUnicodeCharacter{2212}{-}
%%
%% or, on luatex and xetex
%% \\usepackage{unicode-math}
%%
%% Figures using additional raster images can only be included by \\input if
%% they are in the same directory as the main LaTeX file. For loading figures
%% from other directories you can use the `import` package
%% \\usepackage{import}
%%
%% and then include the figures with
%% \\import{<path to file>}{<filename>.pgf}
%%
"""
# append the preamble used by the backend as a comment for debugging
header_info_preamble = ["%% Matplotlib used the following preamble"]
for line in get_preamble().splitlines():
header_info_preamble.append("%% " + line)
for line in get_fontspec().splitlines():
header_info_preamble.append("%% " + line)
header_info_preamble.append("%%")
header_info_preamble = "\n".join(header_info_preamble)
# get figure size in inch
w, h = self.figure.get_figwidth(), self.figure.get_figheight()
dpi = self.figure.get_dpi()
# create pgfpicture environment and write the pgf code
fh.write(header_text)
fh.write(header_info_preamble)
fh.write("\n")
writeln(fh, r"\begingroup")
writeln(fh, r"\makeatletter")
writeln(fh, r"\begin{pgfpicture}")
writeln(fh,
r"\pgfpathrectangle{\pgfpointorigin}{\pgfqpoint{%fin}{%fin}}"
% (w, h))
writeln(fh, r"\pgfusepath{use as bounding box, clip}")
renderer = MixedModeRenderer(self.figure, w, h, dpi,
RendererPgf(self.figure, fh),
bbox_inches_restore=bbox_inches_restore)
self.figure.draw(renderer)
# end the pgfpicture environment
writeln(fh, r"\end{pgfpicture}")
writeln(fh, r"\makeatother")
writeln(fh, r"\endgroup")
def print_pgf(self, fname_or_fh, *args, **kwargs):
"""
Output pgf commands for drawing the figure so it can be included and
rendered in latex documents.
"""
if kwargs.get("dryrun", False):
self._print_pgf_to_fh(None, *args, **kwargs)
return
with cbook.open_file_cm(fname_or_fh, "w", encoding="utf-8") as file:
if not cbook.file_requires_unicode(file):
file = codecs.getwriter("utf-8")(file)
self._print_pgf_to_fh(file, *args, **kwargs)
def _print_pdf_to_fh(self, fh, *args, **kwargs):
w, h = self.figure.get_figwidth(), self.figure.get_figheight()
try:
# create temporary directory for compiling the figure
tmpdir = tempfile.mkdtemp(prefix="mpl_pgf_")
fname_pgf = os.path.join(tmpdir, "figure.pgf")
fname_tex = os.path.join(tmpdir, "figure.tex")
fname_pdf = os.path.join(tmpdir, "figure.pdf")
# print figure to pgf and compile it with latex
self.print_pgf(fname_pgf, *args, **kwargs)
latex_preamble = get_preamble()
latex_fontspec = get_fontspec()
latexcode = """
\\documentclass[12pt]{minimal}
\\usepackage[paperwidth=%fin, paperheight=%fin, margin=0in]{geometry}
%s
%s
\\usepackage{pgf}
\\begin{document}
\\centering
\\input{figure.pgf}
\\end{document}""" % (w, h, latex_preamble, latex_fontspec)
pathlib.Path(fname_tex).write_text(latexcode, encoding="utf-8")
texcommand = rcParams["pgf.texsystem"]
cbook._check_and_log_subprocess(
[texcommand, "-interaction=nonstopmode", "-halt-on-error",
"figure.tex"], _log, cwd=tmpdir)
# copy file contents to target
with open(fname_pdf, "rb") as fh_src:
shutil.copyfileobj(fh_src, fh)
finally:
try:
shutil.rmtree(tmpdir)
            except Exception:
TmpDirCleaner.add(tmpdir)
def print_pdf(self, fname_or_fh, *args, **kwargs):
"""Use LaTeX to compile a Pgf generated figure to PDF."""
if kwargs.get("dryrun", False):
self._print_pgf_to_fh(None, *args, **kwargs)
return
with cbook.open_file_cm(fname_or_fh, "wb") as file:
self._print_pdf_to_fh(file, *args, **kwargs)
def _print_png_to_fh(self, fh, *args, **kwargs):
converter = make_pdf_to_png_converter()
try:
# create temporary directory for pdf creation and png conversion
tmpdir = tempfile.mkdtemp(prefix="mpl_pgf_")
fname_pdf = os.path.join(tmpdir, "figure.pdf")
fname_png = os.path.join(tmpdir, "figure.png")
# create pdf and try to convert it to png
self.print_pdf(fname_pdf, *args, **kwargs)
converter(fname_pdf, fname_png, dpi=self.figure.dpi)
# copy file contents to target
with open(fname_png, "rb") as fh_src:
shutil.copyfileobj(fh_src, fh)
finally:
try:
shutil.rmtree(tmpdir)
            except Exception:
TmpDirCleaner.add(tmpdir)
def print_png(self, fname_or_fh, *args, **kwargs):
"""Use LaTeX to compile a pgf figure to pdf and convert it to png."""
if kwargs.get("dryrun", False):
self._print_pgf_to_fh(None, *args, **kwargs)
return
with cbook.open_file_cm(fname_or_fh, "wb") as file:
self._print_png_to_fh(file, *args, **kwargs)
def get_renderer(self):
return RendererPgf(self.figure, None, dummy=True)
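# A minimal usage sketch (not exercised by the backend itself): write a .pgf
# file that can be \input into a LaTeX document. "example_figure.pgf" is a
# hypothetical path, and a working LaTeX installation is assumed.
def _example_print_pgf_sketch():
    from matplotlib.figure import Figure
    fig = Figure(figsize=(3, 2))
    fig.add_subplot(111).plot([0, 1, 2], [0, 1, 0])
    FigureCanvasPgf(fig).print_pgf("example_figure.pgf")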
class FigureManagerPgf(FigureManagerBase):
pass
@_Backend.export
class _BackendPgf(_Backend):
FigureCanvas = FigureCanvasPgf
FigureManager = FigureManagerPgf
def _cleanup_all():
LatexManager._cleanup_remaining_instances()
TmpDirCleaner.cleanup_remaining_tmpdirs()
atexit.register(_cleanup_all)
class PdfPages:
"""
A multi-page PDF file using the pgf backend
Examples
--------
>>> import matplotlib.pyplot as plt
>>> # Initialize:
>>> with PdfPages('foo.pdf') as pdf:
... # As many times as you like, create a figure fig and save it:
... fig = plt.figure()
... pdf.savefig(fig)
... # When no figure is specified the current figure is saved
... pdf.savefig()
"""
__slots__ = (
'_outputfile',
'keep_empty',
'_tmpdir',
'_basename',
'_fname_tex',
'_fname_pdf',
'_n_figures',
'_file',
'metadata',
)
def __init__(self, filename, *, keep_empty=True, metadata=None):
"""
Create a new PdfPages object.
Parameters
----------
filename : str
Plots using :meth:`PdfPages.savefig` will be written to a file at
this location. Any older file with the same name is overwritten.
keep_empty : bool, optional
If set to False, then empty pdf files will be deleted automatically
when closed.
metadata : dictionary, optional
Information dictionary object (see PDF reference section 10.2.1
'Document Information Dictionary'), e.g.:
`{'Creator': 'My software', 'Author': 'Me',
'Title': 'Awesome fig'}`
The standard keys are `'Title'`, `'Author'`, `'Subject'`,
`'Keywords'`, `'Producer'`, `'Creator'` and `'Trapped'`.
Values have been predefined for `'Creator'` and `'Producer'`.
They can be removed by setting them to the empty string.
"""
self._outputfile = filename
self._n_figures = 0
self.keep_empty = keep_empty
self.metadata = metadata or {}
# create temporary directory for compiling the figure
self._tmpdir = tempfile.mkdtemp(prefix="mpl_pgf_pdfpages_")
self._basename = 'pdf_pages'
self._fname_tex = os.path.join(self._tmpdir, self._basename + ".tex")
self._fname_pdf = os.path.join(self._tmpdir, self._basename + ".pdf")
self._file = open(self._fname_tex, 'wb')
def _write_header(self, width_inches, height_inches):
supported_keys = {
'title', 'author', 'subject', 'keywords', 'creator',
'producer', 'trapped'
}
infoDict = {
'creator': 'matplotlib %s, https://matplotlib.org' % __version__,
'producer': 'matplotlib pgf backend %s' % __version__,
}
metadata = {k.lower(): v for k, v in self.metadata.items()}
infoDict.update(metadata)
hyperref_options = ''
for k, v in infoDict.items():
if k not in supported_keys:
raise ValueError(
'Not a supported pdf metadata field: "{}"'.format(k)
)
hyperref_options += 'pdf' + k + '={' + str(v) + '},'
latex_preamble = get_preamble()
latex_fontspec = get_fontspec()
latex_header = r"""\PassOptionsToPackage{{
{metadata}
}}{{hyperref}}
\RequirePackage{{hyperref}}
\documentclass[12pt]{{minimal}}
\usepackage[
paperwidth={width}in,
paperheight={height}in,
margin=0in
]{{geometry}}
{preamble}
{fontspec}
\usepackage{{pgf}}
\setlength{{\parindent}}{{0pt}}
\begin{{document}}%%
""".format(
width=width_inches,
height=height_inches,
preamble=latex_preamble,
fontspec=latex_fontspec,
metadata=hyperref_options,
)
self._file.write(latex_header.encode('utf-8'))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def close(self):
"""
Finalize this object, running LaTeX in a temporary directory
and moving the final pdf file to `filename`.
"""
        self._file.write(b'\\end{document}\n')
self._file.close()
if self._n_figures > 0:
try:
self._run_latex()
finally:
try:
shutil.rmtree(self._tmpdir)
                except Exception:
TmpDirCleaner.add(self._tmpdir)
elif self.keep_empty:
open(self._outputfile, 'wb').close()
def _run_latex(self):
texcommand = rcParams["pgf.texsystem"]
cbook._check_and_log_subprocess(
[texcommand, "-interaction=nonstopmode", "-halt-on-error",
os.path.basename(self._fname_tex)],
_log, cwd=self._tmpdir)
# copy file contents to target
shutil.copyfile(self._fname_pdf, self._outputfile)
def savefig(self, figure=None, **kwargs):
"""
Saves a :class:`~matplotlib.figure.Figure` to this file as a new page.
Any other keyword arguments are passed to
:meth:`~matplotlib.figure.Figure.savefig`.
Parameters
----------
figure : :class:`~matplotlib.figure.Figure` or int, optional
Specifies what figure is saved to file. If not specified, the
active figure is saved. If a :class:`~matplotlib.figure.Figure`
instance is provided, this figure is saved. If an int is specified,
the figure instance to save is looked up by number.
"""
if not isinstance(figure, Figure):
if figure is None:
manager = Gcf.get_active()
else:
manager = Gcf.get_fig_manager(figure)
if manager is None:
raise ValueError("No figure {}".format(figure))
figure = manager.canvas.figure
try:
orig_canvas = figure.canvas
figure.canvas = FigureCanvasPgf(figure)
width, height = figure.get_size_inches()
if self._n_figures == 0:
self._write_header(width, height)
else:
# \pdfpagewidth and \pdfpageheight exist on pdftex, xetex, and
# luatex<0.85; they were renamed to \pagewidth and \pageheight
# on luatex>=0.85.
self._file.write(
br'\newpage'
br'\ifdefined\pdfpagewidth\pdfpagewidth'
br'\else\pagewidth\fi=%ain'
br'\ifdefined\pdfpageheight\pdfpageheight'
br'\else\pageheight\fi=%ain'
b'%%\n' % (width, height)
)
figure.savefig(self._file, format="pgf", **kwargs)
self._n_figures += 1
finally:
figure.canvas = orig_canvas
def get_pagecount(self):
"""
Returns the current number of pages in the multipage pdf file.
"""
return self._n_figures
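# A minimal usage sketch (not part of the class): a two-page document with PDF
# metadata, compiled through LaTeX when the context manager closes.
# "report.pdf" is a hypothetical path, and a working LaTeX installation is
# assumed.
def _example_pdfpages_sketch():
    import matplotlib.pyplot as plt
    with PdfPages("report.pdf",
                  metadata={"Author": "A. Author", "Title": "Report"}) as pdf:
        for data in ([0, 1, 2], [2, 1, 0]):
            fig = plt.figure()
            plt.plot(data)
            pdf.savefig(fig)
            plt.close(fig)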
|
87c26a12a02d13320b306eb5417c61a841c17c8bfa192db4fb26e13477963bf1
|
import wx
from .backend_agg import FigureCanvasAgg
from .backend_wx import (
_BackendWx, _FigureCanvasWxBase, FigureFrameWx,
NavigationToolbar2Wx as NavigationToolbar2WxAgg)
class FigureFrameWxAgg(FigureFrameWx):
def get_canvas(self, fig):
return FigureCanvasWxAgg(self, -1, fig)
class FigureCanvasWxAgg(FigureCanvasAgg, _FigureCanvasWxBase):
"""
The FigureCanvas contains the figure and does event handling.
In the wxPython backend, it is derived from wxPanel, and (usually)
lives inside a frame instantiated by a FigureManagerWx. The parent
window probably implements a wxSizer to control the displayed
control size - but we give a hint as to our preferred minimum
size.
"""
def draw(self, drawDC=None):
"""
Render the figure using agg.
"""
FigureCanvasAgg.draw(self)
self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
self._isDrawn = True
self.gui_repaint(drawDC=drawDC, origin='WXAgg')
def blit(self, bbox=None):
"""
Transfer the region of the agg buffer defined by bbox to the display.
If bbox is None, the entire buffer is transferred.
"""
if bbox is None:
self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
self.gui_repaint()
return
l, b, w, h = bbox.bounds
r = l + w
t = b + h
x = int(l)
y = int(self.bitmap.GetHeight() - t)
srcBmp = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destDC = wx.MemoryDC()
destDC.SelectObject(self.bitmap)
destDC.Blit(x, y, int(w), int(h), srcDC, x, y)
destDC.SelectObject(wx.NullBitmap)
srcDC.SelectObject(wx.NullBitmap)
self.gui_repaint()
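# A minimal embedding sketch (not part of the backend): host the canvas inside
# a plain wx.Frame with a sizer, as the class docstring above describes. The
# window title and figure contents are made up.
def _example_embed_wxagg_sketch():
    from matplotlib.figure import Figure
    app = wx.App(False)
    frame = wx.Frame(None, title="WXAgg embedding sketch")
    fig = Figure(figsize=(4, 3))
    fig.add_subplot(111).plot([1, 2, 3], [3, 1, 2])
    canvas = FigureCanvasWxAgg(frame, -1, fig)
    sizer = wx.BoxSizer(wx.VERTICAL)
    sizer.Add(canvas, 1, wx.EXPAND)
    frame.SetSizerAndFit(sizer)
    frame.Show()
    app.MainLoop()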
def _convert_agg_to_wx_bitmap(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Bitmap. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgba buffer -> bitmap
return wx.Bitmap.FromBufferRGBA(int(agg.width), int(agg.height),
agg.buffer_rgba())
else:
# agg => rgba buffer -> bitmap => clipped bitmap
l, b, width, height = bbox.bounds
r = l + width
t = b + height
srcBmp = wx.Bitmap.FromBufferRGBA(int(agg.width), int(agg.height),
agg.buffer_rgba())
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destBmp = wx.Bitmap(int(width), int(height))
destDC = wx.MemoryDC()
destDC.SelectObject(destBmp)
x = int(l)
y = int(int(agg.height) - t)
destDC.Blit(0, 0, int(width), int(height), srcDC, x, y)
srcDC.SelectObject(wx.NullBitmap)
destDC.SelectObject(wx.NullBitmap)
return destBmp
@_BackendWx.export
class _BackendWxAgg(_BackendWx):
FigureCanvas = FigureCanvasWxAgg
_frame_class = FigureFrameWxAgg
|
40fc0ce9d9a7307b50afcd8fb5fa30ccf2c4f4c463d50004831be73a17660f96
|
"""
A wx API adapter to hide differences between wxPython classic and phoenix.
It is assumed that the user code is selecting what version it wants to use;
here we just ensure that it meets the minimum required by matplotlib.
For an example see embedding_in_wx2.py
"""
import wx
from .. import cbook
from .backend_wx import RendererWx
cbook.warn_deprecated("3.0", name=__name__, obj_type="module")
backend_version = wx.VERSION_STRING
is_phoenix = 'phoenix' in wx.PlatformInfo
fontweights = RendererWx.fontweights
fontangles = RendererWx.fontangles
fontnames = RendererWx.fontnames
dashd_wx = {'solid': wx.PENSTYLE_SOLID,
'dashed': wx.PENSTYLE_SHORT_DASH,
'dashdot': wx.PENSTYLE_DOT_DASH,
'dotted': wx.PENSTYLE_DOT}
# functions changes
BitmapFromBuffer = wx.Bitmap.FromBufferRGBA
EmptyBitmap = wx.Bitmap
EmptyImage = wx.Image
Cursor = wx.Cursor
EventLoop = wx.GUIEventLoop
NamedColour = wx.Colour
StockCursor = wx.Cursor
|
07e2c3fc0102c5556c640e996fea31a0ddfd88cf8650d97cd4b47b9d5689f2ae
|
from .backend_qt5cairo import _BackendQT5Cairo
@_BackendQT5Cairo.export
class _BackendQT4Cairo(_BackendQT5Cairo):
required_interactive_framework = "qt4"
|
ca91595b4027e157f6810e5d17e10fb1d41a43bd149664e85114d2d8e896f887
|
"""Interactive figures in the IPython notebook"""
# Note: There is a notebook in
# lib/matplotlib/backends/web_backend/nbagg_uat.ipynb to help verify
# that changes made maintain expected behaviour.
from base64 import b64encode
import io
import json
import pathlib
import uuid
from IPython.display import display, Javascript, HTML
try:
# Jupyter/IPython 4.x or later
from ipykernel.comm import Comm
except ImportError:
# Jupyter/IPython 3.x or earlier
from IPython.kernel.comm import Comm
from matplotlib import cbook, is_interactive
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, NavigationToolbar2)
from matplotlib.backends.backend_webagg_core import (
FigureCanvasWebAggCore, FigureManagerWebAgg, NavigationToolbar2WebAgg,
TimerTornado)
def connection_info():
"""
Return a string showing the figure and connection status for the backend.
This is intended as a diagnostic tool, and not for general use.
"""
result = [
'{fig} - {socket}'.format(
fig=(manager.canvas.figure.get_label()
or "Figure {}".format(manager.num)),
socket=manager.web_sockets)
for manager in Gcf.get_all_fig_managers()
]
if not is_interactive():
result.append('Figures pending show: {}'.format(len(Gcf._activeQue)))
return '\n'.join(result)
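# A minimal usage sketch (intended for a notebook cell with this backend
# active): print the diagnostic summary described above.
def _example_connection_info_sketch():
    print(connection_info())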
# Note: Version 3.2 and 4.x icons
# http://fontawesome.io/3.2.1/icons/
# http://fontawesome.io/
# the `fa fa-xxx` part targets font-awesome 4, (IPython 3.x)
# the icon-xxx targets font awesome 3.21 (IPython 2.x)
_FONT_AWESOME_CLASSES = {
'home': 'fa fa-home icon-home',
'back': 'fa fa-arrow-left icon-arrow-left',
'forward': 'fa fa-arrow-right icon-arrow-right',
'zoom_to_rect': 'fa fa-square-o icon-check-empty',
'move': 'fa fa-arrows icon-move',
'download': 'fa fa-floppy-o icon-save',
None: None
}
class NavigationIPy(NavigationToolbar2WebAgg):
# Use the standard toolbar items + download button
toolitems = [(text, tooltip_text,
_FONT_AWESOME_CLASSES[image_file], name_of_method)
for text, tooltip_text, image_file, name_of_method
in (NavigationToolbar2.toolitems +
(('Download', 'Download plot', 'download', 'download'),))
if image_file in _FONT_AWESOME_CLASSES]
class FigureManagerNbAgg(FigureManagerWebAgg):
ToolbarCls = NavigationIPy
def __init__(self, canvas, num):
self._shown = False
FigureManagerWebAgg.__init__(self, canvas, num)
def display_js(self):
# XXX How to do this just once? It has to deal with multiple
# browser instances using the same kernel (require.js - but the
# file isn't static?).
display(Javascript(FigureManagerNbAgg.get_javascript()))
def show(self):
if not self._shown:
self.display_js()
self._create_comm()
else:
self.canvas.draw_idle()
self._shown = True
def reshow(self):
"""
A special method to re-show the figure in the notebook.
"""
self._shown = False
self.show()
@property
def connected(self):
return bool(self.web_sockets)
@classmethod
def get_javascript(cls, stream=None):
if stream is None:
output = io.StringIO()
else:
output = stream
super().get_javascript(stream=output)
output.write((pathlib.Path(__file__).parent
/ "web_backend/js/nbagg_mpl.js")
.read_text(encoding="utf-8"))
if stream is None:
return output.getvalue()
def _create_comm(self):
comm = CommSocket(self)
self.add_web_socket(comm)
return comm
def destroy(self):
self._send_event('close')
# need to copy comms as callbacks will modify this list
for comm in list(self.web_sockets):
comm.on_close()
self.clearup_closed()
def clearup_closed(self):
"""Clear up any closed Comms."""
self.web_sockets = {socket for socket in self.web_sockets
if socket.is_open()}
if len(self.web_sockets) == 0:
self.canvas.close_event()
def remove_comm(self, comm_id):
self.web_sockets = {socket for socket in self.web_sockets
if not socket.comm.comm_id == comm_id}
class FigureCanvasNbAgg(FigureCanvasWebAggCore):
def new_timer(self, *args, **kwargs):
# docstring inherited
return TimerTornado(*args, **kwargs)
class CommSocket(object):
"""
Manages the Comm connection between IPython and the browser (client).
    Comms are two-way: the CommSocket publishes messages via the send_json
    method and handles incoming messages with on_message. On the JS side,
    figure.send_message and figure.ws.onmessage do the sending and receiving,
    respectively.
"""
def __init__(self, manager):
self.supports_binary = None
self.manager = manager
self.uuid = str(uuid.uuid4())
# Publish an output area with a unique ID. The javascript can then
# hook into this area.
display(HTML("<div id=%r></div>" % self.uuid))
try:
self.comm = Comm('matplotlib', data={'id': self.uuid})
except AttributeError:
raise RuntimeError('Unable to create an IPython notebook Comm '
'instance. Are you in the IPython notebook?')
self.comm.on_msg(self.on_message)
manager = self.manager
self._ext_close = False
def _on_close(close_message):
self._ext_close = True
manager.remove_comm(close_message['content']['comm_id'])
manager.clearup_closed()
self.comm.on_close(_on_close)
def is_open(self):
return not (self._ext_close or self.comm._closed)
def on_close(self):
# When the socket is closed, deregister the websocket with
# the FigureManager.
if self.is_open():
try:
self.comm.close()
except KeyError:
# apparently already cleaned it up?
pass
def send_json(self, content):
self.comm.send({'data': json.dumps(content)})
def send_binary(self, blob):
# The comm is ascii, so we always send the image in base64
# encoded data URL form.
data = b64encode(blob).decode('ascii')
data_uri = "data:image/png;base64,{0}".format(data)
self.comm.send({'data': data_uri})
def on_message(self, message):
# The 'supports_binary' message is relevant to the
# websocket itself. The other messages get passed along
# to matplotlib as-is.
# Every message has a "type" and a "figure_id".
message = json.loads(message['content']['data'])
if message['type'] == 'closing':
self.on_close()
self.manager.clearup_closed()
elif message['type'] == 'supports_binary':
self.supports_binary = message['value']
else:
self.manager.handle_json(message)
@_Backend.export
class _BackendNbAgg(_Backend):
FigureCanvas = FigureCanvasNbAgg
FigureManager = FigureManagerNbAgg
@staticmethod
def new_figure_manager_given_figure(num, figure):
canvas = FigureCanvasNbAgg(figure)
manager = FigureManagerNbAgg(canvas, num)
if is_interactive():
manager.show()
figure.canvas.draw_idle()
canvas.mpl_connect('close_event', lambda event: Gcf.destroy(num))
return manager
@staticmethod
def trigger_manager_draw(manager):
manager.show()
@staticmethod
def show(*args, block=None, **kwargs):
if args or kwargs:
cbook.warn_deprecated(
"3.1", message="Passing arguments to show(), other than "
"passing 'block' by keyword, is deprecated %(since)s, and "
"support for it will be removed %(removal)s.")
## TODO: something to do when keyword block==False ?
from matplotlib._pylab_helpers import Gcf
managers = Gcf.get_all_fig_managers()
if not managers:
return
interactive = is_interactive()
for manager in managers:
manager.show()
# plt.figure adds an event which puts the figure in focus
# in the activeQue. Disable this behaviour, as it results in
# figures being put as the active figure after they have been
# shown, even in non-interactive mode.
if hasattr(manager, '_cidgcf'):
manager.canvas.mpl_disconnect(manager._cidgcf)
if not interactive and manager in Gcf._activeQue:
Gcf._activeQue.remove(manager)
|
226f8f206cba808a65dafdf3abd677d986f75ab96cb7a36b30177d1b164449bd
|
"""
Render to qt from agg.
"""
import ctypes
from matplotlib.transforms import Bbox
from .. import cbook
from .backend_agg import FigureCanvasAgg
from .backend_qt5 import (
QtCore, QtGui, QtWidgets, _BackendQT5, FigureCanvasQT, FigureManagerQT,
NavigationToolbar2QT, backend_version)
from .qt_compat import QT_API
class FigureCanvasQTAgg(FigureCanvasAgg, FigureCanvasQT):
def __init__(self, figure):
# Must pass 'figure' as kwarg to Qt base class.
super().__init__(figure=figure)
def paintEvent(self, event):
"""Copy the image from the Agg canvas to the qt.drawable.
In Qt, all drawing should be done inside of here when a widget is
shown onscreen.
"""
if self._update_dpi():
# The dpi update triggered its own paintEvent.
return
self._draw_idle() # Only does something if a draw is pending.
# If the canvas does not have a renderer, then give up and wait for
# FigureCanvasAgg.draw(self) to be called.
if not hasattr(self, 'renderer'):
return
painter = QtGui.QPainter(self)
rect = event.rect()
left = rect.left()
top = rect.top()
width = rect.width()
height = rect.height()
# See documentation of QRect: bottom() and right() are off by 1, so use
# left() + width() and top() + height().
bbox = Bbox(
[[left, self.renderer.height - (top + height * self._dpi_ratio)],
[left + width * self._dpi_ratio, self.renderer.height - top]])
reg = self.copy_from_bbox(bbox)
buf = cbook._unmultiplied_rgba8888_to_premultiplied_argb32(
memoryview(reg))
# clear the widget canvas
painter.eraseRect(rect)
qimage = QtGui.QImage(buf, buf.shape[1], buf.shape[0],
QtGui.QImage.Format_ARGB32_Premultiplied)
if hasattr(qimage, 'setDevicePixelRatio'):
# Not available on Qt4 or some older Qt5.
qimage.setDevicePixelRatio(self._dpi_ratio)
origin = QtCore.QPoint(left, top)
painter.drawImage(origin / self._dpi_ratio, qimage)
# Adjust the buf reference count to work around a memory
# leak bug in QImage under PySide on Python 3.
if QT_API in ('PySide', 'PySide2'):
ctypes.c_long.from_address(id(buf)).value = 1
self._draw_rect_callback(painter)
painter.end()
def blit(self, bbox=None):
"""Blit the region in bbox.
"""
# If bbox is None, blit the entire canvas. Otherwise
# blit only the area defined by the bbox.
if bbox is None and self.figure:
bbox = self.figure.bbox
# repaint uses logical pixels, not physical pixels like the renderer.
l, b, w, h = [pt / self._dpi_ratio for pt in bbox.bounds]
t = b + h
self.repaint(l, self.renderer.height / self._dpi_ratio - t, w, h)
def print_figure(self, *args, **kwargs):
super().print_figure(*args, **kwargs)
self.draw()
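# A minimal embedding sketch (not part of the backend): show the Agg-backed
# canvas inside a bare QMainWindow. A Qt binding supported by backend_qt5 is
# assumed to be importable; the figure contents are made up.
def _example_embed_qtagg_sketch():
    from matplotlib.figure import Figure
    app = QtWidgets.QApplication([])
    fig = Figure(figsize=(4, 3))
    fig.add_subplot(111).plot([0, 1, 4, 9])
    canvas = FigureCanvasQTAgg(fig)
    window = QtWidgets.QMainWindow()
    window.setCentralWidget(canvas)
    window.show()
    app.exec_()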
@_BackendQT5.export
class _BackendQT5Agg(_BackendQT5):
FigureCanvas = FigureCanvasQTAgg
|
c7d28745733125f9a527eea531d98aaf0f5c19ee519590001a393519ad715f67
|
from .backend_qt5 import (
backend_version, SPECIAL_KEYS, SUPER, ALT, CTRL, SHIFT, MODIFIER_KEYS,
cursord, _create_qApp, _BackendQT5, TimerQT, MainWindow, FigureManagerQT,
NavigationToolbar2QT, SubplotToolQt, error_msg_qt, exception_handler)
from .backend_qt5 import FigureCanvasQT as FigureCanvasQT5
@_BackendQT5.export
class _BackendQT4(_BackendQT5):
required_interactive_framework = "qt4"
|
034602e43239b71ad1b8d493e2fb9ce4343ac62c0388d39444cf352096e902d1
|
"""
This is a fully functional do nothing backend to provide a template to
backend writers. It is fully functional in that you can select it as
a backend with
import matplotlib
matplotlib.use('Template')
and your matplotlib scripts will (should!) run without error, though
no output is produced. This provides a nice starting point for
backend writers because you can selectively implement methods
(draw_rectangle, draw_lines, etc...) and slowly see your figure come
to life w/o having to have a full blown implementation before getting
any results.
Copy this to backend_xxx.py and replace all instances of 'template'
with 'xxx'. Then implement the class methods and functions below, and
add 'xxx' to the switchyard in matplotlib/backends/__init__.py and
'xxx' to the backends list in the validate_backend method in
matplotlib/__init__.py and you're off. You can use your backend with::
import matplotlib
matplotlib.use('xxx')
import matplotlib.pyplot as plt
plt.plot([1,2,3])
plt.show()
matplotlib also supports external backends, so you can use any module in your
PYTHONPATH with the syntax::
import matplotlib
matplotlib.use('module://my_backend')
where my_backend.py is your module name. This syntax is also
recognized in the rc file and in the -d argument in pylab, e.g.,::
python simple_plot.py -dmodule://my_backend
If your backend implements support for saving figures (i.e. has a print_xyz()
method) you can register it as the default handler for a given file type
from matplotlib.backend_bases import register_backend
register_backend('xyz', 'my_backend', 'XYZ File Format')
...
plt.savefig("figure.xyz")
The files that are most relevant to backend_writers are
matplotlib/backends/backend_your_backend.py
matplotlib/backend_bases.py
matplotlib/backends/__init__.py
matplotlib/__init__.py
matplotlib/_pylab_helpers.py
Naming Conventions
* classes Upper or MixedUpperCase
* variables lower or lowerUpper
* functions lower or underscore_separated
"""
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import (
FigureCanvasBase, FigureManagerBase, GraphicsContextBase, RendererBase)
from matplotlib.figure import Figure
class RendererTemplate(RendererBase):
"""
The renderer handles drawing/rendering operations.
This is a minimal do-nothing class that can be used to get started when
writing a new backend. Refer to backend_bases.RendererBase for
documentation of the classes methods.
"""
def __init__(self, dpi):
self.dpi = dpi
def draw_path(self, gc, path, transform, rgbFace=None):
pass
    # draw_markers is optional, and we get more correct relative timings by
    # leaving it out. Backend implementers concerned with performance will
    # probably want to implement it.
# def draw_markers(self, gc, marker_path, marker_trans, path, trans,
# rgbFace=None):
# pass
    # draw_path_collection is optional, and we get more correct relative
    # timings by leaving it out. Backend implementers concerned with
    # performance will probably want to implement it.
# def draw_path_collection(self, gc, master_transform, paths,
# all_transforms, offsets, offsetTrans,
# facecolors, edgecolors, linewidths, linestyles,
# antialiaseds):
# pass
    # draw_quad_mesh is optional, and we get more correct relative timings by
    # leaving it out. Backend implementers concerned with performance will
    # probably want to implement it.
# def draw_quad_mesh(self, gc, master_transform, meshWidth, meshHeight,
# coordinates, offsets, offsetTrans, facecolors,
# antialiased, edgecolors):
# pass
def draw_image(self, gc, x, y, im):
pass
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
pass
def flipy(self):
# docstring inherited
return True
def get_canvas_width_height(self):
# docstring inherited
return 100, 100
def get_text_width_height_descent(self, s, prop, ismath):
return 1, 1, 1
def new_gc(self):
# docstring inherited
return GraphicsContextTemplate()
def points_to_pixels(self, points):
# if backend doesn't have dpi, e.g., postscript or svg
return points
# elif backend assumes a value for pixels_per_inch
#return points/72.0 * self.dpi.get() * pixels_per_inch/72.0
# else
#return points/72.0 * self.dpi.get()
class GraphicsContextTemplate(GraphicsContextBase):
"""
The graphics context provides the color, line styles, etc... See the cairo
and postscript backends for examples of mapping the graphics context
attributes (cap styles, join styles, line widths, colors) to a particular
backend. In cairo this is done by wrapping a cairo.Context object and
forwarding the appropriate calls to it using a dictionary mapping styles
to gdk constants. In Postscript, all the work is done by the renderer,
mapping line styles to postscript calls.
If it's more appropriate to do the mapping at the renderer level (as in
the postscript backend), you don't need to override any of the GC methods.
If it's more appropriate to wrap an instance (as in the cairo backend) and
do the mapping here, you'll need to override several of the setter
methods.
The base GraphicsContext stores colors as a RGB tuple on the unit
interval, e.g., (0.5, 0.0, 1.0). You may need to map this to colors
appropriate for your backend.
"""
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def draw_if_interactive():
"""
For image backends - is not required.
For GUI backends - this should be overridden if drawing should be done in
interactive python mode.
"""
def show(*, block=None):
"""
For image backends - is not required.
For GUI backends - show() is usually the last line of a pyplot script and
tells the backend that it is time to draw. In interactive mode, this
should do nothing.
"""
for manager in Gcf.get_all_fig_managers():
# do something to display the GUI
pass
def new_figure_manager(num, *args, FigureClass=Figure, **kwargs):
"""
Create a new figure manager instance
"""
# If a main-level app must be created, this (and
# new_figure_manager_given_figure) is the usual place to do it -- see
# backend_wx, backend_wxagg and backend_tkagg for examples. Not all GUIs
# require explicit instantiation of a main-level app (e.g., backend_gtk3)
# for pylab.
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasTemplate(figure)
manager = FigureManagerTemplate(canvas, num)
return manager
class FigureCanvasTemplate(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc.
Note: GUI templates will want to connect events for button presses,
mouse movements and key presses to functions that call the base
class methods button_press_event, button_release_event,
motion_notify_event, key_press_event, and key_release_event. See the
implementations of the interactive backends for examples.
Attributes
----------
figure : `matplotlib.figure.Figure`
A high-level Figure instance
"""
def draw(self):
"""
Draw the figure using the renderer
"""
renderer = RendererTemplate(self.figure.dpi)
self.figure.draw(renderer)
# You should provide a print_xxx function for every file format
# you can write.
# If the file type is not in the base set of filetypes,
# you should add it to the class-scope filetypes dictionary as follows:
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['foo'] = 'My magic Foo format'
def print_foo(self, filename, *args, **kwargs):
"""
Write out format foo. The dpi, facecolor and edgecolor are restored
to their original values after this call, so you don't need to
save and restore them.
"""
pass
def get_default_filetype(self):
return 'foo'
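# A minimal usage sketch (not part of the template): select this do-nothing
# backend and "save" in the made-up 'foo' format. print_foo does nothing, so
# no file is written; 'demo.foo' is a hypothetical name.
def _example_use_template_sketch():
    import matplotlib
    matplotlib.use('Template')
    import matplotlib.pyplot as plt
    plt.plot([1, 2, 3])
    plt.savefig('demo.foo')  # dispatched to FigureCanvasTemplate.print_foo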
class FigureManagerTemplate(FigureManagerBase):
"""
Wrap everything up into a window for the pylab interface
For non interactive backends, the base class does all the work
"""
pass
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
FigureCanvas = FigureCanvasTemplate
FigureManager = FigureManagerTemplate
|
7c0ec2188332a8f928d0b9d3c8dbcbd6aeda9c643971ae3e37d80676d700551a
|
import numpy as np
from matplotlib.backends.backend_agg import RendererAgg
from matplotlib.tight_bbox import process_figure_for_rasterizing
class MixedModeRenderer(object):
"""
A helper class to implement a renderer that switches between
vector and raster drawing. An example may be a PDF writer, where
most things are drawn with PDF vector commands, but some very
complex objects, such as quad meshes, are rasterised and then
output as images.
"""
def __init__(self, figure, width, height, dpi, vector_renderer,
raster_renderer_class=None,
bbox_inches_restore=None):
"""
Parameters
----------
figure : `matplotlib.figure.Figure`
The figure instance.
width : scalar
The width of the canvas in logical units
height : scalar
The height of the canvas in logical units
dpi : scalar
The dpi of the canvas
vector_renderer : `matplotlib.backend_bases.RendererBase`
An instance of a subclass of
`~matplotlib.backend_bases.RendererBase` that will be used for the
vector drawing.
raster_renderer_class : `matplotlib.backend_bases.RendererBase`
The renderer class to use for the raster drawing. If not provided,
this will use the Agg backend (which is currently the only viable
option anyway.)
"""
if raster_renderer_class is None:
raster_renderer_class = RendererAgg
self._raster_renderer_class = raster_renderer_class
self._width = width
self._height = height
self.dpi = dpi
self._vector_renderer = vector_renderer
self._raster_renderer = None
self._rasterizing = 0
# A reference to the figure is needed as we need to change
# the figure dpi before and after the rasterization. Although
# this looks ugly, I couldn't find a better solution. -JJL
self.figure = figure
self._figdpi = figure.get_dpi()
self._bbox_inches_restore = bbox_inches_restore
self._renderer = vector_renderer
def __getattr__(self, attr):
# Proxy everything that hasn't been overridden to the base
# renderer. Things that *are* overridden can call methods
# on self._renderer directly, but must not cache/store
# methods (because things like RendererAgg change their
# methods on the fly in order to optimise proxying down
# to the underlying C implementation).
return getattr(self._renderer, attr)
def start_rasterizing(self):
"""
Enter "raster" mode. All subsequent drawing commands (until
stop_rasterizing is called) will be drawn with the raster
backend.
If start_rasterizing is called multiple times before
stop_rasterizing is called, this method has no effect.
"""
# change the dpi of the figure temporarily.
self.figure.set_dpi(self.dpi)
if self._bbox_inches_restore: # when tight bbox is used
r = process_figure_for_rasterizing(self.figure,
self._bbox_inches_restore)
self._bbox_inches_restore = r
if self._rasterizing == 0:
self._raster_renderer = self._raster_renderer_class(
self._width*self.dpi, self._height*self.dpi, self.dpi)
self._renderer = self._raster_renderer
self._rasterizing += 1
def stop_rasterizing(self):
"""
Exit "raster" mode. All of the drawing that was done since
the last start_rasterizing command will be copied to the
vector backend by calling draw_image.
If stop_rasterizing is called multiple times before
start_rasterizing is called, this method has no effect.
"""
self._rasterizing -= 1
if self._rasterizing == 0:
self._renderer = self._vector_renderer
height = self._height * self.dpi
buffer, bounds = self._raster_renderer.tostring_rgba_minimized()
l, b, w, h = bounds
if w > 0 and h > 0:
image = np.frombuffer(buffer, dtype=np.uint8)
image = image.reshape((h, w, 4))
image = image[::-1]
gc = self._renderer.new_gc()
# TODO: If the mixedmode resolution differs from the figure's
# dpi, the image must be scaled (dpi->_figdpi). Not all
# backends support this.
self._renderer.draw_image(
gc,
l * self._figdpi / self.dpi,
(height-b-h) * self._figdpi / self.dpi,
image)
self._raster_renderer = None
self._rasterizing = False
# restore the figure dpi.
self.figure.set_dpi(self._figdpi)
if self._bbox_inches_restore: # when tight bbox is used
r = process_figure_for_rasterizing(self.figure,
self._bbox_inches_restore,
self._figdpi)
self._bbox_inches_restore = r
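# Illustrative sketch (not part of the original module): drive a
# MixedModeRenderer the way the vector backends do.  RendererAgg stands in
# for a real vector renderer (e.g. PDF) purely so the example is
# self-contained; the figure size and dpi are example values.
def _example_mixed_rendering():
    from matplotlib.figure import Figure
    dpi = 100
    fig = Figure(figsize=(4, 3), dpi=dpi)
    ax = fig.add_subplot(111)
    mesh = ax.pcolormesh([[0, 1], [1, 0]])
    mesh.set_rasterized(True)      # this artist takes the raster path
    vector_stand_in = RendererAgg(4 * dpi, 3 * dpi, dpi)
    renderer = MixedModeRenderer(fig, 4, 3, dpi, vector_stand_in)
    # start_rasterizing()/stop_rasterizing() are called for the rasterized
    # artist from within the draw machinery.
    fig.draw(renderer)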
|
542e53c89697f133cb4a01efe5c900adea06ad8fca88d44c4235489cc2e57059
|
import wx
from .backend_cairo import cairo, FigureCanvasCairo, RendererCairo
from .backend_wx import (
_BackendWx, _FigureCanvasWxBase, FigureFrameWx,
NavigationToolbar2Wx as NavigationToolbar2WxCairo)
import wx.lib.wxcairo as wxcairo
class FigureFrameWxCairo(FigureFrameWx):
def get_canvas(self, fig):
return FigureCanvasWxCairo(self, -1, fig)
class FigureCanvasWxCairo(_FigureCanvasWxBase, FigureCanvasCairo):
"""
The FigureCanvas contains the figure and does event handling.
In the wxPython backend, it is derived from wxPanel, and (usually) lives
inside a frame instantiated by a FigureManagerWx. The parent window
probably implements a wxSizer to control the displayed control size - but
we give a hint as to our preferred minimum size.
"""
def __init__(self, parent, id, figure):
# _FigureCanvasWxBase should be fixed to have the same signature as
# every other FigureCanvas and use cooperative inheritance, but in the
# meantime the following will make do.
_FigureCanvasWxBase.__init__(self, parent, id, figure)
FigureCanvasCairo.__init__(self, figure)
self._renderer = RendererCairo(self.figure.dpi)
def draw(self, drawDC=None):
width = int(self.figure.bbox.width)
height = int(self.figure.bbox.height)
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
self._renderer.set_ctx_from_surface(surface)
self._renderer.set_width_height(width, height)
self.figure.draw(self._renderer)
self.bitmap = wxcairo.BitmapFromImageSurface(surface)
self._isDrawn = True
self.gui_repaint(drawDC=drawDC, origin='WXCairo')
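# Illustrative sketch (not part of the original module): the same cairo
# pipeline used by ``draw`` above, shown headlessly -- render a Figure into
# a cairo ImageSurface and dump it to a PNG.  No wx window is required; the
# output file name is an example value.
def _example_render_to_surface(fig, filename="example.png"):
    width = int(fig.bbox.width)
    height = int(fig.bbox.height)
    surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
    renderer = RendererCairo(fig.dpi)
    renderer.set_ctx_from_surface(surface)
    renderer.set_width_height(width, height)
    fig.draw(renderer)
    surface.write_to_png(filename)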
@_BackendWx.export
class _BackendWxCairo(_BackendWx):
FigureCanvas = FigureCanvasWxCairo
_frame_class = FigureFrameWxCairo
|
44a410f7c1f25eb53c24248fd2ba4fe7cc0fb93262e1261c3d67590ca6bc191f
|
from . import backend_cairo, backend_gtk3
from .backend_gtk3 import Gtk, _BackendGTK3
from matplotlib import cbook
from matplotlib.backend_bases import cursors
class RendererGTK3Cairo(backend_cairo.RendererCairo):
def set_context(self, ctx):
self.gc.ctx = backend_cairo._to_context(ctx)
class FigureCanvasGTK3Cairo(backend_gtk3.FigureCanvasGTK3,
backend_cairo.FigureCanvasCairo):
def _renderer_init(self):
"""Use cairo renderer."""
self._renderer = RendererGTK3Cairo(self.figure.dpi)
def _render_figure(self, width, height):
self._renderer.set_width_height(width, height)
self.figure.draw(self._renderer)
def on_draw_event(self, widget, ctx):
"""GtkDrawable draw event."""
# toolbar = self.toolbar
# if toolbar:
# toolbar.set_cursor(cursors.WAIT)
self._renderer.set_context(ctx)
allocation = self.get_allocation()
Gtk.render_background(
self.get_style_context(), ctx,
allocation.x, allocation.y, allocation.width, allocation.height)
self._render_figure(allocation.width, allocation.height)
# if toolbar:
# toolbar.set_cursor(toolbar._lastCursor)
return False # finish event propagation?
@cbook.deprecated("3.1", alternative="backend_gtk3.FigureManagerGTK3")
class FigureManagerGTK3Cairo(backend_gtk3.FigureManagerGTK3):
pass
@_BackendGTK3.export
class _BackendGTK3Cairo(_BackendGTK3):
FigureCanvas = FigureCanvasGTK3Cairo
|
8d46fc9888fe2255e635cb0c5a7ab20a5b2ed066d237d76fb12358953fff2cd4
|
"""
A wxPython backend for matplotlib.
Originally contributed by Jeremy O'Donoghue ([email protected]) and John
Hunter ([email protected]).
Copyright (C) Jeremy O'Donoghue & John Hunter, 2003-4.
"""
import logging
import math
import os.path
import sys
import weakref
import matplotlib
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, GraphicsContextBase,
NavigationToolbar2, RendererBase, TimerBase, cursors, ToolContainerBase,
StatusbarBase)
from matplotlib.backend_bases import _has_pil
from matplotlib import cbook, rcParams, backend_tools
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_managers import ToolManager
from matplotlib.figure import Figure
from matplotlib.path import Path
from matplotlib.transforms import Affine2D
from matplotlib.widgets import SubplotTool
import wx
_log = logging.getLogger(__name__)
# Debugging settings here...
# Debug level set here. If the debug level is less than 5, information
# messages (progressively more info for lower value) are printed. In addition,
# traceback is performed, and pdb activated, for all uncaught exceptions in
# this case
_DEBUG = 5
_DEBUG_lvls = {1: 'Low ', 2: 'Med ', 3: 'High', 4: 'Error'}
def DEBUG_MSG(string, lvl=3, o=None):
if lvl >= _DEBUG:
print(f"{_DEBUG_lvls[lvl]}- {string} in {type(o)}")
@cbook.deprecated("3.1")
def debug_on_error(type, value, tb):
"""Code due to Thomas Heller - published in Python Cookbook (O'Reilly)"""
import pdb
import traceback
traceback.print_exception(type, value, tb)
print()
pdb.pm()
@cbook.deprecated("3.1")
class fake_stderr(object):
"""
Wx does strange things with stderr, as it makes the assumption that
there is probably no console. This redirects stderr to the console, since
we know that there is one!
"""
def write(self, msg):
print("Stderr: %s\n\r" % msg)
# the true dots per inch on the screen; should be display dependent
# see
# http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5
# for some info about screen dpi
PIXELS_PER_INCH = 75
# Delay time for idle checks
IDLE_DELAY = 5 # Documented as deprecated as of Matplotlib 3.1.
def error_msg_wx(msg, parent=None):
"""
    Signal an error condition -- in a GUI, pop up an error dialog.
"""
dialog = wx.MessageDialog(parent=parent,
message=msg,
caption='Matplotlib backend_wx error',
style=wx.OK | wx.CENTRE)
dialog.ShowModal()
dialog.Destroy()
return None
@cbook.deprecated("3.1")
def raise_msg_to_str(msg):
"""msg is a return arg from a raise. Join with new lines."""
if not isinstance(msg, str):
msg = '\n'.join(map(str, msg))
return msg
class TimerWx(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses WxTimer events.
Attributes
----------
interval : int
The time between timer events in milliseconds. Default is 1000 ms.
single_shot : bool
Boolean flag indicating whether this timer should operate as single
shot (run once and then stop). Defaults to False.
callbacks : list
Stores list of (func, args) tuples that will be called upon timer
events. This list can be manipulated directly, or the functions
`add_callback` and `remove_callback` can be used.
'''
def __init__(self, *args, **kwargs):
if args and isinstance(args[0], wx.EvtHandler):
cbook.warn_deprecated(
"3.0", message="Passing a wx.EvtHandler as first argument to "
"the TimerWx constructor is deprecated since %(since)s.")
args = args[1:]
TimerBase.__init__(self, *args, **kwargs)
self._timer = wx.Timer()
self._timer.Notify = self._on_timer
def _timer_start(self):
self._timer.Start(self._interval, self._single)
def _timer_stop(self):
self._timer.Stop()
def _timer_set_interval(self):
self._timer_start()
def _timer_set_single_shot(self):
self._timer.Start()
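# Illustrative sketch (not part of the original module): typical TimerWx use.
# A running wx.App is assumed to exist; the interval and callback are example
# values.
def _example_timer():
    timer = TimerWx(interval=500)        # fire every 500 ms
    timer.add_callback(print, "tick")    # callbacks get the supplied args
    timer.start()
    return timer                         # keep a reference or it may be GC'd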
class RendererWx(RendererBase):
"""
The renderer handles all the drawing primitives using a graphics
context instance that controls the colors/styles. It acts as the
'renderer' instance used by many classes in the hierarchy.
"""
# In wxPython, drawing is performed on a wxDC instance, which will
# generally be mapped to the client area of the window displaying
# the plot. Under wxPython, the wxDC instance has a wx.Pen which
# describes the colour and weight of any lines drawn, and a wxBrush
# which describes the fill colour of any closed polygon.
# Font styles, families and weight.
fontweights = {
100: wx.FONTWEIGHT_LIGHT,
200: wx.FONTWEIGHT_LIGHT,
300: wx.FONTWEIGHT_LIGHT,
400: wx.FONTWEIGHT_NORMAL,
500: wx.FONTWEIGHT_NORMAL,
600: wx.FONTWEIGHT_NORMAL,
700: wx.FONTWEIGHT_BOLD,
800: wx.FONTWEIGHT_BOLD,
900: wx.FONTWEIGHT_BOLD,
'ultralight': wx.FONTWEIGHT_LIGHT,
'light': wx.FONTWEIGHT_LIGHT,
'normal': wx.FONTWEIGHT_NORMAL,
'medium': wx.FONTWEIGHT_NORMAL,
'semibold': wx.FONTWEIGHT_NORMAL,
'bold': wx.FONTWEIGHT_BOLD,
'heavy': wx.FONTWEIGHT_BOLD,
'ultrabold': wx.FONTWEIGHT_BOLD,
'black': wx.FONTWEIGHT_BOLD,
}
fontangles = {
'italic': wx.FONTSTYLE_ITALIC,
'normal': wx.FONTSTYLE_NORMAL,
'oblique': wx.FONTSTYLE_SLANT,
}
# wxPython allows for portable font styles, choosing them appropriately for
# the target platform. Map some standard font names to the portable styles.
    # QUESTION: Would it be wise to agree standard fontnames across all backends?
fontnames = {
'Sans': wx.FONTFAMILY_SWISS,
'Roman': wx.FONTFAMILY_ROMAN,
'Script': wx.FONTFAMILY_SCRIPT,
'Decorative': wx.FONTFAMILY_DECORATIVE,
'Modern': wx.FONTFAMILY_MODERN,
'Courier': wx.FONTFAMILY_MODERN,
'courier': wx.FONTFAMILY_MODERN,
}
def __init__(self, bitmap, dpi):
"""Initialise a wxWindows renderer instance."""
cbook.warn_deprecated(
"2.0", name="wx", obj_type="backend", removal="the future",
alternative="wxagg", addendum="See the Matplotlib usage FAQ for "
"more info on backends.")
RendererBase.__init__(self)
DEBUG_MSG("__init__()", 1, self)
self.width = bitmap.GetWidth()
self.height = bitmap.GetHeight()
self.bitmap = bitmap
self.fontd = {}
self.dpi = dpi
self.gc = None
def flipy(self):
# docstring inherited
return True
def offset_text_height(self):
return True
def get_text_width_height_descent(self, s, prop, ismath):
# docstring inherited
if ismath:
s = cbook.strip_math(s)
if self.gc is None:
gc = self.new_gc()
else:
gc = self.gc
gfx_ctx = gc.gfx_ctx
font = self.get_wx_font(s, prop)
gfx_ctx.SetFont(font, wx.BLACK)
w, h, descent, leading = gfx_ctx.GetFullTextExtent(s)
return w, h, descent
def get_canvas_width_height(self):
# docstring inherited
return self.width, self.height
def handle_clip_rectangle(self, gc):
new_bounds = gc.get_clip_rectangle()
if new_bounds is not None:
new_bounds = new_bounds.bounds
gfx_ctx = gc.gfx_ctx
if gfx_ctx._lastcliprect != new_bounds:
gfx_ctx._lastcliprect = new_bounds
if new_bounds is None:
gfx_ctx.ResetClip()
else:
gfx_ctx.Clip(new_bounds[0],
self.height - new_bounds[1] - new_bounds[3],
new_bounds[2], new_bounds[3])
@staticmethod
def convert_path(gfx_ctx, path, transform):
wxpath = gfx_ctx.CreatePath()
for points, code in path.iter_segments(transform):
if code == Path.MOVETO:
wxpath.MoveToPoint(*points)
elif code == Path.LINETO:
wxpath.AddLineToPoint(*points)
elif code == Path.CURVE3:
wxpath.AddQuadCurveToPoint(*points)
elif code == Path.CURVE4:
wxpath.AddCurveToPoint(*points)
elif code == Path.CLOSEPOLY:
wxpath.CloseSubpath()
return wxpath
def draw_path(self, gc, path, transform, rgbFace=None):
# docstring inherited
gc.select()
self.handle_clip_rectangle(gc)
gfx_ctx = gc.gfx_ctx
transform = transform + \
Affine2D().scale(1.0, -1.0).translate(0.0, self.height)
wxpath = self.convert_path(gfx_ctx, path, transform)
if rgbFace is not None:
gfx_ctx.SetBrush(wx.Brush(gc.get_wxcolour(rgbFace)))
gfx_ctx.DrawPath(wxpath)
else:
gfx_ctx.StrokePath(wxpath)
gc.unselect()
def draw_image(self, gc, x, y, im):
bbox = gc.get_clip_rectangle()
if bbox is not None:
l, b, w, h = bbox.bounds
else:
l = 0
b = 0
w = self.width
h = self.height
rows, cols = im.shape[:2]
bitmap = wx.Bitmap.FromBufferRGBA(cols, rows, im.tostring())
gc = self.get_gc()
gc.select()
gc.gfx_ctx.DrawBitmap(bitmap, int(l), int(self.height - b),
int(w), int(-h))
gc.unselect()
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
# docstring inherited
if ismath:
s = cbook.strip_math(s)
DEBUG_MSG("draw_text()", 1, self)
gc.select()
self.handle_clip_rectangle(gc)
gfx_ctx = gc.gfx_ctx
font = self.get_wx_font(s, prop)
color = gc.get_wxcolour(gc.get_rgb())
gfx_ctx.SetFont(font, color)
w, h, d = self.get_text_width_height_descent(s, prop, ismath)
x = int(x)
y = int(y - h)
if angle == 0.0:
gfx_ctx.DrawText(s, x, y)
else:
rads = math.radians(angle)
xo = h * math.sin(rads)
yo = h * math.cos(rads)
gfx_ctx.DrawRotatedText(s, x - xo, y - yo, rads)
gc.unselect()
def new_gc(self):
# docstring inherited
DEBUG_MSG('new_gc()', 2, self)
self.gc = GraphicsContextWx(self.bitmap, self)
self.gc.select()
self.gc.unselect()
return self.gc
def get_gc(self):
"""
Fetch the locally cached gc.
"""
# This is a dirty hack to allow anything with access to a renderer to
# access the current graphics context
assert self.gc is not None, "gc must be defined"
return self.gc
def get_wx_font(self, s, prop):
"""
Return a wx font. Cache instances in a font dictionary for
efficiency
"""
DEBUG_MSG("get_wx_font()", 1, self)
key = hash(prop)
fontprop = prop
fontname = fontprop.get_name()
font = self.fontd.get(key)
if font is not None:
return font
# Allow use of platform independent and dependent font names
wxFontname = self.fontnames.get(fontname, wx.ROMAN)
wxFacename = '' # Empty => wxPython chooses based on wx_fontname
# Font colour is determined by the active wx.Pen
# TODO: It may be wise to cache font information
size = self.points_to_pixels(fontprop.get_size_in_points())
font = wx.Font(int(size + 0.5), # Size
wxFontname, # 'Generic' name
self.fontangles[fontprop.get_style()], # Angle
self.fontweights[fontprop.get_weight()], # Weight
False, # Underline
wxFacename) # Platform font name
# cache the font and gc and return it
self.fontd[key] = font
return font
def points_to_pixels(self, points):
# docstring inherited
return points * (PIXELS_PER_INCH / 72.0 * self.dpi / 72.0)
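    # Worked example of the conversion above (illustrative values): with
    # PIXELS_PER_INCH = 75 and a 100-dpi figure, a 12 pt font maps to
    # 12 * (75 / 72) * (100 / 72) ~= 17.4 pixels before rounding.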
class GraphicsContextWx(GraphicsContextBase):
"""
The graphics context provides the color, line styles, etc...
This class stores a reference to a wxMemoryDC, and a
wxGraphicsContext that draws to it. Creating a wxGraphicsContext
seems to be fairly heavy, so these objects are cached based on the
bitmap object that is passed in.
The base GraphicsContext stores colors as a RGB tuple on the unit
interval, e.g., (0.5, 0.0, 1.0). wxPython uses an int interval, but
since wxPython colour management is rather simple, I have not chosen
to implement a separate colour manager class.
"""
_capd = {'butt': wx.CAP_BUTT,
'projecting': wx.CAP_PROJECTING,
'round': wx.CAP_ROUND}
_joind = {'bevel': wx.JOIN_BEVEL,
'miter': wx.JOIN_MITER,
'round': wx.JOIN_ROUND}
_cache = weakref.WeakKeyDictionary()
def __init__(self, bitmap, renderer):
GraphicsContextBase.__init__(self)
# assert self.Ok(), "wxMemoryDC not OK to use"
DEBUG_MSG("__init__()", 1, self)
DEBUG_MSG("__init__() 2: %s" % bitmap, 1, self)
dc, gfx_ctx = self._cache.get(bitmap, (None, None))
if dc is None:
dc = wx.MemoryDC()
dc.SelectObject(bitmap)
gfx_ctx = wx.GraphicsContext.Create(dc)
gfx_ctx._lastcliprect = None
self._cache[bitmap] = dc, gfx_ctx
self.bitmap = bitmap
self.dc = dc
self.gfx_ctx = gfx_ctx
self._pen = wx.Pen('BLACK', 1, wx.SOLID)
gfx_ctx.SetPen(self._pen)
self.renderer = renderer
def select(self):
"""Select the current bitmap into this wxDC instance."""
if sys.platform == 'win32':
self.dc.SelectObject(self.bitmap)
self.IsSelected = True
def unselect(self):
"""Select a Null bitmap into this wxDC instance."""
if sys.platform == 'win32':
self.dc.SelectObject(wx.NullBitmap)
self.IsSelected = False
def set_foreground(self, fg, isRGBA=None):
# docstring inherited
# Implementation note: wxPython has a separate concept of pen and
# brush - the brush fills any outline trace left by the pen.
# Here we set both to the same colour - if a figure is not to be
# filled, the renderer will set the brush to be transparent
# Same goes for text foreground...
DEBUG_MSG("set_foreground()", 1, self)
self.select()
GraphicsContextBase.set_foreground(self, fg, isRGBA)
self._pen.SetColour(self.get_wxcolour(self.get_rgb()))
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_linewidth(self, w):
# docstring inherited
w = float(w)
DEBUG_MSG("set_linewidth()", 1, self)
self.select()
if 0 < w < 1:
w = 1
GraphicsContextBase.set_linewidth(self, w)
lw = int(self.renderer.points_to_pixels(self._linewidth))
if lw == 0:
lw = 1
self._pen.SetWidth(lw)
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_capstyle(self, cs):
# docstring inherited
DEBUG_MSG("set_capstyle()", 1, self)
self.select()
GraphicsContextBase.set_capstyle(self, cs)
self._pen.SetCap(GraphicsContextWx._capd[self._capstyle])
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_joinstyle(self, js):
# docstring inherited
DEBUG_MSG("set_joinstyle()", 1, self)
self.select()
GraphicsContextBase.set_joinstyle(self, js)
self._pen.SetJoin(GraphicsContextWx._joind[self._joinstyle])
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def get_wxcolour(self, color):
"""return a wx.Colour from RGB format"""
DEBUG_MSG("get_wx_color()", 1, self)
if len(color) == 3:
r, g, b = color
r *= 255
g *= 255
b *= 255
return wx.Colour(red=int(r), green=int(g), blue=int(b))
else:
r, g, b, a = color
r *= 255
g *= 255
b *= 255
a *= 255
return wx.Colour(
red=int(r),
green=int(g),
blue=int(b),
alpha=int(a))
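    # Worked example for get_wxcolour above: the base class stores colours on
    # the unit interval, so (0.5, 0.0, 1.0, 0.25) maps to
    # wx.Colour(127, 0, 255, 63) after scaling each channel by 255 and
    # truncating to int.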
class _FigureCanvasWxBase(FigureCanvasBase, wx.Panel):
"""
The FigureCanvas contains the figure and does event handling.
In the wxPython backend, it is derived from wxPanel, and (usually) lives
inside a frame instantiated by a FigureManagerWx. The parent window
probably implements a wx.Sizer to control the displayed control size - but
we give a hint as to our preferred minimum size.
"""
keyvald = {
wx.WXK_CONTROL: 'control',
wx.WXK_SHIFT: 'shift',
wx.WXK_ALT: 'alt',
wx.WXK_LEFT: 'left',
wx.WXK_UP: 'up',
wx.WXK_RIGHT: 'right',
wx.WXK_DOWN: 'down',
wx.WXK_ESCAPE: 'escape',
wx.WXK_F1: 'f1',
wx.WXK_F2: 'f2',
wx.WXK_F3: 'f3',
wx.WXK_F4: 'f4',
wx.WXK_F5: 'f5',
wx.WXK_F6: 'f6',
wx.WXK_F7: 'f7',
wx.WXK_F8: 'f8',
wx.WXK_F9: 'f9',
wx.WXK_F10: 'f10',
wx.WXK_F11: 'f11',
wx.WXK_F12: 'f12',
wx.WXK_SCROLL: 'scroll_lock',
wx.WXK_PAUSE: 'break',
wx.WXK_BACK: 'backspace',
wx.WXK_RETURN: 'enter',
wx.WXK_INSERT: 'insert',
wx.WXK_DELETE: 'delete',
wx.WXK_HOME: 'home',
wx.WXK_END: 'end',
wx.WXK_PAGEUP: 'pageup',
wx.WXK_PAGEDOWN: 'pagedown',
wx.WXK_NUMPAD0: '0',
wx.WXK_NUMPAD1: '1',
wx.WXK_NUMPAD2: '2',
wx.WXK_NUMPAD3: '3',
wx.WXK_NUMPAD4: '4',
wx.WXK_NUMPAD5: '5',
wx.WXK_NUMPAD6: '6',
wx.WXK_NUMPAD7: '7',
wx.WXK_NUMPAD8: '8',
wx.WXK_NUMPAD9: '9',
wx.WXK_NUMPAD_ADD: '+',
wx.WXK_NUMPAD_SUBTRACT: '-',
wx.WXK_NUMPAD_MULTIPLY: '*',
wx.WXK_NUMPAD_DIVIDE: '/',
wx.WXK_NUMPAD_DECIMAL: 'dec',
wx.WXK_NUMPAD_ENTER: 'enter',
wx.WXK_NUMPAD_UP: 'up',
wx.WXK_NUMPAD_RIGHT: 'right',
wx.WXK_NUMPAD_DOWN: 'down',
wx.WXK_NUMPAD_LEFT: 'left',
wx.WXK_NUMPAD_PAGEUP: 'pageup',
wx.WXK_NUMPAD_PAGEDOWN: 'pagedown',
wx.WXK_NUMPAD_HOME: 'home',
wx.WXK_NUMPAD_END: 'end',
wx.WXK_NUMPAD_INSERT: 'insert',
wx.WXK_NUMPAD_DELETE: 'delete',
}
def __init__(self, parent, id, figure):
"""
Initialise a FigureWx instance.
- Initialise the FigureCanvasBase and wxPanel parents.
- Set event handlers for:
EVT_SIZE (Resize event)
EVT_PAINT (Paint event)
"""
FigureCanvasBase.__init__(self, figure)
# Set preferred window size hint - helps the sizer (if one is
# connected)
l, b, w, h = figure.bbox.bounds
w = math.ceil(w)
h = math.ceil(h)
wx.Panel.__init__(self, parent, id, size=wx.Size(w, h))
# Create the drawing bitmap
self.bitmap = wx.Bitmap(w, h)
DEBUG_MSG("__init__() - bitmap w:%d h:%d" % (w, h), 2, self)
# TODO: Add support for 'point' inspection and plot navigation.
self._isDrawn = False
self.Bind(wx.EVT_SIZE, self._onSize)
self.Bind(wx.EVT_PAINT, self._onPaint)
self.Bind(wx.EVT_KEY_DOWN, self._onKeyDown)
self.Bind(wx.EVT_KEY_UP, self._onKeyUp)
self.Bind(wx.EVT_RIGHT_DOWN, self._onRightButtonDown)
self.Bind(wx.EVT_RIGHT_DCLICK, self._onRightButtonDClick)
self.Bind(wx.EVT_RIGHT_UP, self._onRightButtonUp)
self.Bind(wx.EVT_MOUSEWHEEL, self._onMouseWheel)
self.Bind(wx.EVT_LEFT_DOWN, self._onLeftButtonDown)
self.Bind(wx.EVT_LEFT_DCLICK, self._onLeftButtonDClick)
self.Bind(wx.EVT_LEFT_UP, self._onLeftButtonUp)
self.Bind(wx.EVT_MOTION, self._onMotion)
self.Bind(wx.EVT_LEAVE_WINDOW, self._onLeave)
self.Bind(wx.EVT_ENTER_WINDOW, self._onEnter)
# Add middle button events
self.Bind(wx.EVT_MIDDLE_DOWN, self._onMiddleButtonDown)
self.Bind(wx.EVT_MIDDLE_DCLICK, self._onMiddleButtonDClick)
self.Bind(wx.EVT_MIDDLE_UP, self._onMiddleButtonUp)
self.Bind(wx.EVT_MOUSE_CAPTURE_CHANGED, self._onCaptureLost)
self.Bind(wx.EVT_MOUSE_CAPTURE_LOST, self._onCaptureLost)
self.SetBackgroundStyle(wx.BG_STYLE_PAINT) # Reduce flicker.
self.SetBackgroundColour(wx.WHITE)
@property
@cbook.deprecated("3.0")
def macros(self):
return {}
def Copy_to_Clipboard(self, event=None):
"copy bitmap of canvas to system clipboard"
bmp_obj = wx.BitmapDataObject()
bmp_obj.SetBitmap(self.bitmap)
if not wx.TheClipboard.IsOpened():
open_success = wx.TheClipboard.Open()
if open_success:
wx.TheClipboard.SetData(bmp_obj)
wx.TheClipboard.Close()
wx.TheClipboard.Flush()
def draw_idle(self):
# docstring inherited
DEBUG_MSG("draw_idle()", 1, self)
self._isDrawn = False # Force redraw
# Triggering a paint event is all that is needed to defer drawing
# until later. The platform will send the event when it thinks it is
# a good time (usually as soon as there are no other events pending).
self.Refresh(eraseBackground=False)
def new_timer(self, *args, **kwargs):
# docstring inherited
return TimerWx(*args, **kwargs)
def flush_events(self):
# docstring inherited
wx.Yield()
def start_event_loop(self, timeout=0):
# docstring inherited
if hasattr(self, '_event_loop'):
raise RuntimeError("Event loop already running")
timer = wx.Timer(self, id=wx.ID_ANY)
if timeout > 0:
timer.Start(timeout * 1000, oneShot=True)
self.Bind(wx.EVT_TIMER, self.stop_event_loop, id=timer.GetId())
# Event loop handler for start/stop event loop
self._event_loop = wx.GUIEventLoop()
self._event_loop.Run()
timer.Stop()
def stop_event_loop(self, event=None):
# docstring inherited
if hasattr(self, '_event_loop'):
if self._event_loop.IsRunning():
self._event_loop.Exit()
del self._event_loop
def _get_imagesave_wildcards(self):
'return the wildcard string for the filesave dialog'
default_filetype = self.get_default_filetype()
filetypes = self.get_supported_filetypes_grouped()
sorted_filetypes = sorted(filetypes.items())
wildcards = []
extensions = []
filter_index = 0
for i, (name, exts) in enumerate(sorted_filetypes):
ext_list = ';'.join(['*.%s' % ext for ext in exts])
extensions.append(exts[0])
wildcard = '%s (%s)|%s' % (name, ext_list, ext_list)
if default_filetype in exts:
filter_index = i
wildcards.append(wildcard)
wildcards = '|'.join(wildcards)
return wildcards, extensions, filter_index
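    # Example of the string built above (illustrative, assuming PNG is the
    # default filetype): the wx.FileDialog wildcard looks like
    # "... |Portable Network Graphics (*.png)|*.png| ..." with filter_index
    # pointing at the "Portable Network Graphics" group.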
def gui_repaint(self, drawDC=None, origin='WX'):
"""
Performs update of the displayed image on the GUI canvas, using the
supplied wx.PaintDC device context.
The 'WXAgg' backend sets origin accordingly.
"""
DEBUG_MSG("gui_repaint()", 1, self)
if self.IsShownOnScreen():
if not drawDC:
# not called from OnPaint use a ClientDC
drawDC = wx.ClientDC(self)
                # The following is for the 'WX' backend on Windows:
                # the bitmap cannot be in use by another DC
                # (see GraphicsContextWx._cache).
if wx.Platform == '__WXMSW__' and origin == 'WX':
img = self.bitmap.ConvertToImage()
bmp = img.ConvertToBitmap()
drawDC.DrawBitmap(bmp, 0, 0)
else:
drawDC.DrawBitmap(self.bitmap, 0, 0)
filetypes = {
**FigureCanvasBase.filetypes,
'bmp': 'Windows bitmap',
'jpeg': 'JPEG',
'jpg': 'JPEG',
'pcx': 'PCX',
'png': 'Portable Network Graphics',
'tif': 'Tagged Image Format File',
'tiff': 'Tagged Image Format File',
'xpm': 'X pixmap',
}
def print_figure(self, filename, *args, **kwargs):
# docstring inherited
super().print_figure(filename, *args, **kwargs)
        # Restore the current view; this is needed because the artists'
        # contains() methods rely on particular attributes of the rendered
        # figure when determining things like bounding boxes.
if self._isDrawn:
self.draw()
def _onPaint(self, evt):
"""
Called when wxPaintEvt is generated
"""
DEBUG_MSG("_onPaint()", 1, self)
drawDC = wx.PaintDC(self)
if not self._isDrawn:
self.draw(drawDC=drawDC)
else:
self.gui_repaint(drawDC=drawDC)
drawDC.Destroy()
def _onSize(self, evt):
"""
Called when wxEventSize is generated.
In this application we attempt to resize to fit the window, so it
is better to take the performance hit and redraw the whole window.
"""
DEBUG_MSG("_onSize()", 2, self)
sz = self.GetParent().GetSizer()
if sz:
si = sz.GetItem(self)
if sz and si and not si.Proportion and not si.Flag & wx.EXPAND:
# managed by a sizer, but with a fixed size
size = self.GetMinSize()
else:
# variable size
size = self.GetClientSize()
if getattr(self, "_width", None):
if size == (self._width, self._height):
# no change in size
return
self._width, self._height = size
# Create a new, correctly sized bitmap
self.bitmap = wx.Bitmap(self._width, self._height)
self._isDrawn = False
if self._width <= 1 or self._height <= 1:
return # Empty figure
dpival = self.figure.dpi
winch = self._width / dpival
hinch = self._height / dpival
self.figure.set_size_inches(winch, hinch, forward=False)
# Rendering will happen on the associated paint event
# so no need to do anything here except to make sure
# the whole background is repainted.
self.Refresh(eraseBackground=False)
FigureCanvasBase.resize_event(self)
def _get_key(self, evt):
keyval = evt.KeyCode
if keyval in self.keyvald:
key = self.keyvald[keyval]
elif keyval < 256:
key = chr(keyval)
# wx always returns an uppercase, so make it lowercase if the shift
# key is not depressed (NOTE: this will not handle Caps Lock)
if not evt.ShiftDown():
key = key.lower()
else:
key = None
for meth, prefix in (
[evt.AltDown, 'alt'],
[evt.ControlDown, 'ctrl'], ):
if meth():
key = '{0}+{1}'.format(prefix, key)
return key
def _onKeyDown(self, evt):
"""Capture key press."""
key = self._get_key(evt)
FigureCanvasBase.key_press_event(self, key, guiEvent=evt)
if self:
evt.Skip()
def _onKeyUp(self, evt):
"""Release key."""
key = self._get_key(evt)
FigureCanvasBase.key_release_event(self, key, guiEvent=evt)
if self:
evt.Skip()
def _set_capture(self, capture=True):
"""control wx mouse capture """
if self.HasCapture():
self.ReleaseMouse()
if capture:
self.CaptureMouse()
def _onCaptureLost(self, evt):
"""Capture changed or lost"""
self._set_capture(False)
def _onRightButtonDown(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self._set_capture(True)
FigureCanvasBase.button_press_event(self, x, y, 3, guiEvent=evt)
def _onRightButtonDClick(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self._set_capture(True)
FigureCanvasBase.button_press_event(self, x, y, 3,
dblclick=True, guiEvent=evt)
def _onRightButtonUp(self, evt):
"""End measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self._set_capture(False)
FigureCanvasBase.button_release_event(self, x, y, 3, guiEvent=evt)
def _onLeftButtonDown(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self._set_capture(True)
FigureCanvasBase.button_press_event(self, x, y, 1, guiEvent=evt)
def _onLeftButtonDClick(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self._set_capture(True)
FigureCanvasBase.button_press_event(self, x, y, 1,
dblclick=True, guiEvent=evt)
def _onLeftButtonUp(self, evt):
"""End measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self._set_capture(False)
FigureCanvasBase.button_release_event(self, x, y, 1, guiEvent=evt)
# Add middle button events
def _onMiddleButtonDown(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self._set_capture(True)
FigureCanvasBase.button_press_event(self, x, y, 2, guiEvent=evt)
def _onMiddleButtonDClick(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self._set_capture(True)
FigureCanvasBase.button_press_event(self, x, y, 2,
dblclick=True, guiEvent=evt)
def _onMiddleButtonUp(self, evt):
"""End measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self._set_capture(False)
FigureCanvasBase.button_release_event(self, x, y, 2, guiEvent=evt)
def _onMouseWheel(self, evt):
"""Translate mouse wheel events into matplotlib events"""
# Determine mouse location
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
# Convert delta/rotation/rate into a floating point step size
delta = evt.GetWheelDelta()
rotation = evt.GetWheelRotation()
rate = evt.GetLinesPerAction()
step = rate * rotation / delta
# Done handling event
evt.Skip()
# Mac is giving two events for every wheel event
# Need to skip every second one
if wx.Platform == '__WXMAC__':
if not hasattr(self, '_skipwheelevent'):
self._skipwheelevent = True
elif self._skipwheelevent:
self._skipwheelevent = False
return # Return without processing event
else:
self._skipwheelevent = True
# Convert to mpl event
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=evt)
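    # Worked example for the step computation above (illustrative values): a
    # typical wheel reports rotation = +/-120 with delta = 120 and 3 lines
    # per action, so one notch yields step = 3 * 120 / 120 = +/-3.0.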
def _onMotion(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=evt)
def _onLeave(self, evt):
"""Mouse has left the window."""
evt.Skip()
FigureCanvasBase.leave_notify_event(self, guiEvent=evt)
def _onEnter(self, evt):
"""Mouse has entered the window."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
FigureCanvasBase.enter_notify_event(self, guiEvent=evt, xy=(x, y))
class FigureCanvasWx(_FigureCanvasWxBase):
# Rendering to a Wx canvas using the deprecated Wx renderer.
def draw(self, drawDC=None):
"""
Render the figure using RendererWx instance renderer, or using a
previously defined renderer if none is specified.
"""
DEBUG_MSG("draw()", 1, self)
self.renderer = RendererWx(self.bitmap, self.figure.dpi)
self.figure.draw(self.renderer)
self._isDrawn = True
self.gui_repaint(drawDC=drawDC)
def print_bmp(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_BMP, *args, **kwargs)
if not _has_pil:
def print_jpeg(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_JPEG,
*args, **kwargs)
print_jpg = print_jpeg
def print_pcx(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_PCX, *args, **kwargs)
def print_png(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_PNG, *args, **kwargs)
if not _has_pil:
def print_tiff(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_TIF,
*args, **kwargs)
print_tif = print_tiff
def print_xpm(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_XPM, *args, **kwargs)
def _print_image(self, filename, filetype, *args, **kwargs):
origBitmap = self.bitmap
l, b, width, height = self.figure.bbox.bounds
width = math.ceil(width)
height = math.ceil(height)
self.bitmap = wx.Bitmap(width, height)
renderer = RendererWx(self.bitmap, self.figure.dpi)
gc = renderer.new_gc()
self.figure.draw(renderer)
# image is the object that we call SaveFile on.
image = self.bitmap
# set the JPEG quality appropriately. Unfortunately, it is only
# possible to set the quality on a wx.Image object. So if we
# are saving a JPEG, convert the wx.Bitmap to a wx.Image,
# and set the quality.
if filetype == wx.BITMAP_TYPE_JPEG:
jpeg_quality = kwargs.get('quality',
rcParams['savefig.jpeg_quality'])
image = self.bitmap.ConvertToImage()
image.SetOption(wx.IMAGE_OPTION_QUALITY, str(jpeg_quality))
# Now that we have rendered into the bitmap, save it to the appropriate
# file type and clean up.
if isinstance(filename, str):
if not image.SaveFile(filename, filetype):
raise RuntimeError(f'Could not save figure to {filename}')
elif cbook.is_writable_file_like(filename):
if not isinstance(image, wx.Image):
image = image.ConvertToImage()
if not image.SaveStream(filename, filetype):
raise RuntimeError(f'Could not save figure to {filename}')
# Restore everything to normal
self.bitmap = origBitmap
# Note: draw is required here since bits of state about the
# last renderer are strewn about the artist draw methods. Do
# not remove the draw without first verifying that these have
# been cleaned up. The artist contains() methods will fail
# otherwise.
if self._isDrawn:
self.draw()
self.Refresh()
########################################################################
#
# The following functions and classes are for pylab compatibility
# mode (matplotlib.pylab) and implement figure managers, etc...
#
########################################################################
class FigureFrameWx(wx.Frame):
def __init__(self, num, fig):
# On non-Windows platform, explicitly set the position - fix
# positioning bug on some Linux platforms
if wx.Platform == '__WXMSW__':
pos = wx.DefaultPosition
else:
pos = wx.Point(20, 20)
wx.Frame.__init__(self, parent=None, id=-1, pos=pos,
title="Figure %d" % num)
# Frame will be sized later by the Fit method
DEBUG_MSG("__init__()", 1, self)
self.num = num
_set_frame_icon(self)
self.canvas = self.get_canvas(fig)
self.canvas.SetInitialSize(wx.Size(fig.bbox.width, fig.bbox.height))
self.canvas.SetFocus()
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.canvas, 1, wx.TOP | wx.LEFT | wx.EXPAND)
# By adding toolbar in sizer, we are able to put it at the bottom
# of the frame - so appearance is closer to GTK version
self.toolmanager = self._get_toolmanager()
if self.toolmanager:
self.statusbar = StatusbarWx(self, self.toolmanager)
else:
self.statusbar = StatusBarWx(self)
self.SetStatusBar(self.statusbar)
self.toolbar = self._get_toolbar(self.statusbar)
if self.toolmanager:
backend_tools.add_tools_to_manager(self.toolmanager)
if self.toolbar:
backend_tools.add_tools_to_container(self.toolbar)
if self.toolbar is not None:
self.toolbar.Realize()
# On Windows platform, default window size is incorrect, so set
# toolbar width to figure width.
tw, th = self.toolbar.GetSize()
fw, fh = self.canvas.GetSize()
# By adding toolbar in sizer, we are able to put it at the bottom
# of the frame - so appearance is closer to GTK version.
self.toolbar.SetSize(wx.Size(fw, th))
self.sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND)
self.SetSizer(self.sizer)
self.Fit()
self.canvas.SetMinSize((2, 2))
self.figmgr = FigureManagerWx(self.canvas, num, self)
self.Bind(wx.EVT_CLOSE, self._onClose)
def _get_toolbar(self, statbar):
if rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2Wx(self.canvas)
toolbar.set_status_bar(statbar)
elif matplotlib.rcParams['toolbar'] == 'toolmanager':
toolbar = ToolbarWx(self.toolmanager, self)
else:
toolbar = None
return toolbar
def _get_toolmanager(self):
if matplotlib.rcParams['toolbar'] == 'toolmanager':
toolmanager = ToolManager(self.canvas.figure)
else:
toolmanager = None
return toolmanager
def get_canvas(self, fig):
return FigureCanvasWx(self, -1, fig)
def get_figure_manager(self):
DEBUG_MSG("get_figure_manager()", 1, self)
return self.figmgr
def _onClose(self, evt):
DEBUG_MSG("onClose()", 1, self)
self.canvas.close_event()
self.canvas.stop_event_loop()
Gcf.destroy(self.num)
# self.Destroy()
def GetToolBar(self):
"""Override wxFrame::GetToolBar as we don't have managed toolbar"""
return self.toolbar
def Destroy(self, *args, **kwargs):
try:
self.canvas.mpl_disconnect(self.toolbar._idDrag)
# Rationale for line above: see issue 2941338.
except AttributeError:
pass # classic toolbar lacks the attribute
if not self.IsBeingDeleted():
wx.Frame.Destroy(self, *args, **kwargs)
if self.toolbar is not None:
self.toolbar.Destroy()
wxapp = wx.GetApp()
if wxapp:
wxapp.Yield()
return True
class FigureManagerWx(FigureManagerBase):
"""
This class contains the FigureCanvas and GUI frame
It is instantiated by GcfWx whenever a new figure is created. GcfWx is
responsible for managing multiple instances of FigureManagerWx.
Attributes
----------
canvas : `FigureCanvas`
a FigureCanvasWx(wx.Panel) instance
window : wxFrame
a wxFrame instance - wxpython.org/Phoenix/docs/html/Frame.html
"""
def __init__(self, canvas, num, frame):
DEBUG_MSG("__init__()", 1, self)
FigureManagerBase.__init__(self, canvas, num)
self.frame = frame
self.window = frame
self.toolmanager = getattr(frame, "toolmanager", None)
self.toolbar = frame.GetToolBar()
def show(self):
self.frame.Show()
self.canvas.draw()
def destroy(self, *args):
DEBUG_MSG("destroy()", 1, self)
self.frame.Destroy()
wxapp = wx.GetApp()
if wxapp:
wxapp.Yield()
def get_window_title(self):
return self.window.GetTitle()
def set_window_title(self, title):
self.window.SetTitle(title)
def resize(self, width, height):
'Set the canvas size in pixels'
self.canvas.SetInitialSize(wx.Size(width, height))
self.window.GetSizer().Fit(self.window)
def _load_bitmap(filename):
"""
Load a bitmap file from the backends/images subdirectory in which the
matplotlib library is installed. The filename parameter should not
contain any path information as this is determined automatically.
Returns a wx.Bitmap object
"""
basedir = os.path.join(rcParams['datapath'], 'images')
bmpFilename = os.path.normpath(os.path.join(basedir, filename))
if not os.path.exists(bmpFilename):
raise IOError('Could not find bitmap file "%s"; dying' % bmpFilename)
bmp = wx.Bitmap(bmpFilename)
return bmp
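# Illustrative sketch (not part of the original module): _load_bitmap takes a
# bare file name and resolves it against <datapath>/images itself.  A wx.App
# is assumed to exist so the bitmap can be constructed.
def _example_load_toolbar_icon():
    return _load_bitmap('home.png')   # the standard "home" toolbar icon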
def _set_frame_icon(frame):
# set frame icon
bundle = wx.IconBundle()
for image in ('matplotlib.png', 'matplotlib_large.png'):
image = os.path.join(matplotlib.rcParams['datapath'], 'images', image)
if not os.path.exists(image):
continue
icon = wx.Icon(_load_bitmap(image))
if not icon.IsOk():
return
bundle.AddIcon(icon)
frame.SetIcons(bundle)
@cbook.deprecated("3.1")
class MenuButtonWx(wx.Button):
"""
wxPython does not permit a menu to be incorporated directly into a toolbar.
This class simulates the effect by associating a pop-up menu with a button
in the toolbar, and managing this as though it were a menu.
"""
def __init__(self, parent):
wx.Button.__init__(self, parent, wx.ID_ANY, "Axes: ",
style=wx.BU_EXACTFIT)
self._toolbar = parent
self._menu = wx.Menu()
self._axisId = []
# First two menu items never change...
self._allId = wx.NewId()
self._invertId = wx.NewId()
self._menu.Append(self._allId, "All", "Select all axes", False)
self._menu.Append(self._invertId, "Invert", "Invert axes selected",
False)
self._menu.AppendSeparator()
self.Bind(wx.EVT_BUTTON, self._onMenuButton, id=self.GetId())
self.Bind(wx.EVT_MENU, self._handleSelectAllAxes, id=self._allId)
self.Bind(wx.EVT_MENU, self._handleInvertAxesSelected,
id=self._invertId)
    def Destroy(self):
        self._menu.Destroy()
        # Destroy the underlying wx.Button explicitly; calling self.Destroy()
        # here would recurse into this override indefinitely.
        wx.Button.Destroy(self)
def _onMenuButton(self, evt):
"""Handle menu button pressed."""
x, y = self.GetPosition()
w, h = self.GetSize()
self.PopupMenuXY(self._menu, x, y + h - 4)
# When menu returned, indicate selection in button
evt.Skip()
def _handleSelectAllAxes(self, evt):
"""Called when the 'select all axes' menu item is selected."""
if len(self._axisId) == 0:
return
for i in range(len(self._axisId)):
self._menu.Check(self._axisId[i], True)
self._toolbar.set_active(self.getActiveAxes())
evt.Skip()
def _handleInvertAxesSelected(self, evt):
"""Called when the invert all menu item is selected"""
if len(self._axisId) == 0:
return
for i in range(len(self._axisId)):
if self._menu.IsChecked(self._axisId[i]):
self._menu.Check(self._axisId[i], False)
else:
self._menu.Check(self._axisId[i], True)
self._toolbar.set_active(self.getActiveAxes())
evt.Skip()
def _onMenuItemSelected(self, evt):
"""Called whenever one of the specific axis menu items is selected"""
current = self._menu.IsChecked(evt.GetId())
if current:
new = False
else:
new = True
self._menu.Check(evt.GetId(), new)
# Lines above would be deleted based on svn tracker ID 2841525;
# not clear whether this matters or not.
self._toolbar.set_active(self.getActiveAxes())
evt.Skip()
def updateAxes(self, maxAxis):
"""Ensures that there are entries for max_axis axes in the menu
(selected by default)."""
if maxAxis > len(self._axisId):
for i in range(len(self._axisId) + 1, maxAxis + 1):
menuId = wx.NewId()
self._axisId.append(menuId)
self._menu.Append(menuId, "Axis %d" % i,
"Select axis %d" % i,
True)
self._menu.Check(menuId, True)
self.Bind(wx.EVT_MENU, self._onMenuItemSelected, id=menuId)
elif maxAxis < len(self._axisId):
for menuId in self._axisId[maxAxis:]:
self._menu.Delete(menuId)
self._axisId = self._axisId[:maxAxis]
self._toolbar.set_active(list(range(maxAxis)))
def getActiveAxes(self):
"""Return a list of the selected axes."""
active = [idx for idx, ax_id in enumerate(self._axisId)
if self._menu.IsChecked(ax_id)]
return active
def updateButtonText(self, lst):
"""Update the list of selected axes in the menu button."""
self.SetLabel(
'Axes: ' + ','.join('%d' % (e + 1) for e in lst))
cursord = {
cursors.MOVE: wx.CURSOR_HAND,
cursors.HAND: wx.CURSOR_HAND,
cursors.POINTER: wx.CURSOR_ARROW,
cursors.SELECT_REGION: wx.CURSOR_CROSS,
cursors.WAIT: wx.CURSOR_WAIT,
}
class NavigationToolbar2Wx(NavigationToolbar2, wx.ToolBar):
def __init__(self, canvas):
wx.ToolBar.__init__(self, canvas.GetParent(), -1)
NavigationToolbar2.__init__(self, canvas)
self.canvas = canvas
self._idle = True
self.statbar = None
self.prevZoomRect = None
# for now, use alternate zoom-rectangle drawing on all
# Macs. N.B. In future versions of wx it may be possible to
# detect Retina displays with window.GetContentScaleFactor()
# and/or dc.GetContentScaleFactor()
self.retinaFix = 'wxMac' in wx.PlatformInfo
def get_canvas(self, frame, fig):
return type(self.canvas)(frame, -1, fig)
def _init_toolbar(self):
DEBUG_MSG("_init_toolbar", 1, self)
self._parent = self.canvas.GetParent()
self.wx_ids = {}
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.AddSeparator()
continue
self.wx_ids[text] = (
self.AddTool(
-1,
bitmap=_load_bitmap(image_file + ".png"),
bmpDisabled=wx.NullBitmap,
label=text, shortHelp=text, longHelp=tooltip_text,
kind=(wx.ITEM_CHECK if text in ["Pan", "Zoom"]
else wx.ITEM_NORMAL))
.Id)
self.Bind(wx.EVT_TOOL, getattr(self, callback),
id=self.wx_ids[text])
self.Realize()
def zoom(self, *args):
self.ToggleTool(self.wx_ids['Pan'], False)
NavigationToolbar2.zoom(self, *args)
def pan(self, *args):
self.ToggleTool(self.wx_ids['Zoom'], False)
NavigationToolbar2.pan(self, *args)
def configure_subplots(self, evt):
global FigureManager # placates pyflakes: created by @_Backend.export
frame = wx.Frame(None, -1, "Configure subplots")
_set_frame_icon(frame)
toolfig = Figure((6, 3))
canvas = self.get_canvas(frame, toolfig)
# Create a figure manager to manage things
FigureManager(canvas, 1, frame)
# Now put all into a sizer
sizer = wx.BoxSizer(wx.VERTICAL)
# This way of adding to sizer allows resizing
sizer.Add(canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
frame.SetSizer(sizer)
frame.Fit()
SubplotTool(self.canvas.figure, toolfig)
frame.Show()
def save_figure(self, *args):
# Fetch the required filename and file type.
filetypes, exts, filter_index = self.canvas._get_imagesave_wildcards()
default_file = self.canvas.get_default_filename()
dlg = wx.FileDialog(self.canvas.GetParent(),
"Save to file", "", default_file, filetypes,
wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
dlg.SetFilterIndex(filter_index)
if dlg.ShowModal() == wx.ID_OK:
dirname = dlg.GetDirectory()
filename = dlg.GetFilename()
DEBUG_MSG(
'Save file dir:%s name:%s' %
(dirname, filename), 3, self)
format = exts[dlg.GetFilterIndex()]
basename, ext = os.path.splitext(filename)
if ext.startswith('.'):
ext = ext[1:]
if ext in ('svg', 'pdf', 'ps', 'eps', 'png') and format != ext:
# looks like they forgot to set the image type drop
# down, going with the extension.
_log.warning('extension %s did not match the selected '
'image type %s; going with %s',
ext, format, ext)
format = ext
try:
self.canvas.figure.savefig(
os.path.join(dirname, filename), format=format)
except Exception as e:
error_msg_wx(str(e))
def set_cursor(self, cursor):
cursor = wx.Cursor(cursord[cursor])
self.canvas.SetCursor(cursor)
self.canvas.Update()
def press(self, event):
if self._active == 'ZOOM':
if not self.retinaFix:
self.wxoverlay = wx.Overlay()
else:
if event.inaxes is not None:
self.savedRetinaImage = self.canvas.copy_from_bbox(
event.inaxes.bbox)
self.zoomStartX = event.xdata
self.zoomStartY = event.ydata
self.zoomAxes = event.inaxes
def release(self, event):
if self._active == 'ZOOM':
# When the mouse is released we reset the overlay and it
# restores the former content to the window.
if not self.retinaFix:
self.wxoverlay.Reset()
del self.wxoverlay
else:
del self.savedRetinaImage
if self.prevZoomRect:
self.prevZoomRect.pop(0).remove()
self.prevZoomRect = None
if self.zoomAxes:
self.zoomAxes = None
def draw_rubberband(self, event, x0, y0, x1, y1):
if self.retinaFix: # On Macs, use the following code
# wx.DCOverlay does not work properly on Retina displays.
rubberBandColor = '#C0C0FF'
if self.prevZoomRect:
self.prevZoomRect.pop(0).remove()
self.canvas.restore_region(self.savedRetinaImage)
X0, X1 = self.zoomStartX, event.xdata
Y0, Y1 = self.zoomStartY, event.ydata
lineX = (X0, X0, X1, X1, X0)
lineY = (Y0, Y1, Y1, Y0, Y0)
self.prevZoomRect = self.zoomAxes.plot(
lineX, lineY, '-', color=rubberBandColor)
self.zoomAxes.draw_artist(self.prevZoomRect[0])
self.canvas.blit(self.zoomAxes.bbox)
return
# Use an Overlay to draw a rubberband-like bounding box.
dc = wx.ClientDC(self.canvas)
odc = wx.DCOverlay(self.wxoverlay, dc)
odc.Clear()
# Mac's DC is already the same as a GCDC, and it causes
# problems with the overlay if we try to use an actual
# wx.GCDC so don't try it.
if 'wxMac' not in wx.PlatformInfo:
dc = wx.GCDC(dc)
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
if y1 < y0:
y0, y1 = y1, y0
if x1 < x0:
x0, x1 = x1, x0
w = x1 - x0
h = y1 - y0
rect = wx.Rect(x0, y0, w, h)
rubberBandColor = '#C0C0FF' # or load from config?
# Set a pen for the border
color = wx.Colour(rubberBandColor)
dc.SetPen(wx.Pen(color, 1))
# use the same color, plus alpha for the brush
r, g, b, a = color.Get(True)
color.Set(r, g, b, 0x60)
dc.SetBrush(wx.Brush(color))
dc.DrawRectangle(rect)
def set_status_bar(self, statbar):
self.statbar = statbar
def set_message(self, s):
if self.statbar is not None:
self.statbar.set_function(s)
def set_history_buttons(self):
can_backward = self._nav_stack._pos > 0
can_forward = self._nav_stack._pos < len(self._nav_stack._elements) - 1
self.EnableTool(self.wx_ids['Back'], can_backward)
self.EnableTool(self.wx_ids['Forward'], can_forward)
class StatusBarWx(wx.StatusBar):
"""
    A status bar added to the figure frame so that measurements and the
    previously selected scroll function can be displayed as a convenience
    to the user.
"""
def __init__(self, parent, *args, **kwargs):
wx.StatusBar.__init__(self, parent, -1)
self.SetFieldsCount(2)
def set_function(self, string):
self.SetStatusText("%s" % string, 1)
# tools for matplotlib.backend_managers.ToolManager:
class ToolbarWx(ToolContainerBase, wx.ToolBar):
def __init__(self, toolmanager, parent, style=wx.TB_HORIZONTAL):
ToolContainerBase.__init__(self, toolmanager)
wx.ToolBar.__init__(self, parent, -1, style=style)
self._toolitems = {}
self._groups = {}
def add_toolitem(
self, name, group, position, image_file, description, toggle):
before, group = self._add_to_group(group, name, position)
idx = self.GetToolPos(before.Id)
if image_file:
bmp = _load_bitmap(image_file)
kind = wx.ITEM_NORMAL if not toggle else wx.ITEM_CHECK
tool = self.InsertTool(idx, -1, name, bmp, wx.NullBitmap, kind,
description or "")
else:
size = (self.GetTextExtent(name)[0]+10, -1)
if toggle:
control = wx.ToggleButton(self, -1, name, size=size)
else:
control = wx.Button(self, -1, name, size=size)
tool = self.InsertControl(idx, control, label=name)
self.Realize()
def handler(event):
self.trigger_tool(name)
if image_file:
self.Bind(wx.EVT_TOOL, handler, tool)
else:
control.Bind(wx.EVT_LEFT_DOWN, handler)
self._toolitems.setdefault(name, [])
group.insert(position, tool)
self._toolitems[name].append((tool, handler))
def _add_to_group(self, group, name, position):
gr = self._groups.get(group, [])
if not gr:
sep = self.AddSeparator()
gr.append(sep)
before = gr[position]
self._groups[group] = gr
return before, gr
def toggle_toolitem(self, name, toggled):
if name not in self._toolitems:
return
for tool, handler in self._toolitems[name]:
if not tool.IsControl():
self.ToggleTool(tool.Id, toggled)
else:
tool.GetControl().SetValue(toggled)
self.Refresh()
def remove_toolitem(self, name):
for tool, handler in self._toolitems[name]:
self.DeleteTool(tool.Id)
del self._toolitems[name]
class StatusbarWx(StatusbarBase, wx.StatusBar):
"""for use with ToolManager"""
def __init__(self, parent, *args, **kwargs):
StatusbarBase.__init__(self, *args, **kwargs)
wx.StatusBar.__init__(self, parent, -1)
self.SetFieldsCount(1)
self.SetStatusText("")
def set_message(self, s):
self.SetStatusText(s)
class ConfigureSubplotsWx(backend_tools.ConfigureSubplotsBase):
def trigger(self, *args):
self.configure_subplots()
def configure_subplots(self):
frame = wx.Frame(None, -1, "Configure subplots")
_set_frame_icon(frame)
toolfig = Figure((6, 3))
canvas = self.get_canvas(frame, toolfig)
# Now put all into a sizer
sizer = wx.BoxSizer(wx.VERTICAL)
# This way of adding to sizer allows resizing
sizer.Add(canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
frame.SetSizer(sizer)
frame.Fit()
SubplotTool(self.canvas.figure, toolfig)
frame.Show()
def get_canvas(self, frame, fig):
return type(self.canvas)(frame, -1, fig)
class SaveFigureWx(backend_tools.SaveFigureBase):
def trigger(self, *args):
NavigationToolbar2Wx.save_figure(
self._make_classic_style_pseudo_toolbar())
class SetCursorWx(backend_tools.SetCursorBase):
def set_cursor(self, cursor):
NavigationToolbar2Wx.set_cursor(
self._make_classic_style_pseudo_toolbar(), cursor)
if 'wxMac' not in wx.PlatformInfo:
# on most platforms, use overlay
class RubberbandWx(backend_tools.RubberbandBase):
def __init__(self, *args, **kwargs):
backend_tools.RubberbandBase.__init__(self, *args, **kwargs)
self.wxoverlay = None
def draw_rubberband(self, x0, y0, x1, y1):
# Use an Overlay to draw a rubberband-like bounding box.
if self.wxoverlay is None:
self.wxoverlay = wx.Overlay()
dc = wx.ClientDC(self.canvas)
odc = wx.DCOverlay(self.wxoverlay, dc)
odc.Clear()
dc = wx.GCDC(dc)
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
if y1 < y0:
y0, y1 = y1, y0
if x1 < x0:
x0, x1 = x1, x0
w = x1 - x0
h = y1 - y0
rect = wx.Rect(x0, y0, w, h)
rubberBandColor = '#C0C0FF' # or load from config?
# Set a pen for the border
color = wx.Colour(rubberBandColor)
dc.SetPen(wx.Pen(color, 1))
# use the same color, plus alpha for the brush
r, g, b, a = color.Get(True)
color.Set(r, g, b, 0x60)
dc.SetBrush(wx.Brush(color))
dc.DrawRectangle(rect)
def remove_rubberband(self):
if self.wxoverlay is None:
return
self.wxoverlay.Reset()
self.wxoverlay = None
else:
# on Mac OS retina displays DCOverlay does not work
# and dc.SetLogicalFunction does not have an effect on any display
# the workaround is to blit the full image for remove_rubberband
class RubberbandWx(backend_tools.RubberbandBase):
def __init__(self, *args, **kwargs):
backend_tools.RubberbandBase.__init__(self, *args, **kwargs)
self._rect = None
def draw_rubberband(self, x0, y0, x1, y1):
dc = wx.ClientDC(self.canvas)
# this would be required if the Canvas is a ScrolledWindow,
# which is not the case for now
# self.PrepareDC(dc)
# delete old rubberband
if self._rect:
self.remove_rubberband(dc)
# draw new rubberband
dc.SetPen(wx.Pen(wx.BLACK, 1, wx.SOLID))
dc.SetBrush(wx.TRANSPARENT_BRUSH)
self._rect = (x0, self.canvas._height-y0, x1-x0, -y1+y0)
dc.DrawRectangle(self._rect)
def remove_rubberband(self, dc=None):
if not self._rect:
return
if self.canvas.bitmap:
if dc is None:
dc = wx.ClientDC(self.canvas)
dc.DrawBitmap(self.canvas.bitmap, 0, 0)
# for testing the method on Windows, use this code instead:
# img = self.canvas.bitmap.ConvertToImage()
# bmp = img.ConvertToBitmap()
# dc.DrawBitmap(bmp, 0, 0)
self._rect = None
class _HelpDialog(wx.Dialog):
_instance = None # a reference to an open dialog singleton
headers = [("Action", "Shortcuts", "Description")]
widths = [100, 140, 300]
def __init__(self, parent, help_entries):
wx.Dialog.__init__(self, parent, title="Help",
style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
sizer = wx.BoxSizer(wx.VERTICAL)
grid_sizer = wx.FlexGridSizer(0, 3, 8, 6)
# create and add the entries
bold = self.GetFont().MakeBold()
for r, row in enumerate(self.headers + help_entries):
for (col, width) in zip(row, self.widths):
label = wx.StaticText(self, label=col)
if r == 0:
label.SetFont(bold)
label.Wrap(width)
grid_sizer.Add(label, 0, 0, 0)
# finalize layout, create button
sizer.Add(grid_sizer, 0, wx.ALL, 6)
OK = wx.Button(self, wx.ID_OK)
sizer.Add(OK, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALL, 8)
self.SetSizer(sizer)
sizer.Fit(self)
self.Layout()
self.Bind(wx.EVT_CLOSE, self.OnClose)
OK.Bind(wx.EVT_BUTTON, self.OnClose)
def OnClose(self, evt):
_HelpDialog._instance = None # remove global reference
self.DestroyLater()
evt.Skip()
@classmethod
def show(cls, parent, help_entries):
# if no dialog is shown, create one; otherwise just re-raise it
if cls._instance:
cls._instance.Raise()
return
cls._instance = cls(parent, help_entries)
cls._instance.Show()
class HelpWx(backend_tools.ToolHelpBase):
def trigger(self, *args):
_HelpDialog.show(self.figure.canvas.GetTopLevelParent(),
self._get_help_entries())
class ToolCopyToClipboardWx(backend_tools.ToolCopyToClipboardBase):
def trigger(self, *args, **kwargs):
if not self.canvas._isDrawn:
self.canvas.draw()
if not self.canvas.bitmap.IsOk() or not wx.TheClipboard.Open():
return
try:
wx.TheClipboard.SetData(wx.BitmapDataObject(self.canvas.bitmap))
finally:
wx.TheClipboard.Close()
backend_tools.ToolSaveFigure = SaveFigureWx
backend_tools.ToolConfigureSubplots = ConfigureSubplotsWx
backend_tools.ToolSetCursor = SetCursorWx
backend_tools.ToolRubberband = RubberbandWx
backend_tools.ToolHelp = HelpWx
backend_tools.ToolCopyToClipboard = ToolCopyToClipboardWx
# Additions for printing support: Matt Newville
@cbook.deprecated("3.1")
class PrintoutWx(wx.Printout):
"""
Simple wrapper around wx Printout class -- all the real work
here is scaling the matplotlib canvas bitmap to the current
printer's definition.
"""
def __init__(self, canvas, width=5.5, margin=0.5, title='matplotlib'):
wx.Printout.__init__(self, title=title)
self.canvas = canvas
# width, in inches of output figure (approximate)
self.width = width
self.margin = margin
def HasPage(self, page):
        # currently only supports printing a single page
return page == 1
def GetPageInfo(self):
return (1, 1, 1, 1)
def OnPrintPage(self, page):
self.canvas.draw()
dc = self.GetDC()
ppw, pph = self.GetPPIPrinter() # printer's pixels per in
pgw, pgh = self.GetPageSizePixels() # page size in pixels
dcw, dch = dc.GetSize()
grw, grh = self.canvas.GetSize()
# save current figure dpi resolution and bg color,
# so that we can temporarily set them to the dpi of
# the printer, and the bg color to white
bgcolor = self.canvas.figure.get_facecolor()
fig_dpi = self.canvas.figure.dpi
# draw the bitmap, scaled appropriately
vscale = float(ppw) / fig_dpi
# set figure resolution,bg color for printer
self.canvas.figure.dpi = ppw
self.canvas.figure.set_facecolor('#FFFFFF')
renderer = RendererWx(self.canvas.bitmap, self.canvas.figure.dpi)
self.canvas.figure.draw(renderer)
self.canvas.bitmap.SetWidth(
int(self.canvas.bitmap.GetWidth() * vscale))
self.canvas.bitmap.SetHeight(
int(self.canvas.bitmap.GetHeight() * vscale))
self.canvas.draw()
# page may need additional scaling on preview
page_scale = 1.0
if self.IsPreview():
page_scale = float(dcw) / pgw
        # margin in pixels = (margin in inches) * (pixels per inch)
top_margin = int(self.margin * pph * page_scale)
left_margin = int(self.margin * ppw * page_scale)
# set scale so that width of output is self.width inches
        # (grw is the width of the canvas in pixels)
user_scale = (self.width * fig_dpi * page_scale) / float(grw)
dc.SetDeviceOrigin(left_margin, top_margin)
dc.SetUserScale(user_scale, user_scale)
        # this try/except avoids DrawBitmap API inconsistencies between wx versions
try:
dc.DrawBitmap(self.canvas.bitmap, 0, 0)
except Exception:
try:
dc.DrawBitmap(self.canvas.bitmap, (0, 0))
except Exception:
pass
# restore original figure resolution
self.canvas.figure.set_facecolor(bgcolor)
self.canvas.figure.dpi = fig_dpi
self.canvas.draw()
return True
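# Worked example for the scaling above (illustrative numbers): with a 600 dpi
# printer (ppw == 600), a 100 dpi figure and an 800-pixel-wide canvas
# (grw == 800), the bitmap is scaled by vscale == 600 / 100 == 6.0, and for
# width == 5.5 inches with no preview scaling the device scale is
# user_scale == (5.5 * 100 * 1.0) / 800 == 0.6875.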
@_Backend.export
class _BackendWx(_Backend):
required_interactive_framework = "wx"
FigureCanvas = FigureCanvasWx
FigureManager = FigureManagerWx
_frame_class = FigureFrameWx
@staticmethod
def trigger_manager_draw(manager):
manager.canvas.draw_idle()
@classmethod
def new_figure_manager(cls, num, *args, **kwargs):
# Create a wx.App instance if it has not been created so far.
wxapp = wx.GetApp()
if wxapp is None:
wxapp = wx.App(False)
wxapp.SetExitOnFrameDelete(True)
# Retain a reference to the app object so that it does not get
# garbage collected.
_BackendWx._theWxApp = wxapp
return super().new_figure_manager(num, *args, **kwargs)
@classmethod
def new_figure_manager_given_figure(cls, num, figure):
frame = cls._frame_class(num, figure)
figmgr = frame.get_figure_manager()
if matplotlib.is_interactive():
figmgr.frame.Show()
figure.canvas.draw_idle()
return figmgr
@staticmethod
def mainloop():
if not wx.App.IsMainLoopRunning():
wxapp = wx.GetApp()
if wxapp is not None:
wxapp.MainLoop()
|
9bf188fd6024ba533d29d25ad2b469988d902fead980f43fab6d03ebb3989aa2
|
import os
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, NavigationToolbar2,
TimerBase)
from matplotlib.figure import Figure
from matplotlib import rcParams
from matplotlib.widgets import SubplotTool
import matplotlib
from matplotlib.backends import _macosx
from .backend_agg import FigureCanvasAgg
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
class TimerMac(_macosx.Timer, TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses CoreFoundation
run loops for timer events.
Attributes
----------
interval : int
The time between timer events in milliseconds. Default is 1000 ms.
single_shot : bool
Boolean flag indicating whether this timer should operate as single
shot (run once and then stop). Defaults to False.
callbacks : list
Stores list of (func, args) tuples that will be called upon timer
events. This list can be manipulated directly, or the functions
`add_callback` and `remove_callback` can be used.
'''
# completely implemented at the C-level (in _macosx.Timer)
class FigureCanvasMac(_macosx.FigureCanvas, FigureCanvasAgg):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Events such as button presses, mouse movements, and key presses
are handled in the C code and the base class methods
button_press_event, button_release_event, motion_notify_event,
key_press_event, and key_release_event are called from there.
Attributes
----------
figure : `matplotlib.figure.Figure`
A high-level Figure instance
"""
def __init__(self, figure):
FigureCanvasBase.__init__(self, figure)
width, height = self.get_width_height()
_macosx.FigureCanvas.__init__(self, width, height)
self._device_scale = 1.0
def _set_device_scale(self, value):
if self._device_scale != value:
self.figure.dpi = self.figure.dpi / self._device_scale * value
self._device_scale = value
def _draw(self):
renderer = self.get_renderer(cleared=self.figure.stale)
if self.figure.stale:
self.figure.draw(renderer)
return renderer
def draw(self):
# docstring inherited
self.invalidate()
self.flush_events()
def draw_idle(self, *args, **kwargs):
# docstring inherited
self.invalidate()
def blit(self, bbox=None):
self.invalidate()
def resize(self, width, height):
dpi = self.figure.dpi
width /= dpi
height /= dpi
self.figure.set_size_inches(width * self._device_scale,
height * self._device_scale,
forward=False)
FigureCanvasBase.resize_event(self)
self.draw_idle()
def new_timer(self, *args, **kwargs):
# docstring inherited
return TimerMac(*args, **kwargs)
class FigureManagerMac(_macosx.FigureManager, FigureManagerBase):
"""
Wrap everything up into a window for the pylab interface
"""
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
title = "Figure %d" % num
_macosx.FigureManager.__init__(self, canvas, title)
if rcParams['toolbar'] == 'toolbar2':
self.toolbar = NavigationToolbar2Mac(canvas)
else:
self.toolbar = None
if self.toolbar is not None:
self.toolbar.update()
if matplotlib.is_interactive():
self.show()
self.canvas.draw_idle()
def close(self):
Gcf.destroy(self.num)
class NavigationToolbar2Mac(_macosx.NavigationToolbar2, NavigationToolbar2):
def __init__(self, canvas):
NavigationToolbar2.__init__(self, canvas)
def _init_toolbar(self):
basedir = os.path.join(rcParams['datapath'], "images")
_macosx.NavigationToolbar2.__init__(self, basedir)
def draw_rubberband(self, event, x0, y0, x1, y1):
self.canvas.set_rubberband(int(x0), int(y0), int(x1), int(y1))
def release(self, event):
self.canvas.remove_rubberband()
def set_cursor(self, cursor):
_macosx.set_cursor(cursor)
def save_figure(self, *args):
filename = _macosx.choose_save_file('Save the figure',
self.canvas.get_default_filename())
if filename is None: # Cancel
return
self.canvas.figure.savefig(filename)
def prepare_configure_subplots(self):
toolfig = Figure(figsize=(6, 3))
canvas = FigureCanvasMac(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
return canvas
def set_message(self, message):
_macosx.NavigationToolbar2.set_message(self, message.encode('utf-8'))
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
@_Backend.export
class _BackendMac(_Backend):
required_interactive_framework = "macosx"
FigureCanvas = FigureCanvasMac
FigureManager = FigureManagerMac
@staticmethod
def trigger_manager_draw(manager):
# For performance reasons, we don't want to redraw the figure after
# each draw command. Instead, we mark the figure as invalid, so that it
# will be redrawn as soon as the event loop resumes via PyOS_InputHook.
# This function should be called after each draw event, even if
# matplotlib is not running interactively.
manager.canvas.invalidate()
@staticmethod
def mainloop():
_macosx.show()
|
e4b9fe0a927ae52f20ac11c45b7814a9efcf70c9822a033e767d9d2ee96d1507
|
"""
Displays Agg images in the browser, with interactivity
"""
# The WebAgg backend is divided into two modules:
#
# - `backend_webagg_core.py` contains code necessary to embed a WebAgg
# plot inside of a web application, and communicate in an abstract
# way over a web socket.
#
# - `backend_webagg.py` contains a concrete implementation of a basic
# application, implemented with tornado.
import datetime
from io import StringIO
import json
import logging
import os
from pathlib import Path
import numpy as np
import tornado
from matplotlib import backend_bases, cbook, _png
from matplotlib.backends import backend_agg
from matplotlib.backend_bases import _Backend
_log = logging.getLogger(__name__)
# http://www.cambiaresearch.com/articles/15/javascript-char-codes-key-codes
_SHIFT_LUT = {59: ':',
61: '+',
173: '_',
186: ':',
187: '+',
188: '<',
189: '_',
190: '>',
191: '?',
192: '~',
219: '{',
220: '|',
221: '}',
222: '"'}
_LUT = {8: 'backspace',
9: 'tab',
13: 'enter',
16: 'shift',
17: 'control',
18: 'alt',
19: 'pause',
20: 'caps',
27: 'escape',
32: ' ',
33: 'pageup',
34: 'pagedown',
35: 'end',
36: 'home',
37: 'left',
38: 'up',
39: 'right',
40: 'down',
45: 'insert',
46: 'delete',
91: 'super',
92: 'super',
93: 'select',
106: '*',
107: '+',
109: '-',
110: '.',
111: '/',
144: 'num_lock',
145: 'scroll_lock',
186: ':',
187: '=',
188: ',',
189: '-',
190: '.',
191: '/',
192: '`',
219: '[',
220: '\\',
221: ']',
222: "'"}
def _handle_key(key):
"""Handle key codes"""
code = int(key[key.index('k') + 1:])
value = chr(code)
# letter keys
if 65 <= code <= 90:
if 'shift+' in key:
key = key.replace('shift+', '')
else:
value = value.lower()
# number keys
elif 48 <= code <= 57:
if 'shift+' in key:
value = ')!@#$%^&*('[int(value)]
key = key.replace('shift+', '')
# function keys
elif 112 <= code <= 123:
value = 'f%s' % (code - 111)
# number pad keys
elif 96 <= code <= 105:
value = '%s' % (code - 96)
# keys with shift alternatives
elif code in _SHIFT_LUT and 'shift+' in key:
key = key.replace('shift+', '')
value = _SHIFT_LUT[code]
elif code in _LUT:
value = _LUT[code]
key = key[:key.index('k')] + value
return key
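# Examples for _handle_key (illustrative, derived from the lookup tables
# above):
#   _handle_key('k65')       -> 'a'
#   _handle_key('shift+k65') -> 'A'
#   _handle_key('shift+k49') -> '!'
#   _handle_key('k13')       -> 'enter'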
class FigureCanvasWebAggCore(backend_agg.FigureCanvasAgg):
supports_blit = False
def __init__(self, *args, **kwargs):
backend_agg.FigureCanvasAgg.__init__(self, *args, **kwargs)
# Set to True when the renderer contains data that is newer
# than the PNG buffer.
self._png_is_old = True
# Set to True by the `refresh` message so that the next frame
# sent to the clients will be a full frame.
self._force_full = True
# Store the current image mode so that at any point, clients can
# request the information. This should be changed by calling
# self.set_image_mode(mode) so that the notification can be given
# to the connected clients.
self._current_image_mode = 'full'
# Store the DPI ratio of the browser. This is the scaling that
# occurs automatically for all images on a HiDPI display.
self._dpi_ratio = 1
def show(self):
# show the figure window
from matplotlib.pyplot import show
show()
def draw(self):
self._png_is_old = True
try:
super().draw()
finally:
self.manager.refresh_all() # Swap the frames.
def draw_idle(self):
self.send_event("draw")
def set_image_mode(self, mode):
"""
Set the image mode for any subsequent images which will be sent
to the clients. The modes may currently be either 'full' or 'diff'.
Note: diff images may not contain transparency, therefore upon
draw this mode may be changed if the resulting image has any
transparent component.
"""
cbook._check_in_list(['full', 'diff'], mode=mode)
if self._current_image_mode != mode:
self._current_image_mode = mode
self.handle_send_image_mode(None)
def get_diff_image(self):
if self._png_is_old:
renderer = self.get_renderer()
# The buffer is created as type uint32 so that entire
# pixels can be compared in one numpy call, rather than
# needing to compare each plane separately.
buff = (np.frombuffer(renderer.buffer_rgba(), dtype=np.uint32)
.reshape((renderer.height, renderer.width)))
# If any pixels have transparency, we need to force a full
# draw as we cannot overlay new on top of old.
pixels = buff.view(dtype=np.uint8).reshape(buff.shape + (4,))
if self._force_full or np.any(pixels[:, :, 3] != 255):
self.set_image_mode('full')
output = buff
else:
self.set_image_mode('diff')
last_buffer = (np.frombuffer(self._last_renderer.buffer_rgba(),
dtype=np.uint32)
.reshape((renderer.height, renderer.width)))
diff = buff != last_buffer
output = np.where(diff, buff, 0)
# TODO: We should write a new version of write_png that
# handles the differencing inline
buff = _png.write_png(
output.view(dtype=np.uint8).reshape(output.shape + (4,)),
None, compression=6, filter=_png.PNG_FILTER_NONE)
# Swap the renderer frames
self._renderer, self._last_renderer = (
self._last_renderer, renderer)
self._force_full = False
self._png_is_old = False
return buff
def get_renderer(self, cleared=None):
# Mirrors super.get_renderer, but caches the old one
# so that we can do things such as produce a diff image
# in get_diff_image
_, _, w, h = self.figure.bbox.bounds
w, h = int(w), int(h)
key = w, h, self.figure.dpi
try:
self._lastKey, self._renderer
except AttributeError:
need_new_renderer = True
else:
need_new_renderer = (self._lastKey != key)
if need_new_renderer:
self._renderer = backend_agg.RendererAgg(
w, h, self.figure.dpi)
self._last_renderer = backend_agg.RendererAgg(
w, h, self.figure.dpi)
self._lastKey = key
elif cleared:
self._renderer.clear()
return self._renderer
def handle_event(self, event):
e_type = event['type']
handler = getattr(self, 'handle_{0}'.format(e_type),
self.handle_unknown_event)
return handler(event)
def handle_unknown_event(self, event):
_log.warning('Unhandled message type {0}. {1}'.format(
event['type'], event))
def handle_ack(self, event):
# Network latency tends to decrease if traffic is flowing
# in both directions. Therefore, the browser sends back
# an "ack" message after each image frame is received.
# This could also be used as a simple sanity check in the
# future, but for now the performance increase is enough
# to justify it, even if the server does nothing with it.
pass
def handle_draw(self, event):
self.draw()
def _handle_mouse(self, event):
x = event['x']
y = event['y']
y = self.get_renderer().height - y
# Javascript button numbers and matplotlib button numbers are
# off by 1
button = event['button'] + 1
# The right mouse button pops up a context menu, which
# doesn't work very well, so use the middle mouse button
# instead. It doesn't seem that it's possible to disable
# the context menu in recent versions of Chrome. If this
# is resolved, please also adjust the docstring in MouseEvent.
if button == 2:
button = 3
e_type = event['type']
guiEvent = event.get('guiEvent', None)
if e_type == 'button_press':
self.button_press_event(x, y, button, guiEvent=guiEvent)
elif e_type == 'button_release':
self.button_release_event(x, y, button, guiEvent=guiEvent)
elif e_type == 'motion_notify':
self.motion_notify_event(x, y, guiEvent=guiEvent)
elif e_type == 'figure_enter':
self.enter_notify_event(xy=(x, y), guiEvent=guiEvent)
elif e_type == 'figure_leave':
self.leave_notify_event()
elif e_type == 'scroll':
self.scroll_event(x, y, event['step'], guiEvent=guiEvent)
handle_button_press = handle_button_release = handle_motion_notify = \
handle_figure_enter = handle_figure_leave = handle_scroll = \
_handle_mouse
def _handle_key(self, event):
key = _handle_key(event['key'])
e_type = event['type']
guiEvent = event.get('guiEvent', None)
if e_type == 'key_press':
self.key_press_event(key, guiEvent=guiEvent)
elif e_type == 'key_release':
self.key_release_event(key, guiEvent=guiEvent)
handle_key_press = handle_key_release = _handle_key
def handle_toolbar_button(self, event):
# TODO: Be more suspicious of the input
getattr(self.toolbar, event['name'])()
def handle_refresh(self, event):
figure_label = self.figure.get_label()
if not figure_label:
figure_label = "Figure {0}".format(self.manager.num)
self.send_event('figure_label', label=figure_label)
self._force_full = True
self.draw_idle()
def handle_resize(self, event):
x, y = event.get('width', 800), event.get('height', 800)
x, y = int(x) * self._dpi_ratio, int(y) * self._dpi_ratio
fig = self.figure
# An attempt at approximating the figure size in pixels.
fig.set_size_inches(x / fig.dpi, y / fig.dpi, forward=False)
_, _, w, h = self.figure.bbox.bounds
# Acknowledge the resize, and force the viewer to update the
# canvas size to the figure's new size (which is hopefully
# identical or within a pixel or so).
self._png_is_old = True
self.manager.resize(w, h)
self.resize_event()
def handle_send_image_mode(self, event):
# The client requests notification of what the current image mode is.
self.send_event('image_mode', mode=self._current_image_mode)
def handle_set_dpi_ratio(self, event):
dpi_ratio = event.get('dpi_ratio', 1)
if dpi_ratio != self._dpi_ratio:
# We don't want to scale up the figure dpi more than once.
if not hasattr(self.figure, '_original_dpi'):
self.figure._original_dpi = self.figure.dpi
self.figure.dpi = dpi_ratio * self.figure._original_dpi
self._dpi_ratio = dpi_ratio
self._force_full = True
self.draw_idle()
def send_event(self, event_type, **kwargs):
self.manager._send_event(event_type, **kwargs)
_JQUERY_ICON_CLASSES = {
'home': 'ui-icon ui-icon-home',
'back': 'ui-icon ui-icon-circle-arrow-w',
'forward': 'ui-icon ui-icon-circle-arrow-e',
'zoom_to_rect': 'ui-icon ui-icon-search',
'move': 'ui-icon ui-icon-arrow-4',
'download': 'ui-icon ui-icon-disk',
None: None,
}
class NavigationToolbar2WebAgg(backend_bases.NavigationToolbar2):
# Use the standard toolbar items + download button
toolitems = [(text, tooltip_text, _JQUERY_ICON_CLASSES[image_file],
name_of_method)
for text, tooltip_text, image_file, name_of_method
in (backend_bases.NavigationToolbar2.toolitems +
(('Download', 'Download plot', 'download', 'download'),))
if image_file in _JQUERY_ICON_CLASSES]
def _init_toolbar(self):
self.message = ''
self.cursor = 0
def set_message(self, message):
if message != self.message:
self.canvas.send_event("message", message=message)
self.message = message
def set_cursor(self, cursor):
if cursor != self.cursor:
self.canvas.send_event("cursor", cursor=cursor)
self.cursor = cursor
def draw_rubberband(self, event, x0, y0, x1, y1):
self.canvas.send_event(
"rubberband", x0=x0, y0=y0, x1=x1, y1=y1)
def release_zoom(self, event):
backend_bases.NavigationToolbar2.release_zoom(self, event)
self.canvas.send_event(
"rubberband", x0=-1, y0=-1, x1=-1, y1=-1)
def save_figure(self, *args):
"""Save the current figure"""
self.canvas.send_event('save')
class FigureManagerWebAgg(backend_bases.FigureManagerBase):
ToolbarCls = NavigationToolbar2WebAgg
def __init__(self, canvas, num):
backend_bases.FigureManagerBase.__init__(self, canvas, num)
self.web_sockets = set()
self.toolbar = self._get_toolbar(canvas)
def show(self):
pass
def _get_toolbar(self, canvas):
toolbar = self.ToolbarCls(canvas)
return toolbar
def resize(self, w, h):
self._send_event(
'resize',
size=(w / self.canvas._dpi_ratio, h / self.canvas._dpi_ratio))
def set_window_title(self, title):
self._send_event('figure_label', label=title)
# The following methods are specific to FigureManagerWebAgg
def add_web_socket(self, web_socket):
assert hasattr(web_socket, 'send_binary')
assert hasattr(web_socket, 'send_json')
self.web_sockets.add(web_socket)
_, _, w, h = self.canvas.figure.bbox.bounds
self.resize(w, h)
self._send_event('refresh')
def remove_web_socket(self, web_socket):
self.web_sockets.remove(web_socket)
def handle_json(self, content):
self.canvas.handle_event(content)
def refresh_all(self):
if self.web_sockets:
diff = self.canvas.get_diff_image()
if diff is not None:
for s in self.web_sockets:
s.send_binary(diff)
@classmethod
def get_javascript(cls, stream=None):
if stream is None:
output = StringIO()
else:
output = stream
output.write((Path(__file__).parent / "web_backend/js/mpl.js")
.read_text(encoding="utf-8"))
toolitems = []
for name, tooltip, image, method in cls.ToolbarCls.toolitems:
if name is None:
toolitems.append(['', '', '', ''])
else:
toolitems.append([name, tooltip, image, method])
output.write("mpl.toolbar_items = {0};\n\n".format(
json.dumps(toolitems)))
extensions = []
for filetype, ext in sorted(FigureCanvasWebAggCore.
get_supported_filetypes_grouped().
items()):
if not ext[0] == 'pgf': # pgf does not support BytesIO
extensions.append(ext[0])
output.write("mpl.extensions = {0};\n\n".format(
json.dumps(extensions)))
output.write("mpl.default_extension = {0};".format(
json.dumps(FigureCanvasWebAggCore.get_default_filetype())))
if stream is None:
return output.getvalue()
@classmethod
def get_static_file_path(cls):
return os.path.join(os.path.dirname(__file__), 'web_backend')
def _send_event(self, event_type, **kwargs):
payload = {'type': event_type, **kwargs}
for s in self.web_sockets:
s.send_json(payload)
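# Usage sketch for FigureManagerWebAgg.get_javascript (illustrative; the
# output filename below is hypothetical): embedders can either collect the
# generated JavaScript as a string or stream it into a file-like object.
#
#     js_source = FigureManagerWebAgg.get_javascript()
#     with open("mpl_bundle.js", "w") as fh:
#         FigureManagerWebAgg.get_javascript(stream=fh)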
class TimerTornado(backend_bases.TimerBase):
def _timer_start(self):
self._timer_stop()
if self._single:
ioloop = tornado.ioloop.IOLoop.instance()
self._timer = ioloop.add_timeout(
datetime.timedelta(milliseconds=self.interval),
self._on_timer)
else:
self._timer = tornado.ioloop.PeriodicCallback(
self._on_timer,
self.interval)
self._timer.start()
def _timer_stop(self):
if self._timer is None:
return
elif self._single:
ioloop = tornado.ioloop.IOLoop.instance()
ioloop.remove_timeout(self._timer)
else:
self._timer.stop()
self._timer = None
def _timer_set_interval(self):
# Only stop and restart it if the timer has already been started
if self._timer is not None:
self._timer_stop()
self._timer_start()
@_Backend.export
class _BackendWebAggCoreAgg(_Backend):
FigureCanvas = FigureCanvasWebAggCore
FigureManager = FigureManagerWebAgg
|
9892403718cc238a4ef71b9d983557c07c37a915c9f42375afbb965377d35bf2
|
import contextlib
from distutils.version import StrictVersion
import functools
import inspect
import os
from pathlib import Path
import shutil
import sys
import unittest
import warnings
import matplotlib as mpl
import matplotlib.style
import matplotlib.units
import matplotlib.testing
from matplotlib import cbook
from matplotlib import ft2font
from matplotlib import pyplot as plt
from matplotlib import ticker
from . import is_called_from_pytest
from .compare import comparable_formats, compare_images, make_test_filename
from .exceptions import ImageComparisonFailure
@contextlib.contextmanager
def _cleanup_cm():
orig_units_registry = matplotlib.units.registry.copy()
try:
with warnings.catch_warnings(), matplotlib.rc_context():
yield
finally:
matplotlib.units.registry.clear()
matplotlib.units.registry.update(orig_units_registry)
plt.close("all")
class CleanupTestCase(unittest.TestCase):
"""A wrapper for unittest.TestCase that includes cleanup operations."""
@classmethod
def setUpClass(cls):
cls._cm = _cleanup_cm().__enter__()
@classmethod
def tearDownClass(cls):
cls._cm.__exit__(None, None, None)
@cbook.deprecated("3.0")
class CleanupTest(object):
setup_class = classmethod(CleanupTestCase.setUpClass.__func__)
teardown_class = classmethod(CleanupTestCase.tearDownClass.__func__)
def test(self):
self._func()
def cleanup(style=None):
"""
A decorator to ensure that any global state is reset before
running a test.
Parameters
----------
style : str, optional
The name of the style to apply.
"""
# If cleanup is used without arguments, `style` will be a callable, and we
    # pass it directly to the wrapper generator. If cleanup is called with an
# argument, it is a string naming a style, and the function will be passed
# as an argument to what we return. This is a confusing, but somewhat
# standard, pattern for writing a decorator with optional arguments.
def make_cleanup(func):
if inspect.isgeneratorfunction(func):
@functools.wraps(func)
def wrapped_callable(*args, **kwargs):
with _cleanup_cm(), matplotlib.style.context(style):
yield from func(*args, **kwargs)
else:
@functools.wraps(func)
def wrapped_callable(*args, **kwargs):
with _cleanup_cm(), matplotlib.style.context(style):
func(*args, **kwargs)
return wrapped_callable
if isinstance(style, str):
return make_cleanup
else:
result = make_cleanup(style)
# Default of mpl_test_settings fixture and image_comparison too.
style = '_classic_test'
return result
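# Usage sketch for the cleanup decorator (hypothetical tests), showing both
# forms described in the comment above -- a bare decorator and a decorator
# called with a style name:
#
#     @cleanup
#     def test_default_style():
#         plt.plot([1, 2, 3])
#
#     @cleanup(style='ggplot')
#     def test_ggplot_style():
#         plt.plot([1, 2, 3])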
def check_freetype_version(ver):
if ver is None:
return True
if isinstance(ver, str):
ver = (ver, ver)
ver = [StrictVersion(x) for x in ver]
found = StrictVersion(ft2font.__freetype_version__)
return ver[0] <= found <= ver[1]
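# Examples for check_freetype_version (illustrative): passing None always
# succeeds, passing "2.6.1" matches only FreeType 2.6.1 exactly, and passing
# ("2.6", "2.9") accepts any installed version in that inclusive range.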
def _checked_on_freetype_version(required_freetype_version):
import pytest
reason = ("Mismatched version of freetype. "
"Test requires '%s', you have '%s'" %
(required_freetype_version, ft2font.__freetype_version__))
return pytest.mark.xfail(
not check_freetype_version(required_freetype_version),
reason=reason, raises=ImageComparisonFailure, strict=False)
def remove_ticks_and_titles(figure):
figure.suptitle("")
null_formatter = ticker.NullFormatter()
for ax in figure.get_axes():
ax.set_title("")
ax.xaxis.set_major_formatter(null_formatter)
ax.xaxis.set_minor_formatter(null_formatter)
ax.yaxis.set_major_formatter(null_formatter)
ax.yaxis.set_minor_formatter(null_formatter)
try:
ax.zaxis.set_major_formatter(null_formatter)
ax.zaxis.set_minor_formatter(null_formatter)
except AttributeError:
pass
def _raise_on_image_difference(expected, actual, tol):
__tracebackhide__ = True
err = compare_images(expected, actual, tol, in_decorator=True)
if not os.path.exists(expected):
raise ImageComparisonFailure('image does not exist: %s' % expected)
if err:
for key in ["actual", "expected"]:
err[key] = os.path.relpath(err[key])
raise ImageComparisonFailure(
'images not close (RMS %(rms).3f):\n\t%(actual)s\n\t%(expected)s '
% err)
def _skip_if_format_is_uncomparable(extension):
import pytest
return pytest.mark.skipif(
extension not in comparable_formats(),
reason='Cannot compare {} files on this system'.format(extension))
def _mark_skip_if_format_is_uncomparable(extension):
import pytest
if isinstance(extension, str):
name = extension
marks = []
elif isinstance(extension, tuple):
# Extension might be a pytest ParameterSet instead of a plain string.
# Unfortunately, this type is not exposed, so since it's a namedtuple,
# check for a tuple instead.
name, = extension.values
marks = [*extension.marks]
else:
# Extension might be a pytest marker instead of a plain string.
name, = extension.args
marks = [extension.mark]
return pytest.param(name,
marks=[*marks, _skip_if_format_is_uncomparable(name)])
class _ImageComparisonBase(object):
"""
Image comparison base class
This class provides *just* the comparison-related functionality and avoids
any code that would be specific to any testing framework.
"""
def __init__(self, tol, remove_text, savefig_kwargs):
self.func = self.baseline_dir = self.result_dir = None
self.tol = tol
self.remove_text = remove_text
self.savefig_kwargs = savefig_kwargs
def delayed_init(self, func):
        assert self.func is None, "it looks like the same decorator was used twice"
self.func = func
self.baseline_dir, self.result_dir = _image_directories(func)
def copy_baseline(self, baseline, extension):
baseline_path = os.path.join(self.baseline_dir, baseline)
orig_expected_fname = baseline_path + '.' + extension
if extension == 'eps' and not os.path.exists(orig_expected_fname):
orig_expected_fname = baseline_path + '.pdf'
expected_fname = make_test_filename(
os.path.join(self.result_dir,
os.path.basename(orig_expected_fname)),
'expected')
if os.path.exists(orig_expected_fname):
shutil.copyfile(orig_expected_fname, expected_fname)
else:
reason = ("Do not have baseline image {} because this "
"file does not exist: {}".format(expected_fname,
orig_expected_fname))
raise ImageComparisonFailure(reason)
return expected_fname
def compare(self, idx, baseline, extension):
__tracebackhide__ = True
fignum = plt.get_fignums()[idx]
fig = plt.figure(fignum)
if self.remove_text:
remove_ticks_and_titles(fig)
actual_fname = (
os.path.join(self.result_dir, baseline) + '.' + extension)
kwargs = self.savefig_kwargs.copy()
if extension == 'pdf':
kwargs.setdefault('metadata',
{'Creator': None, 'Producer': None,
'CreationDate': None})
fig.savefig(actual_fname, **kwargs)
expected_fname = self.copy_baseline(baseline, extension)
_raise_on_image_difference(expected_fname, actual_fname, self.tol)
@cbook.deprecated("3.0")
class ImageComparisonTest(CleanupTest, _ImageComparisonBase):
"""
Nose-based image comparison class
This class generates tests for a nose-based testing framework. Ideally,
this class would not be public, and the only publicly visible API would
be the :func:`image_comparison` decorator. Unfortunately, there are
existing downstream users of this class (e.g., pytest-mpl) so it cannot yet
be removed.
"""
def __init__(self, baseline_images, extensions, tol,
freetype_version, remove_text, savefig_kwargs, style):
_ImageComparisonBase.__init__(self, tol, remove_text, savefig_kwargs)
self.baseline_images = baseline_images
self.extensions = extensions
self.freetype_version = freetype_version
self.style = style
def setup(self):
func = self.func
plt.close('all')
self.setup_class()
try:
matplotlib.style.use(self.style)
matplotlib.testing.set_font_settings_for_testing()
func()
assert len(plt.get_fignums()) == len(self.baseline_images), (
"Test generated {} images but there are {} baseline images"
.format(len(plt.get_fignums()), len(self.baseline_images)))
except:
# Restore original settings before raising errors.
self.teardown_class()
raise
def teardown(self):
self.teardown_class()
def nose_runner(self):
func = self.compare
func = _checked_on_freetype_version(self.freetype_version)(func)
funcs = {extension: _skip_if_format_is_uncomparable(extension)(func)
for extension in self.extensions}
for idx, baseline in enumerate(self.baseline_images):
for extension in self.extensions:
yield funcs[extension], idx, baseline, extension
def __call__(self, func):
self.delayed_init(func)
import nose.tools
@functools.wraps(func)
@nose.tools.with_setup(self.setup, self.teardown)
def runner_wrapper():
yield from self.nose_runner()
return runner_wrapper
def _pytest_image_comparison(baseline_images, extensions, tol,
freetype_version, remove_text, savefig_kwargs,
style):
"""
Decorate function with image comparison for pytest.
This function creates a decorator that wraps a figure-generating function
with image comparison code. Pytest can become confused if we change the
signature of the function, so we indirectly pass anything we need via the
`mpl_image_comparison_parameters` fixture and extra markers.
"""
import pytest
extensions = map(_mark_skip_if_format_is_uncomparable, extensions)
def decorator(func):
@functools.wraps(func)
# Parameter indirection; see docstring above and comment below.
@pytest.mark.usefixtures('mpl_image_comparison_parameters')
@pytest.mark.parametrize('extension', extensions)
@pytest.mark.baseline_images(baseline_images)
# END Parameter indirection.
@pytest.mark.style(style)
@_checked_on_freetype_version(freetype_version)
@functools.wraps(func)
def wrapper(*args, **kwargs):
__tracebackhide__ = True
img = _ImageComparisonBase(tol=tol, remove_text=remove_text,
savefig_kwargs=savefig_kwargs)
img.delayed_init(func)
matplotlib.testing.set_font_settings_for_testing()
func(*args, **kwargs)
# Parameter indirection:
# This is hacked on via the mpl_image_comparison_parameters fixture
# so that we don't need to modify the function's real signature for
# any parametrization. Modifying the signature is very very tricky
# and likely to confuse pytest.
baseline_images, extension = func.parameters
assert len(plt.get_fignums()) == len(baseline_images), (
"Test generated {} images but there are {} baseline images"
.format(len(plt.get_fignums()), len(baseline_images)))
for idx, baseline in enumerate(baseline_images):
img.compare(idx, baseline, extension)
return wrapper
return decorator
def image_comparison(baseline_images, extensions=None, tol=0,
freetype_version=None, remove_text=False,
savefig_kwarg=None,
# Default of mpl_test_settings fixture and cleanup too.
style='_classic_test'):
"""
Compare images generated by the test with those specified in
*baseline_images*, which must correspond, else an `ImageComparisonFailure`
exception will be raised.
Parameters
----------
baseline_images : list or None
A list of strings specifying the names of the images generated by
calls to :meth:`matplotlib.figure.savefig`.
If *None*, the test function must use the ``baseline_images`` fixture,
either as a parameter or with `pytest.mark.usefixtures`. This value is
only allowed when using pytest.
extensions : None or list of str
The list of extensions to test, e.g. ``['png', 'pdf']``.
If *None*, defaults to all supported extensions: png, pdf, and svg.
In order to keep the size of the test suite from ballooning, we only
include the ``svg`` or ``pdf`` outputs if the test is explicitly
exercising a feature dependent on that backend (see also the
`check_figures_equal` decorator for that purpose).
tol : float, optional, default: 0
The RMS threshold above which the test is considered failed.
freetype_version : str or tuple
The expected freetype version or range of versions for this test to
pass.
remove_text : bool
Remove the title and tick text from the figure before comparison. This
is useful to make the baseline images independent of variations in text
rendering between different versions of FreeType.
This does not remove other, more deliberate, text, such as legends and
annotations.
savefig_kwarg : dict
Optional arguments that are passed to the savefig method.
style : string
Optional name for the base style to apply to the image test. The test
itself can also apply additional styles if desired. Defaults to the
'_classic_test' style.
"""
if extensions is None:
# default extensions to test
extensions = ['png', 'pdf', 'svg']
if savefig_kwarg is None:
        # default: no extra kwargs are passed to savefig
savefig_kwarg = dict()
if mpl.testing._wants_nose():
if baseline_images is None:
raise ValueError('baseline_images must be specified')
return ImageComparisonTest(
baseline_images=baseline_images, extensions=extensions, tol=tol,
freetype_version=freetype_version, remove_text=remove_text,
savefig_kwargs=savefig_kwarg, style=style)
else:
return _pytest_image_comparison(
baseline_images=baseline_images, extensions=extensions, tol=tol,
freetype_version=freetype_version, remove_text=remove_text,
savefig_kwargs=savefig_kwarg, style=style)
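# Usage sketch for image_comparison (hypothetical test; assumes a baseline
# image named 'simple_plot.png' already exists under the test module's
# baseline_images directory):
#
#     @image_comparison(baseline_images=['simple_plot'], extensions=['png'])
#     def test_simple_plot():
#         fig, ax = plt.subplots()
#         ax.plot([0, 1, 2], [0, 1, 4])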
def check_figures_equal(*, extensions=("png", "pdf", "svg"), tol=0):
"""
Decorator for test cases that generate and compare two figures.
The decorated function must take two arguments, *fig_test* and *fig_ref*,
and draw the test and reference images on them. After the function
returns, the figures are saved and compared.
This decorator should be preferred over `image_comparison` when possible in
order to keep the size of the test suite from ballooning.
Parameters
----------
extensions : list, default: ["png", "pdf", "svg"]
The extensions to test.
tol : float
The RMS threshold above which the test is considered failed.
Examples
--------
Check that calling `Axes.plot` with a single argument plots it against
``[0, 1, 2, ...]``::
@check_figures_equal()
def test_plot(fig_test, fig_ref):
fig_test.subplots().plot([1, 3, 5])
fig_ref.subplots().plot([0, 1, 2], [1, 3, 5])
"""
def decorator(func):
import pytest
_, result_dir = map(Path, _image_directories(func))
if len(inspect.signature(func).parameters) == 2:
# Free-standing function.
@pytest.mark.parametrize("ext", extensions)
def wrapper(ext):
fig_test = plt.figure("test")
fig_ref = plt.figure("reference")
func(fig_test, fig_ref)
test_image_path = str(
result_dir / (func.__name__ + "." + ext))
ref_image_path = str(
result_dir / (func.__name__ + "-expected." + ext))
fig_test.savefig(test_image_path)
fig_ref.savefig(ref_image_path)
_raise_on_image_difference(
ref_image_path, test_image_path, tol=tol)
elif len(inspect.signature(func).parameters) == 3:
# Method.
@pytest.mark.parametrize("ext", extensions)
def wrapper(self, ext):
fig_test = plt.figure("test")
fig_ref = plt.figure("reference")
func(self, fig_test, fig_ref)
test_image_path = str(
result_dir / (func.__name__ + "." + ext))
ref_image_path = str(
result_dir / (func.__name__ + "-expected." + ext))
fig_test.savefig(test_image_path)
fig_ref.savefig(ref_image_path)
_raise_on_image_difference(
ref_image_path, test_image_path, tol=tol)
return wrapper
return decorator
def _image_directories(func):
"""
Compute the baseline and result image directories for testing *func*.
For test module ``foo.bar.test_baz``, the baseline directory is at
``foo/bar/baseline_images/test_baz`` and the result directory at
``$(pwd)/result_images/test_baz``. The result directory is created if it
doesn't exist.
"""
module_path = Path(sys.modules[func.__module__].__file__)
baseline_dir = module_path.parent / "baseline_images" / module_path.stem
result_dir = Path().resolve() / "result_images" / module_path.stem
result_dir.mkdir(parents=True, exist_ok=True)
return str(baseline_dir), str(result_dir)
@cbook.deprecated("3.1", alternative="pytest.mark.backend")
def switch_backend(backend):
def switch_backend_decorator(func):
@functools.wraps(func)
def backend_switcher(*args, **kwargs):
try:
prev_backend = mpl.get_backend()
matplotlib.testing.setup()
plt.switch_backend(backend)
return func(*args, **kwargs)
finally:
plt.switch_backend(prev_backend)
return backend_switcher
return switch_backend_decorator
@cbook.deprecated("3.0")
def skip_if_command_unavailable(cmd):
"""
    Skip a test if a command is unavailable.
Parameters
----------
cmd : list of str
must be a complete command which should not
        return a non-zero exit code, something like
["latex", "-version"]
"""
from subprocess import check_output
try:
check_output(cmd)
except Exception:
import pytest
return pytest.mark.skip(reason='missing command: %s' % cmd[0])
return lambda f: f
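# Usage sketch for the deprecated helper above (hypothetical test):
#
#     @skip_if_command_unavailable(["latex", "-version"])
#     def test_needs_latex():
#         ...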
|
18cfb8916dd073418d773a8a8e6eaa9dc2be45663da2e8234000865c9cb154cc
|
"""
Provides utilities to test output reproducibility.
"""
import os
import re
import subprocess
import sys
import pytest
import matplotlib
from matplotlib import pyplot as plt
def _determinism_save(objects='mhi', format="pdf", usetex=False):
# save current value of SOURCE_DATE_EPOCH and set it
# to a constant value, so that time difference is not
# taken into account
sde = os.environ.pop('SOURCE_DATE_EPOCH', None)
os.environ['SOURCE_DATE_EPOCH'] = "946684800"
matplotlib.rcParams['text.usetex'] = usetex
fig = plt.figure()
if 'm' in objects:
# use different markers...
ax1 = fig.add_subplot(1, 6, 1)
x = range(10)
ax1.plot(x, [1] * 10, marker='D')
ax1.plot(x, [2] * 10, marker='x')
ax1.plot(x, [3] * 10, marker='^')
ax1.plot(x, [4] * 10, marker='H')
ax1.plot(x, [5] * 10, marker='v')
if 'h' in objects:
# also use different hatch patterns
ax2 = fig.add_subplot(1, 6, 2)
bars = (ax2.bar(range(1, 5), range(1, 5)) +
ax2.bar(range(1, 5), [6] * 4, bottom=range(1, 5)))
ax2.set_xticks([1.5, 2.5, 3.5, 4.5])
patterns = ('-', '+', 'x', '\\', '*', 'o', 'O', '.')
for bar, pattern in zip(bars, patterns):
bar.set_hatch(pattern)
if 'i' in objects:
# also use different images
A = [[1, 2, 3], [2, 3, 1], [3, 1, 2]]
fig.add_subplot(1, 6, 3).imshow(A, interpolation='nearest')
A = [[1, 3, 2], [1, 2, 3], [3, 1, 2]]
fig.add_subplot(1, 6, 4).imshow(A, interpolation='bilinear')
A = [[2, 3, 1], [1, 2, 3], [2, 1, 3]]
fig.add_subplot(1, 6, 5).imshow(A, interpolation='bicubic')
x = range(5)
fig.add_subplot(1, 6, 6).plot(x, x)
stdout = getattr(sys.stdout, 'buffer', sys.stdout)
fig.savefig(stdout, format=format)
# Restores SOURCE_DATE_EPOCH
if sde is None:
os.environ.pop('SOURCE_DATE_EPOCH', None)
else:
os.environ['SOURCE_DATE_EPOCH'] = sde
def _determinism_check(objects='mhi', format="pdf", usetex=False):
"""
    Output the same graphs three times and check that the outputs are
    exactly the same.
Parameters
----------
objects : str
contains characters corresponding to objects to be included in the test
document: 'm' for markers, 'h' for hatch patterns, 'i' for images. The
default value is "mhi", so that the test includes all these objects.
    format : str
        format string. The default value is "pdf".
    usetex : bool
        whether to render text with LaTeX. The default value is False.
    """
plots = []
for i in range(3):
result = subprocess.check_output([
sys.executable, '-R', '-c',
'import matplotlib; '
'matplotlib._called_from_pytest = True; '
'matplotlib.use(%r); '
'from matplotlib.testing.determinism import _determinism_save;'
'_determinism_save(%r, %r, %r)'
% (format, objects, format, usetex)])
plots.append(result)
for p in plots[1:]:
if usetex:
if p != plots[0]:
pytest.skip("failed, maybe due to ghostscript timestamps")
else:
assert p == plots[0]
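# Usage sketch (illustrative; typically called from a backend's test module):
#
#     _determinism_check('mhi', format="pdf")
#     _determinism_check('m', format="ps", usetex=True)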
def _determinism_source_date_epoch(format, string, keyword=b"CreationDate"):
"""
Test SOURCE_DATE_EPOCH support. Output a document with the environment
variable SOURCE_DATE_EPOCH set to 2000-01-01 00:00 UTC and check that the
document contains the timestamp that corresponds to this date (given as an
argument).
Parameters
----------
format : str
format string, such as "pdf".
string : str
timestamp string for 2000-01-01 00:00 UTC.
keyword : bytes
        a bytes string to look for when searching for the timestamp in the
        document (printed for debugging if the test fails).
"""
buff = subprocess.check_output([
sys.executable, '-R', '-c',
'import matplotlib; '
'matplotlib._called_from_pytest = True; '
'matplotlib.use(%r); '
'from matplotlib.testing.determinism import _determinism_save;'
'_determinism_save(%r, %r)'
% (format, "", format)])
find_keyword = re.compile(b".*" + keyword + b".*")
key = find_keyword.search(buff)
if key:
print(key.group())
else:
print("Timestamp keyword (%s) not found!" % keyword)
assert string in buff
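# Usage sketch (illustrative; the timestamp string is an assumption here and
# depends on how the chosen backend embeds SOURCE_DATE_EPOCH metadata):
#
#     _determinism_source_date_epoch("pdf", b"20000101")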
|
a9cc20b537caf6dba6c582edc82ce84a86cab395beb70d468e7e5f8dd0c96f92
|
"""
Helper functions for testing.
"""
import locale
import logging
import sys
import warnings
import matplotlib as mpl
from matplotlib import cbook
_log = logging.getLogger(__name__)
@cbook.deprecated("3.2")
def is_called_from_pytest():
"""Whether we are in a pytest run."""
return getattr(mpl, '_called_from_pytest', False)
def _wants_nose():
wants_nose = (not getattr(mpl, '_called_from_pytest', False)
and 'nose' in sys.modules)
if wants_nose:
cbook.warn_deprecated("3.2", name="support for nose-based tests")
return wants_nose
def set_font_settings_for_testing():
mpl.rcParams['font.family'] = 'DejaVu Sans'
mpl.rcParams['text.hinting'] = False
mpl.rcParams['text.hinting_factor'] = 8
def set_reproducibility_for_testing():
mpl.rcParams['svg.hashsalt'] = 'matplotlib'
def setup():
# The baseline images are created in this locale, so we should use
# it during all of the tests.
try:
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
except locale.Error:
try:
locale.setlocale(locale.LC_ALL, 'English_United States.1252')
except locale.Error:
_log.warning(
"Could not set locale to English/United States. "
"Some date-related tests may fail.")
mpl.use('Agg', force=True, warn=False) # use Agg backend for these tests
with cbook._suppress_matplotlib_deprecation_warning():
mpl.rcdefaults() # Start with all defaults
# These settings *must* be hardcoded for running the comparison tests and
# are not necessarily the default values as specified in rcsetup.py.
set_font_settings_for_testing()
set_reproducibility_for_testing()
|
ef64268e21c6ec3c314af9497fc99e8be8518ade401f734a874fa8b01a3861b6
|
class ImageComparisonFailure(AssertionError):
"""
    Raise this exception to mark a test as a failed comparison between
    two images.
"""
|
ebef759222cae27854b0229a2765907baa67e6620087c281b4886e1d5bed5798
|
import pytest
import matplotlib
from matplotlib import cbook
def pytest_configure(config):
matplotlib.use('agg', force=True)
matplotlib._called_from_pytest = True
matplotlib._init_tests()
def pytest_unconfigure(config):
matplotlib._called_from_pytest = False
@pytest.fixture(autouse=True)
def mpl_test_settings(request):
from matplotlib.testing.decorators import _cleanup_cm
with _cleanup_cm():
backend = None
backend_marker = request.node.get_closest_marker('backend')
if backend_marker is not None:
assert len(backend_marker.args) == 1, \
"Marker 'backend' must specify 1 backend."
backend, = backend_marker.args
prev_backend = matplotlib.get_backend()
style = '_classic_test' # Default of cleanup and image_comparison too.
style_marker = request.node.get_closest_marker('style')
if style_marker is not None:
assert len(style_marker.args) == 1, \
"Marker 'style' must specify 1 style."
style, = style_marker.args
matplotlib.testing.setup()
if backend is not None:
# This import must come after setup() so it doesn't load the
# default backend prematurely.
import matplotlib.pyplot as plt
try:
plt.switch_backend(backend)
except ImportError as exc:
# Should only occur for the cairo backend tests, if neither
# pycairo nor cairocffi are installed.
if 'cairo' in backend.lower():
pytest.skip("Failed to switch to backend {} ({})."
.format(backend, exc))
else:
raise
with cbook._suppress_matplotlib_deprecation_warning():
matplotlib.style.use(style)
try:
yield
finally:
if backend is not None:
plt.switch_backend(prev_backend)
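# Usage sketch (hypothetical test): the fixture above is autouse, so a test
# only needs markers to opt into a non-default backend or style.
#
#     @pytest.mark.backend('pdf')
#     @pytest.mark.style('ggplot')
#     def test_with_markers():
#         ...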
@pytest.fixture
def mpl_image_comparison_parameters(request, extension):
# This fixture is applied automatically by the image_comparison decorator.
#
# The sole purpose of this fixture is to provide an indirect method of
# obtaining parameters *without* modifying the decorated function
# signature. In this way, the function signature can stay the same and
# pytest won't get confused.
# We annotate the decorated function with any parameters captured by this
# fixture so that they can be used by the wrapper in image_comparison.
baseline_images, = request.node.get_closest_marker('baseline_images').args
if baseline_images is None:
# Allow baseline image list to be produced on the fly based on current
# parametrization.
baseline_images = request.getfixturevalue('baseline_images')
func = request.function
with cbook._setattr_cm(func.__wrapped__,
parameters=(baseline_images, extension)):
yield
@pytest.fixture
def pd():
"""Fixture to import and configure pandas."""
pd = pytest.importorskip('pandas')
try:
from pandas.plotting import (
deregister_matplotlib_converters as deregister)
deregister()
except ImportError:
pass
return pd
|
23f7b55972ca18c3c7338fc63e05f42d60a2a1dd08196549be77321887ec55c8
|
"""
Provides a collection of utilities for comparing (image) results.
"""
import atexit
import hashlib
import os
from pathlib import Path
import re
import shutil
import subprocess
import sys
from tempfile import TemporaryFile
import numpy as np
import matplotlib as mpl
from matplotlib.testing.exceptions import ImageComparisonFailure
from matplotlib import cbook
__all__ = ['compare_float', 'compare_images', 'comparable_formats']
def make_test_filename(fname, purpose):
"""
Make a new filename by inserting `purpose` before the file's
extension.
"""
base, ext = os.path.splitext(fname)
return '%s-%s%s' % (base, purpose, ext)
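# Example: make_test_filename('plot.png', 'expected') -> 'plot-expected.png'.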
@cbook.deprecated("3.0")
def compare_float(expected, actual, relTol=None, absTol=None):
"""
    Return an error message if the floating point values are not close
    enough, or None if they are.
You can specify a relative tolerance, absolute tolerance, or both.
"""
if relTol is None and absTol is None:
raise ValueError("You haven't specified a 'relTol' relative "
"tolerance or a 'absTol' absolute tolerance "
"function argument. You must specify one.")
msg = ""
if absTol is not None:
absDiff = abs(expected - actual)
if absTol < absDiff:
template = ['',
'Expected: {expected}',
'Actual: {actual}',
'Abs diff: {absDiff}',
'Abs tol: {absTol}']
msg += '\n '.join([line.format(**locals()) for line in template])
if relTol is not None:
# The relative difference of the two values. If the expected value is
        # zero, then fall back to the absolute value of the difference.
relDiff = abs(expected - actual)
if expected:
relDiff = relDiff / abs(expected)
if relTol < relDiff:
# The relative difference is a ratio, so it's always unit-less.
template = ['',
'Expected: {expected}',
'Actual: {actual}',
'Rel diff: {relDiff}',
'Rel tol: {relTol}']
msg += '\n '.join([line.format(**locals()) for line in template])
return msg or None
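# Examples for the deprecated helper above (illustrative):
#   compare_float(1.0, 1.04, relTol=0.1)  -> None (within tolerance)
#   compare_float(1.0, 1.5, absTol=0.1)   -> a multi-line failure message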
def get_cache_dir():
cachedir = mpl.get_cachedir()
if cachedir is None:
raise RuntimeError('Could not find a suitable configuration directory')
cache_dir = os.path.join(cachedir, 'test_cache')
try:
Path(cache_dir).mkdir(parents=True, exist_ok=True)
except IOError:
return None
if not os.access(cache_dir, os.W_OK):
return None
return cache_dir
def get_file_hash(path, block_size=2 ** 20):
md5 = hashlib.md5()
with open(path, 'rb') as fd:
while True:
data = fd.read(block_size)
if not data:
break
md5.update(data)
if path.endswith('.pdf'):
md5.update(str(mpl._get_executable_info("gs").version)
.encode('utf-8'))
elif path.endswith('.svg'):
md5.update(str(mpl._get_executable_info("inkscape").version)
.encode('utf-8'))
return md5.hexdigest()
def make_external_conversion_command(cmd):
def convert(old, new):
cmdline = cmd(old, new)
pipe = subprocess.Popen(cmdline, universal_newlines=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = pipe.communicate()
errcode = pipe.wait()
if not os.path.exists(new) or errcode:
msg = "Conversion command failed:\n%s\n" % ' '.join(cmdline)
if stdout:
msg += "Standard output:\n%s\n" % stdout
if stderr:
msg += "Standard error:\n%s\n" % stderr
raise IOError(msg)
return convert
# Modified from https://bugs.python.org/issue25567.
_find_unsafe_bytes = re.compile(br'[^a-zA-Z0-9_@%+=:,./-]').search
def _shlex_quote_bytes(b):
return (b if _find_unsafe_bytes(b) is None
else b"'" + b.replace(b"'", b"'\"'\"'") + b"'")
class _ConverterError(Exception):
pass
class _Converter(object):
def __init__(self):
self._proc = None
# Explicitly register deletion from an atexit handler because if we
# wait until the object is GC'd (which occurs later), then some module
        # globals (e.g. signal.SIGKILL) have already been set to None, and
# kill() doesn't work anymore...
atexit.register(self.__del__)
def __del__(self):
if self._proc:
self._proc.kill()
self._proc.wait()
for stream in filter(None, [self._proc.stdin,
self._proc.stdout,
self._proc.stderr]):
stream.close()
self._proc = None
def _read_until(self, terminator):
"""Read until the prompt is reached."""
buf = bytearray()
while True:
c = self._proc.stdout.read(1)
if not c:
raise _ConverterError
buf.extend(c)
if buf.endswith(terminator):
return bytes(buf[:-len(terminator)])
class _GSConverter(_Converter):
def __call__(self, orig, dest):
if not self._proc:
self._proc = subprocess.Popen(
[mpl._get_executable_info("gs").executable,
"-dNOPAUSE", "-sDEVICE=png16m"],
# As far as I can see, ghostscript never outputs to stderr.
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
try:
self._read_until(b"\nGS")
except _ConverterError:
raise OSError("Failed to start Ghostscript")
def encode_and_escape(name):
return (os.fsencode(name)
.replace(b"\\", b"\\\\")
.replace(b"(", br"\(")
.replace(b")", br"\)"))
self._proc.stdin.write(
b"<< /OutputFile ("
+ encode_and_escape(dest)
+ b") >> setpagedevice ("
+ encode_and_escape(orig)
+ b") run flush\n")
self._proc.stdin.flush()
# GS> if nothing left on the stack; GS<n> if n items left on the stack.
err = self._read_until(b"GS")
stack = self._read_until(b">")
if stack or not os.path.exists(dest):
stack_size = int(stack[1:]) if stack else 0
self._proc.stdin.write(b"pop\n" * stack_size)
            # Using the filesystem encoding should at least get the
            # filenames right.
raise ImageComparisonFailure(
(err + b"GS" + stack + b">")
.decode(sys.getfilesystemencoding(), "replace"))
class _SVGConverter(_Converter):
def __call__(self, orig, dest):
if (not self._proc # First run.
or self._proc.poll() is not None): # Inkscape terminated.
env = os.environ.copy()
# If one passes e.g. a png file to Inkscape, it will try to
# query the user for conversion options via a GUI (even with
# `--without-gui`). Unsetting `DISPLAY` prevents this (and causes
# GTK to crash and Inkscape to terminate, but that'll just be
# reported as a regular exception below).
env.pop("DISPLAY", None) # May already be unset.
# Do not load any user options.
env["INKSCAPE_PROFILE_DIR"] = os.devnull
# Old versions of Inkscape (0.48.3.1, used on Travis as of now)
# seem to sometimes deadlock when stderr is redirected to a pipe,
# so we redirect it to a temporary file instead. This is not
# necessary anymore as of Inkscape 0.92.1.
stderr = TemporaryFile()
self._proc = subprocess.Popen(
["inkscape", "--without-gui", "--shell"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=stderr, env=env)
# Slight abuse, but makes shutdown handling easier.
self._proc.stderr = stderr
try:
self._read_until(b"\n>")
except _ConverterError:
raise OSError("Failed to start Inkscape in interactive mode")
# Inkscape uses glib's `g_shell_parse_argv`, which has a consistent
# behavior across platforms, so we can just use `shlex.quote`.
orig_b, dest_b = map(_shlex_quote_bytes,
map(os.fsencode, [orig, dest]))
if b"\n" in orig_b or b"\n" in dest_b:
# Who knows whether the current folder name has a newline, or if
# our encoding is even ASCII compatible... Just fall back on the
# slow solution (Inkscape uses `fgets` so it will always stop at a
# newline).
return make_external_conversion_command(lambda old, new: [
'inkscape', '-z', old, '--export-png', new])(orig, dest)
self._proc.stdin.write(orig_b + b" --export-png=" + dest_b + b"\n")
self._proc.stdin.flush()
try:
self._read_until(b"\n>")
except _ConverterError:
# Inkscape's output is not localized but gtk's is, so the output
# stream probably has a mixed encoding. Using the filesystem
# encoding should at least get the filenames right...
            self._proc.stderr.seek(0)
            raise ImageComparisonFailure(
                self._proc.stderr.read().decode(
                    sys.getfilesystemencoding(), "replace"))
def _update_converter():
try:
mpl._get_executable_info("gs")
except FileNotFoundError:
pass
else:
converter['pdf'] = converter['eps'] = _GSConverter()
try:
mpl._get_executable_info("inkscape")
except FileNotFoundError:
pass
else:
converter['svg'] = _SVGConverter()
#: A dictionary that maps filename extensions to functions which
#: themselves map arguments `old` and `new` (filenames) to a list of strings.
#: The list can then be passed to Popen to convert files with that
#: extension to png format.
converter = {}
_update_converter()
def comparable_formats():
"""
Return the list of file formats that `.compare_images` can compare
on this system.
Returns
-------
supported_formats : list of str
E.g. ``['png', 'pdf', 'svg', 'eps']``.
"""
return ['png', *converter]
def convert(filename, cache):
"""
Convert the named file to png; return the name of the created file.
If *cache* is True, the result of the conversion is cached in
`matplotlib.get_cachedir() + '/test_cache/'`. The caching is based on a
hash of the exact contents of the input file. There is no limit on the
size of the cache, so it may need to be manually cleared periodically.
"""
base, extension = filename.rsplit('.', 1)
if extension not in converter:
reason = "Don't know how to convert %s files to png" % extension
if mpl.testing._wants_nose():
from nose import SkipTest
raise SkipTest(reason)
else:
import pytest
pytest.skip(reason)
newname = base + '_' + extension + '.png'
if not os.path.exists(filename):
raise IOError("'%s' does not exist" % filename)
# Only convert the file if the destination doesn't already exist or
# is out of date.
if (not os.path.exists(newname) or
os.stat(newname).st_mtime < os.stat(filename).st_mtime):
if cache:
cache_dir = get_cache_dir()
else:
cache_dir = None
if cache_dir is not None:
hash_value = get_file_hash(filename)
new_ext = os.path.splitext(newname)[1]
cached_file = os.path.join(cache_dir, hash_value + new_ext)
if os.path.exists(cached_file):
shutil.copyfile(cached_file, newname)
return newname
converter[extension](filename, newname)
if cache_dir is not None:
shutil.copyfile(newname, cached_file)
return newname
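# Example (illustrative, assuming a PDF-to-PNG converter is available):
# convert('figure.pdf', cache=False) writes 'figure_pdf.png' next to the
# input file (reusing it if already up to date) and returns that path.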
def crop_to_same(actual_path, actual_image, expected_path, expected_image):
# clip the images to the same size -- this is useful only when
# comparing eps to pdf
if actual_path[-7:-4] == 'eps' and expected_path[-7:-4] == 'pdf':
aw, ah, ad = actual_image.shape
ew, eh, ed = expected_image.shape
actual_image = actual_image[int(aw / 2 - ew / 2):int(
aw / 2 + ew / 2), int(ah / 2 - eh / 2):int(ah / 2 + eh / 2)]
return actual_image, expected_image
def calculate_rms(expected_image, actual_image):
"Calculate the per-pixel errors, then compute the root mean square error."
if expected_image.shape != actual_image.shape:
raise ImageComparisonFailure(
"Image sizes do not match expected size: {} "
"actual size {}".format(expected_image.shape, actual_image.shape))
# Convert to float to avoid overflowing finite integer types.
return np.sqrt(((expected_image - actual_image).astype(float) ** 2).mean())
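# Worked example (illustrative): for two 1x1 RGB images differing by 2 in a
# single channel, the per-element squared errors are (4, 0, 0), so the RMS is
# sqrt(4 / 3) ~= 1.15.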
def compare_images(expected, actual, tol, in_decorator=False):
"""
Compare two "image" files checking differences within a tolerance.
The two given filenames may point to files which are convertible to
PNG via the `.converter` dictionary. The underlying RMS is calculated
with the `.calculate_rms` function.
Parameters
----------
expected : str
The filename of the expected image.
actual : str
The filename of the actual image.
tol : float
The tolerance (a color value difference, where 255 is the
maximal difference). The test fails if the average pixel
difference is greater than this value.
in_decorator : bool
Determines the output format. If called from image_comparison
decorator, this should be True. (default=False)
Returns
-------
comparison_result : None or dict or str
Return *None* if the images are equal within the given tolerance.
If the images differ, the return value depends on *in_decorator*.
If *in_decorator* is true, a dict with the following entries is
returned:
- *rms*: The RMS of the image difference.
- *expected*: The filename of the expected image.
- *actual*: The filename of the actual image.
- *diff_image*: The filename of the difference image.
- *tol*: The comparison tolerance.
Otherwise, a human-readable multi-line string representation of this
information is returned.
Examples
--------
::
img1 = "./baseline/plot.png"
img2 = "./output/plot.png"
compare_images(img1, img2, 0.001)
"""
from matplotlib import _png
if not os.path.exists(actual):
raise Exception("Output image %s does not exist." % actual)
if os.stat(actual).st_size == 0:
raise Exception("Output image file %s is empty." % actual)
# Convert the image to png
extension = expected.split('.')[-1]
if not os.path.exists(expected):
raise IOError('Baseline image %r does not exist.' % expected)
if extension != 'png':
actual = convert(actual, False)
expected = convert(expected, True)
# open the image files and remove the alpha channel (if it exists)
expected_image = _png.read_png_int(expected)
actual_image = _png.read_png_int(actual)
expected_image = expected_image[:, :, :3]
actual_image = actual_image[:, :, :3]
actual_image, expected_image = crop_to_same(
actual, actual_image, expected, expected_image)
diff_image = make_test_filename(actual, 'failed-diff')
if tol <= 0:
if np.array_equal(expected_image, actual_image):
return None
# convert to signed integers, so that the images can be subtracted without
# overflow
expected_image = expected_image.astype(np.int16)
actual_image = actual_image.astype(np.int16)
rms = calculate_rms(expected_image, actual_image)
if rms <= tol:
return None
save_diff_image(expected, actual, diff_image)
results = dict(rms=rms, expected=str(expected),
actual=str(actual), diff=str(diff_image), tol=tol)
if not in_decorator:
# Then the results should be a string suitable for stdout.
template = ['Error: Image files did not match.',
'RMS Value: {rms}',
'Expected: \n {expected}',
'Actual: \n {actual}',
'Difference:\n {diff}',
'Tolerance: \n {tol}', ]
results = '\n '.join([line.format(**results) for line in template])
return results
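# Hedged usage sketch (not part of the original module): interpreting the
# return value of compare_images.  The file paths are hypothetical.
def _example_compare_images():
    result = compare_images('baseline/plot.png', 'output/plot.png',
                            tol=0.001, in_decorator=True)
    if result is None:
        return 'images match within tolerance'
    # With in_decorator=True a dict is returned on failure.
    return 'RMS {rms} exceeds tolerance {tol}; see {diff}'.format(**result)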
def save_diff_image(expected, actual, output):
'''
Parameters
----------
expected : str
File path of expected image.
actual : str
File path of actual image.
output : str
File path to save difference image to.
'''
# Drop alpha channels, similarly to compare_images.
from matplotlib import _png
expected_image = _png.read_png(expected)[..., :3]
actual_image = _png.read_png(actual)[..., :3]
actual_image, expected_image = crop_to_same(
actual, actual_image, expected, expected_image)
expected_image = np.array(expected_image).astype(float)
actual_image = np.array(actual_image).astype(float)
if expected_image.shape != actual_image.shape:
raise ImageComparisonFailure(
"Image sizes do not match expected size: {} "
"actual size {}".format(expected_image.shape, actual_image.shape))
abs_diff_image = np.abs(expected_image - actual_image)
# expand differences in luminance domain
abs_diff_image *= 255 * 10
save_image_np = np.clip(abs_diff_image, 0, 255).astype(np.uint8)
height, width, depth = save_image_np.shape
# The PDF renderer doesn't produce an alpha channel, but the
# matplotlib PNG writer requires one, so expand the array
if depth == 3:
with_alpha = np.empty((height, width, 4), dtype=np.uint8)
with_alpha[:, :, 0:3] = save_image_np
save_image_np = with_alpha
# Hard-code the alpha channel to fully solid
save_image_np[:, :, 3] = 255
_png.write_png(save_image_np, output)
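# Hedged usage sketch (not part of the original module): writing the amplified
# absolute difference of two PNGs to disk.  The file paths are hypothetical.
def _example_save_diff_image():
    save_diff_image('baseline/plot.png', 'output/plot.png',
                    'output/plot-failed-diff.png')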
|
a6bc4b11602d5781d1d49cbd9657a1e50e4a816580789ede248ae194b5fd351a
|
# Originally from astropy project (http://astropy.org), under BSD
# 3-clause license.
import contextlib
import socket
# save original socket method for restoration
# These are global so that re-calling the turn_off_internet function doesn't
# overwrite them again
socket_original = socket.socket
socket_create_connection = socket.create_connection
socket_bind = socket.socket.bind
socket_connect = socket.socket.connect
INTERNET_OFF = False
# urllib2 uses a global variable to cache its default "opener" for opening
# connections for various protocols; we store it off here so we can restore to
# the default after re-enabling internet use
_orig_opener = None
# ::1 is apparently another valid name for localhost?
# it is returned by getaddrinfo when that function is given localhost
def check_internet_off(original_function):
"""
Wraps ``original_function``, which in most cases is assumed
to be a `socket.socket` method, to raise an `IOError` for any operations
on non-local AF_INET sockets.
"""
def new_function(*args, **kwargs):
if isinstance(args[0], socket.socket):
if args[0].family not in (socket.AF_INET, socket.AF_INET6):
# Should be fine in all but some very obscure cases
# More to the point, we don't want to affect AF_UNIX
# sockets.
return original_function(*args, **kwargs)
host = args[1][0]
addr_arg = 1
valid_hosts = ('localhost', '127.0.0.1', '::1')
else:
# The only other function this is used to wrap currently is
# socket.create_connection, which should be passed a 2-tuple, but
# we'll check just in case
if not (isinstance(args[0], tuple) and len(args[0]) == 2):
return original_function(*args, **kwargs)
host = args[0][0]
addr_arg = 0
valid_hosts = ('localhost', '127.0.0.1')
hostname = socket.gethostname()
fqdn = socket.getfqdn()
if host in (hostname, fqdn):
host = 'localhost'
new_addr = (host, args[addr_arg][1])
args = args[:addr_arg] + (new_addr,) + args[addr_arg + 1:]
if any(h in host for h in valid_hosts):
return original_function(*args, **kwargs)
else:
raise IOError("An attempt was made to connect to the internet "
"by a test that was not marked `remote_data`.")
return new_function
def turn_off_internet(verbose=False):
"""
Disable internet access via python by preventing connections from being
created using the socket module. Presumably this could be worked around by
using some other means of accessing the internet, but all default python
modules (urllib, requests, etc.) use socket [citation needed].
"""
import urllib.request
global INTERNET_OFF
global _orig_opener
if INTERNET_OFF:
return
INTERNET_OFF = True
__tracebackhide__ = True
if verbose:
print("Internet access disabled")
# Update urllib2 to force it not to use any proxies
# Must use {} here (the default of None will kick off an automatic search
# for proxies)
_orig_opener = urllib.request.build_opener()
no_proxy_handler = urllib.request.ProxyHandler({})
opener = urllib.request.build_opener(no_proxy_handler)
urllib.request.install_opener(opener)
socket.create_connection = check_internet_off(socket_create_connection)
socket.socket.bind = check_internet_off(socket_bind)
socket.socket.connect = check_internet_off(socket_connect)
return socket
def turn_on_internet(verbose=False):
"""
Restore internet access. Not used, but kept in case it is needed.
"""
import urllib.request
global INTERNET_OFF
global _orig_opener
if not INTERNET_OFF:
return
INTERNET_OFF = False
if verbose:
print("Internet access enabled")
urllib.request.install_opener(_orig_opener)
socket.create_connection = socket_create_connection
socket.socket.bind = socket_bind
socket.socket.connect = socket_connect
return socket
@contextlib.contextmanager
def no_internet(verbose=False):
"""Context manager to temporarily disable internet access (if not already
disabled). If it was already disabled before entering the context manager
(i.e. `turn_off_internet` was called previously) then this is a no-op and
leaves internet access disabled until a manual call to `turn_on_internet`.
"""
already_disabled = INTERNET_OFF
turn_off_internet(verbose=verbose)
try:
yield
finally:
if not already_disabled:
turn_on_internet(verbose=verbose)
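# Hedged usage sketch (not part of the original module): the context manager is
# the usual entry point.  The URL is hypothetical; while the block is active,
# any attempted non-local connection raises IOError.
def _example_no_internet():
    import urllib.request
    with no_internet():
        try:
            urllib.request.urlopen('http://example.com')
        except IOError:
            return 'non-local connections are blocked'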
|
0b18db403ae03c2b8561883efb9cd43c21a4a0723d915d8cc2c4b63236377618
|
from collections import OrderedDict
import itertools
import logging
import math
from numbers import Real
from operator import attrgetter
import types
import numpy as np
import matplotlib as mpl
from matplotlib import cbook, rcParams
from matplotlib.cbook import _OrderedSet, _check_1d, index_of, get_label
from matplotlib import docstring
import matplotlib.colors as mcolors
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.artist as martist
import matplotlib.transforms as mtransforms
import matplotlib.ticker as mticker
import matplotlib.axis as maxis
import matplotlib.scale as mscale
import matplotlib.spines as mspines
import matplotlib.font_manager as font_manager
import matplotlib.text as mtext
import matplotlib.image as mimage
from matplotlib.rcsetup import cycler, validate_axisbelow
_log = logging.getLogger(__name__)
def _process_plot_format(fmt):
"""
Convert a MATLAB style color/line style format string to a (*linestyle*,
*marker*, *color*) tuple.
Example format strings include:
* 'ko': black circles
* '.b': blue dots
* 'r--': red dashed lines
* 'C2--': the third color in the color cycle, dashed lines
See Also
--------
matplotlib.lines.Line2D.lineStyles, matplotlib.colors.cnames
All possible styles and color format strings.
"""
linestyle = None
marker = None
color = None
# Is fmt just a colorspec?
try:
color = mcolors.to_rgba(fmt)
# We need to differentiate grayscale '1.0' from tri_down marker '1'
try:
fmtint = str(int(fmt))
except ValueError:
return linestyle, marker, color # Yes
else:
if fmt != fmtint:
# user definitely doesn't want tri_down marker
return linestyle, marker, color # Yes
else:
# ignore converted color
color = None
except ValueError:
pass # No, not just a color.
i = 0
while i < len(fmt):
c = fmt[i]
if fmt[i:i+2] in mlines.lineStyles: # First, the two-char styles.
if linestyle is not None:
raise ValueError(
'Illegal format string "%s"; two linestyle symbols' % fmt)
linestyle = fmt[i:i+2]
i += 2
elif c in mlines.lineStyles:
if linestyle is not None:
raise ValueError(
'Illegal format string "%s"; two linestyle symbols' % fmt)
linestyle = c
i += 1
elif c in mlines.lineMarkers:
if marker is not None:
raise ValueError(
'Illegal format string "%s"; two marker symbols' % fmt)
marker = c
i += 1
elif c in mcolors.get_named_colors_mapping():
if color is not None:
raise ValueError(
'Illegal format string "%s"; two color symbols' % fmt)
color = c
i += 1
elif c == 'C' and i < len(fmt) - 1:
color_cycle_number = int(fmt[i + 1])
color = mcolors.to_rgba("C{}".format(color_cycle_number))
i += 2
else:
raise ValueError(
'Unrecognized character %c in format string' % c)
if linestyle is None and marker is None:
linestyle = rcParams['lines.linestyle']
if linestyle is None:
linestyle = 'None'
if marker is None:
marker = 'None'
return linestyle, marker, color
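# Hedged sketch (not part of the original module): what the format-string
# parser returns for a few common shorthands.
def _example_process_plot_format():
    assert _process_plot_format('r--') == ('--', 'None', 'r')
    assert _process_plot_format('ko') == ('None', 'o', 'k')
    # A bare color spec such as 'C2' returns the RGBA tuple and leaves
    # linestyle and marker as None.
    linestyle, marker, color = _process_plot_format('C2')
    assert linestyle is None and marker is None
    return color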
class _process_plot_var_args(object):
"""
Process variable length arguments to the plot command, so that
plot commands like the following are supported::
plot(t, s)
plot(t1, s1, t2, s2)
plot(t1, s1, 'ko', t2, s2)
plot(t1, s1, 'ko', t2, s2, 'r--', t3, e3)
an arbitrary number of *x*, *y*, *fmt* are allowed
"""
def __init__(self, axes, command='plot'):
self.axes = axes
self.command = command
self.set_prop_cycle()
def __getstate__(self):
# note: it is not possible to pickle a generator (and thus a cycler).
return {'axes': self.axes, 'command': self.command}
def __setstate__(self, state):
self.__dict__ = state.copy()
self.set_prop_cycle()
def set_prop_cycle(self, *args, **kwargs):
# Can't do `args == (None,)` as that crashes cycler.
if not (args or kwargs) or (len(args) == 1 and args[0] is None):
prop_cycler = rcParams['axes.prop_cycle']
else:
prop_cycler = cycler(*args, **kwargs)
self.prop_cycler = itertools.cycle(prop_cycler)
# This should make a copy
self._prop_keys = prop_cycler.keys
def __call__(self, *args, **kwargs):
# Process units.
if self.axes.xaxis is not None and self.axes.yaxis is not None:
xunits = kwargs.pop('xunits', self.axes.xaxis.units)
if self.axes.name == 'polar':
xunits = kwargs.pop('thetaunits', xunits)
if xunits != self.axes.xaxis.units:
self.axes.xaxis.set_units(xunits)
yunits = kwargs.pop('yunits', self.axes.yaxis.units)
if self.axes.name == 'polar':
yunits = kwargs.pop('runits', yunits)
if yunits != self.axes.yaxis.units:
self.axes.yaxis.set_units(yunits)
for pos_only in "xy":
if pos_only in kwargs:
raise TypeError("{} got an unexpected keyword argument {!r}"
.format(self.command, pos_only))
if not args:
return
# Process the 'data' kwarg.
data = kwargs.pop("data", None)
if data is not None:
replaced = [mpl._replacer(data, arg) for arg in args]
if len(args) == 1:
label_namer_idx = 0
elif len(args) == 2: # Can be x, y or y, c.
# Figure out what the second argument is.
# 1) If the second argument cannot be a format shorthand, the
# second argument is the label_namer.
# 2) Otherwise (it could have been a format shorthand),
# a) if we did perform a substitution, emit a warning, and
# use it as label_namer.
# b) otherwise, it is indeed a format shorthand; use the
# first argument as label_namer.
try:
_process_plot_format(args[1])
except ValueError: # case 1)
label_namer_idx = 1
else:
if replaced[1] is not args[1]: # case 2a)
cbook._warn_external(
"Second argument {!r} is ambiguous: could be a "
"color spec but is in data; using as data. "
"Either rename the entry in data or use three "
"arguments to plot.".format(args[1]),
RuntimeWarning)
label_namer_idx = 1
else: # case 2b)
label_namer_idx = 0
elif len(args) == 3:
label_namer_idx = 1
else:
raise ValueError(
"Using arbitrary long args with data is not supported due "
"to ambiguity of arguments; use multiple plotting calls "
"instead")
if kwargs.get("label") is None:
kwargs["label"] = mpl._label_from_arg(
replaced[label_namer_idx], args[label_namer_idx])
args = replaced
# Repeatedly grab (x, y) or (x, y, format) from the front of args and
# massage them into arguments to plot() or fill().
while args:
this, args = args[:2], args[2:]
if args and isinstance(args[0], str):
this += args[0],
args = args[1:]
yield from self._plot_args(this, kwargs)
def get_next_color(self):
"""Return the next color in the cycle."""
if 'color' not in self._prop_keys:
return 'k'
return next(self.prop_cycler)['color']
def _xy_from_xy(self, x, y):
if self.axes.xaxis is not None and self.axes.yaxis is not None:
bx = self.axes.xaxis.update_units(x)
by = self.axes.yaxis.update_units(y)
if self.command != 'plot':
# the Line2D class can handle unitized data, with
# support for post hoc unit changes etc. Other mpl
# artists, e.g., Polygon which _process_plot_var_args
# also serves on calls to fill, cannot. So this is a
# hack to say: if you are not "plot", which is
# creating Line2D, then convert the data now to
# floats. If you are plot, pass the raw data through
# to Line2D which will handle the conversion. So
# polygons will not support post hoc conversions of
# the unit type since they are not storing the orig
# data. Hopefully we can rationalize this at a later
# date - JDH
if bx:
x = self.axes.convert_xunits(x)
if by:
y = self.axes.convert_yunits(y)
# like asanyarray, but converts scalar to array, and doesn't change
# existing compatible sequences
x = _check_1d(x)
y = _check_1d(y)
if x.shape[0] != y.shape[0]:
raise ValueError("x and y must have same first dimension, but "
"have shapes {} and {}".format(x.shape, y.shape))
if x.ndim > 2 or y.ndim > 2:
raise ValueError("x and y can be no greater than 2-D, but have "
"shapes {} and {}".format(x.shape, y.shape))
if x.ndim == 1:
x = x[:, np.newaxis]
if y.ndim == 1:
y = y[:, np.newaxis]
return x, y
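# Illustrative note (not part of the original code): _xy_from_xy always returns
# 2-D column arrays so callers can iterate over columns, e.g. x with shape
# (50,) and y with shape (50, 3) come back as (50, 1) and (50, 3); mismatched
# first dimensions raise ValueError.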
def _getdefaults(self, ignore, kw):
"""
If some keys in the property cycle (excluding those in the set
*ignore*) are absent or set to None in the dict *kw*, return a copy
of the next entry in the property cycle, excluding keys in *ignore*.
Otherwise, don't advance the property cycle, and return an empty dict.
"""
prop_keys = self._prop_keys - ignore
if any(kw.get(k, None) is None for k in prop_keys):
# Need to copy this dictionary or else the next time around
# in the cycle, the dictionary could be missing entries.
default_dict = next(self.prop_cycler).copy()
for p in ignore:
default_dict.pop(p, None)
else:
default_dict = {}
return default_dict
def _setdefaults(self, defaults, kw):
"""
Add to the dict *kw* the entries in the dict *defaults* that are absent
or set to None in *kw*.
"""
for k in defaults:
if kw.get(k, None) is None:
kw[k] = defaults[k]
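# Illustrative sketch (not part of the original code): together, _getdefaults
# and _setdefaults advance the property cycle only when a requested property
# is actually missing.  With the default color-only cycle, roughly:
#
#     kw = {'color': 'red'}
#     self._getdefaults(set(), kw)       # -> {}  (no cycle advance)
#     kw = {'color': None}
#     d = self._getdefaults(set(), kw)   # advances; e.g. {'color': '#1f77b4'}
#     self._setdefaults(d, kw)           # fills only the missing 'color'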
def _makeline(self, x, y, kw, kwargs):
kw = {**kw, **kwargs} # Don't modify the original kw.
default_dict = self._getdefaults(set(), kw)
self._setdefaults(default_dict, kw)
seg = mlines.Line2D(x, y, **kw)
return seg
def _makefill(self, x, y, kw, kwargs):
kw = kw.copy() # Don't modify the original kw.
kwargs = kwargs.copy()
# Ignore 'marker'-related properties as they aren't Polygon
# properties, but they are Line2D properties, and so they are
# likely to appear in the default cycler construction.
# This is applied to the defaults dictionary, as opposed to the other
# two dictionaries, because we do want to catch the case where a *user*
# explicitly specifies a marker, which should be an error.
# We also want to prevent advancing the cycler if there are no
# defaults needed after ignoring the given properties.
ignores = {'marker', 'markersize', 'markeredgecolor',
'markerfacecolor', 'markeredgewidth'}
# Also ignore anything provided by *kwargs*.
for k, v in kwargs.items():
if v is not None:
ignores.add(k)
# Only using the first dictionary to use as basis
# for getting defaults for back-compat reasons.
# Doing it with both seems to mess things up in
# various places (probably due to logic bugs elsewhere).
default_dict = self._getdefaults(ignores, kw)
self._setdefaults(default_dict, kw)
# Looks like we don't want "color" to be interpreted to
# mean both facecolor and edgecolor for some reason.
# So the "kw" dictionary is thrown out, and only its
# 'color' value is kept and translated as a 'facecolor'.
# This design should probably be revisited as it increases
# complexity.
facecolor = kw.get('color', None)
# Throw out 'color' as it is now handled as a facecolor
default_dict.pop('color', None)
# To get other properties set from the cycler
# modify the kwargs dictionary.
self._setdefaults(default_dict, kwargs)
seg = mpatches.Polygon(np.column_stack((x, y)),
facecolor=facecolor,
fill=kwargs.get('fill', True),
closed=kw['closed'])
seg.set(**kwargs)
return seg
def _plot_args(self, tup, kwargs):
ret = []
if len(tup) > 1 and isinstance(tup[-1], str):
linestyle, marker, color = _process_plot_format(tup[-1])
tup = tup[:-1]
elif len(tup) == 3:
raise ValueError('third arg must be a format string')
else:
linestyle, marker, color = None, None, None
# Don't allow any None value; These will be up-converted
# to one element array of None which causes problems
# downstream.
if any(v is None for v in tup):
raise ValueError("x, y, and format string must not be None")
kw = {}
for k, v in zip(('linestyle', 'marker', 'color'),
(linestyle, marker, color)):
if v is not None:
kw[k] = v
if len(tup) == 2:
x = _check_1d(tup[0])
y = _check_1d(tup[-1])
else:
x, y = index_of(tup[-1])
x, y = self._xy_from_xy(x, y)
if self.command == 'plot':
func = self._makeline
else:
kw['closed'] = kwargs.get('closed', True)
func = self._makefill
ncx, ncy = x.shape[1], y.shape[1]
if ncx > 1 and ncy > 1 and ncx != ncy:
cbook.warn_deprecated(
"2.2", message="cycling among columns of inputs with "
"non-matching shapes is deprecated.")
for j in range(max(ncx, ncy)):
seg = func(x[:, j % ncx], y[:, j % ncy], kw, kwargs)
ret.append(seg)
return ret
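# Hedged usage sketch (not part of the original module): _process_plot_var_args
# is the machinery that lets Axes.plot accept repeated (x, y, fmt) groups and
# 2-D arrays.  The pyplot import is deferred so this helper has no effect at
# module import time.
def _example_plot_var_args():
    import matplotlib.pyplot as plt
    x = np.linspace(0, 1, 50)
    fig, ax = plt.subplots()
    # A (50, 3) y-array produces one Line2D per column, each taking the next
    # entry of the property cycle.
    lines = ax.plot(x, np.column_stack([x, x ** 2, x ** 3]))
    assert len(lines) == 3
    return lines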
class _AxesBase(martist.Artist):
name = "rectilinear"
_shared_x_axes = cbook.Grouper()
_shared_y_axes = cbook.Grouper()
_twinned_axes = cbook.Grouper()
def __str__(self):
return "{0}({1[0]:g},{1[1]:g};{1[2]:g}x{1[3]:g})".format(
type(self).__name__, self._position.bounds)
def __init__(self, fig, rect,
facecolor=None, # defaults to rc axes.facecolor
frameon=True,
sharex=None, # use Axes instance's xaxis info
sharey=None, # use Axes instance's yaxis info
label='',
xscale=None,
yscale=None,
**kwargs
):
"""
Build an axes in a figure.
Parameters
----------
fig : `~matplotlib.figure.Figure`
The axes is built in the `.Figure` *fig*.
rect : [left, bottom, width, height]
The axes is built in the rectangle *rect*. *rect* is in
`.Figure` coordinates.
sharex, sharey : `~.axes.Axes`, optional
The x or y `~.matplotlib.axis` is shared with the x or
y axis in the input `~.axes.Axes`.
frameon : bool, optional
True means that the axes frame is visible.
**kwargs
Other optional keyword arguments:
%(Axes)s
Returns
-------
axes : `~.axes.Axes`
The new `~.axes.Axes` object.
"""
martist.Artist.__init__(self)
if isinstance(rect, mtransforms.Bbox):
self._position = rect
else:
self._position = mtransforms.Bbox.from_bounds(*rect)
if self._position.width < 0 or self._position.height < 0:
raise ValueError('Width and height specified must be non-negative')
self._originalPosition = self._position.frozen()
self.axes = self
self._aspect = 'auto'
self._adjustable = 'box'
self._anchor = 'C'
self._sharex = sharex
self._sharey = sharey
if sharex is not None:
self._shared_x_axes.join(self, sharex)
if sharey is not None:
self._shared_y_axes.join(self, sharey)
self.set_label(label)
self.set_figure(fig)
self.set_axes_locator(kwargs.get("axes_locator", None))
self.spines = self._gen_axes_spines()
# this call may differ for non-separable axes, e.g., polar
self._init_axis()
if facecolor is None:
facecolor = rcParams['axes.facecolor']
self._facecolor = facecolor
self._frameon = frameon
self.set_axisbelow(rcParams['axes.axisbelow'])
self._rasterization_zorder = None
self.cla()
# funcs used to format x and y - fall back on major formatters
self.fmt_xdata = None
self.fmt_ydata = None
self.set_navigate(True)
self.set_navigate_mode(None)
if xscale:
self.set_xscale(xscale)
if yscale:
self.set_yscale(yscale)
self.update(kwargs)
if self.xaxis is not None:
self._xcid = self.xaxis.callbacks.connect(
'units finalize', lambda: self._on_units_changed(scalex=True))
if self.yaxis is not None:
self._ycid = self.yaxis.callbacks.connect(
'units finalize', lambda: self._on_units_changed(scaley=True))
self.tick_params(
top=rcParams['xtick.top'] and rcParams['xtick.minor.top'],
bottom=rcParams['xtick.bottom'] and rcParams['xtick.minor.bottom'],
labeltop=(rcParams['xtick.labeltop'] and
rcParams['xtick.minor.top']),
labelbottom=(rcParams['xtick.labelbottom'] and
rcParams['xtick.minor.bottom']),
left=rcParams['ytick.left'] and rcParams['ytick.minor.left'],
right=rcParams['ytick.right'] and rcParams['ytick.minor.right'],
labelleft=(rcParams['ytick.labelleft'] and
rcParams['ytick.minor.left']),
labelright=(rcParams['ytick.labelright'] and
rcParams['ytick.minor.right']),
which='minor')
self.tick_params(
top=rcParams['xtick.top'] and rcParams['xtick.major.top'],
bottom=rcParams['xtick.bottom'] and rcParams['xtick.major.bottom'],
labeltop=(rcParams['xtick.labeltop'] and
rcParams['xtick.major.top']),
labelbottom=(rcParams['xtick.labelbottom'] and
rcParams['xtick.major.bottom']),
left=rcParams['ytick.left'] and rcParams['ytick.major.left'],
right=rcParams['ytick.right'] and rcParams['ytick.major.right'],
labelleft=(rcParams['ytick.labelleft'] and
rcParams['ytick.major.left']),
labelright=(rcParams['ytick.labelright'] and
rcParams['ytick.major.right']),
which='major')
self._layoutbox = None
self._poslayoutbox = None
def __getstate__(self):
# The renderer should be re-created by the figure, and then cached at
# that point.
state = super().__getstate__()
for key in ['_layoutbox', '_poslayoutbox']:
state[key] = None
# Prune the sharing & twinning info to only contain the current group.
for grouper_name in [
'_shared_x_axes', '_shared_y_axes', '_twinned_axes']:
grouper = getattr(self, grouper_name)
state[grouper_name] = (grouper.get_siblings(self)
if self in grouper else None)
return state
def __setstate__(self, state):
# Merge the grouping info back into the global groupers.
for grouper_name in [
'_shared_x_axes', '_shared_y_axes', '_twinned_axes']:
siblings = state.pop(grouper_name)
if siblings:
getattr(self, grouper_name).join(*siblings)
self.__dict__ = state
self._stale = True
def get_window_extent(self, *args, **kwargs):
"""
Return the axes bounding box in display space; *args* and *kwargs*
are empty.
This bounding box does not include the spines, ticks, ticklabels,
or other labels. For a bounding box including these elements use
`~matplotlib.axes.Axes.get_tightbbox`.
See Also
--------
matplotlib.axes.Axes.get_tightbbox
matplotlib.axis.Axis.get_tightbbox
matplotlib.spines.Spine.get_window_extent
"""
return self.bbox
def _init_axis(self):
"move this out of __init__ because non-separable axes don't use it"
self.xaxis = maxis.XAxis(self)
self.spines['bottom'].register_axis(self.xaxis)
self.spines['top'].register_axis(self.xaxis)
self.yaxis = maxis.YAxis(self)
self.spines['left'].register_axis(self.yaxis)
self.spines['right'].register_axis(self.yaxis)
self._update_transScale()
def set_figure(self, fig):
"""
Set the `.Figure` for this `.Axes`.
Parameters
----------
fig : `.Figure`
"""
martist.Artist.set_figure(self, fig)
self.bbox = mtransforms.TransformedBbox(self._position,
fig.transFigure)
# these will be updated later as data is added
self.dataLim = mtransforms.Bbox.null()
self.viewLim = mtransforms.Bbox.unit()
self.transScale = mtransforms.TransformWrapper(
mtransforms.IdentityTransform())
self._set_lim_and_transforms()
def _set_lim_and_transforms(self):
"""
Set the *_xaxis_transform*, *_yaxis_transform*, *transScale*,
*transData*, *transLimits* and *transAxes* transformations.
.. note::
This method is primarily used by rectilinear projections of the
`~matplotlib.axes.Axes` class, and is meant to be overridden by
new kinds of projection axes that need different transformations
and limits. (See `~matplotlib.projections.polar.PolarAxes` for an
example.)
"""
self.transAxes = mtransforms.BboxTransformTo(self.bbox)
# Transforms the x and y axis separately by a scale factor.
# It is assumed that this part will have non-linear components
# (e.g., for a log scale).
self.transScale = mtransforms.TransformWrapper(
mtransforms.IdentityTransform())
# An affine transformation on the data, generally to limit the
# range of the axes
self.transLimits = mtransforms.BboxTransformFrom(
mtransforms.TransformedBbox(self.viewLim, self.transScale))
# The parentheses are important for efficiency here -- they
# group the last two (which are usually affines) separately
# from the first (which, with log-scaling can be non-affine).
self.transData = self.transScale + (self.transLimits + self.transAxes)
self._xaxis_transform = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
self._yaxis_transform = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
def get_xaxis_transform(self, which='grid'):
"""
Get the transformation used for drawing x-axis labels, ticks
and gridlines. The x-direction is in data coordinates and the
y-direction is in axis coordinates.
.. note::
This transformation is primarily used by the
`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
if which == 'grid':
return self._xaxis_transform
elif which == 'tick1':
# for cartesian projection, this is bottom spine
return self.spines['bottom'].get_spine_transform()
elif which == 'tick2':
# for cartesian projection, this is top spine
return self.spines['top'].get_spine_transform()
else:
raise ValueError('unknown value for which')
def get_xaxis_text1_transform(self, pad_points):
"""
Returns
-------
transform : Transform
The transform used for drawing x-axis labels, which will add
*pad_points* of padding (in points) between the axes and the label.
The x-direction is in data coordinates and the y-direction is in
axis coordinates.
valign : {'center', 'top', 'bottom', 'baseline', 'center_baseline'}
The text vertical alignment.
halign : {'center', 'left', 'right'}
The text horizontal alignment.
Notes
-----
This transformation is primarily used by the `~matplotlib.axis.Axis`
class, and is meant to be overridden by new kinds of projections that
may need to place axis elements in different locations.
"""
labels_align = rcParams["xtick.alignment"]
return (self.get_xaxis_transform(which='tick1') +
mtransforms.ScaledTranslation(0, -1 * pad_points / 72,
self.figure.dpi_scale_trans),
"top", labels_align)
def get_xaxis_text2_transform(self, pad_points):
"""
Returns
-------
transform : Transform
The transform used for drawing secondary x-axis labels, which will
add *pad_points* of padding (in points) between the axes and the
label. The x-direction is in data coordinates and the y-direction
is in axis coordinates.
valign : {'center', 'top', 'bottom', 'baseline', 'center_baseline'}
The text vertical alignment.
halign : {'center', 'left', 'right'}
The text horizontal alignment.
Notes
-----
This transformation is primarily used by the `~matplotlib.axis.Axis`
class, and is meant to be overridden by new kinds of projections that
may need to place axis elements in different locations.
"""
labels_align = rcParams["xtick.alignment"]
return (self.get_xaxis_transform(which='tick2') +
mtransforms.ScaledTranslation(0, pad_points / 72,
self.figure.dpi_scale_trans),
"bottom", labels_align)
def get_yaxis_transform(self, which='grid'):
"""
Get the transformation used for drawing y-axis labels, ticks
and gridlines. The x-direction is in axis coordinates and the
y-direction is in data coordinates.
.. note::
This transformation is primarily used by the
`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
if which == 'grid':
return self._yaxis_transform
elif which == 'tick1':
# for cartesian projection, this is bottom spine
return self.spines['left'].get_spine_transform()
elif which == 'tick2':
# for cartesian projection, this is top spine
return self.spines['right'].get_spine_transform()
else:
raise ValueError('unknown value for which')
def get_yaxis_text1_transform(self, pad_points):
"""
Returns
-------
transform : Transform
The transform used for drawing y-axis labels, which will add
*pad_points* of padding (in points) between the axes and the label.
The x-direction is in axis coordinates and the y-direction is in
data coordinates.
valign : {'center', 'top', 'bottom', 'baseline', 'center_baseline'}
The text vertical alignment.
halign : {'center', 'left', 'right'}
The text horizontal alignment.
Notes
-----
This transformation is primarily used by the `~matplotlib.axis.Axis`
class, and is meant to be overridden by new kinds of projections that
may need to place axis elements in different locations.
"""
labels_align = rcParams["ytick.alignment"]
return (self.get_yaxis_transform(which='tick1') +
mtransforms.ScaledTranslation(-1 * pad_points / 72, 0,
self.figure.dpi_scale_trans),
labels_align, "right")
def get_yaxis_text2_transform(self, pad_points):
"""
Returns
-------
transform : Transform
The transform used for drawing secondary y-axis labels, which will
add *pad_points* of padding (in points) between the axes and the
label. The x-direction is in axis coordinates and the y-direction
is in data coordinates.
valign : {'center', 'top', 'bottom', 'baseline', 'center_baseline'}
The text vertical alignment.
halign : {'center', 'left', 'right'}
The text horizontal alignment.
Notes
-----
This transformation is primarily used by the `~matplotlib.axis.Axis`
class, and is meant to be overridden by new kinds of projections that
may need to place axis elements in different locations.
"""
labels_align = rcParams["ytick.alignment"]
return (self.get_yaxis_transform(which='tick2') +
mtransforms.ScaledTranslation(pad_points / 72, 0,
self.figure.dpi_scale_trans),
labels_align, "left")
def _update_transScale(self):
self.transScale.set(
mtransforms.blended_transform_factory(
self.xaxis.get_transform(), self.yaxis.get_transform()))
for line in getattr(self, "lines", []): # Not set during init.
try:
line._transformed_path.invalidate()
except AttributeError:
pass
def get_position(self, original=False):
"""
Get a copy of the axes rectangle as a `.Bbox`.
Parameters
----------
original : bool
If ``True``, return the original position. Otherwise return the
active position. For an explanation of the positions see
`.set_position`.
Returns
-------
pos : `.Bbox`
"""
if original:
return self._originalPosition.frozen()
else:
locator = self.get_axes_locator()
if not locator:
self.apply_aspect()
return self._position.frozen()
def set_position(self, pos, which='both'):
"""
Set the axes position.
Axes have two position attributes. The 'original' position is the
position allocated for the Axes. The 'active' position is the
position the Axes is actually drawn at. These positions are usually
the same unless a fixed aspect is set to the Axes. See `.set_aspect`
for details.
Parameters
----------
pos : [left, bottom, width, height] or `~matplotlib.transforms.Bbox`
The new position of the Axes in `.Figure` coordinates.
which : {'both', 'active', 'original'}, optional
Determines which position variables to change.
"""
self._set_position(pos, which=which)
# because this is being called externally to the library we
# zero the constrained layout parts.
self._layoutbox = None
self._poslayoutbox = None
def _set_position(self, pos, which='both'):
"""
Private version of set_position. Call this internally
to get the same functionality as `set_position`, but without
taking the axes out of the constrained_layout
hierarchy.
"""
if not isinstance(pos, mtransforms.BboxBase):
pos = mtransforms.Bbox.from_bounds(*pos)
for ax in self._twinned_axes.get_siblings(self):
if which in ('both', 'active'):
ax._position.set(pos)
if which in ('both', 'original'):
ax._originalPosition.set(pos)
self.stale = True
def reset_position(self):
"""
Reset the active position to the original position.
This resets a possible position change due to aspect constraints.
For an explanation of the positions see `.set_position`.
"""
for ax in self._twinned_axes.get_siblings(self):
pos = ax.get_position(original=True)
ax.set_position(pos, which='active')
def set_axes_locator(self, locator):
"""
Set the axes locator.
Parameters
----------
locator : Callable[[Axes, Renderer], Bbox]
"""
self._axes_locator = locator
self.stale = True
def get_axes_locator(self):
"""
Return the axes_locator.
"""
return self._axes_locator
def _set_artist_props(self, a):
"""set the boilerplate props for artists added to axes"""
a.set_figure(self.figure)
if not a.is_transform_set():
a.set_transform(self.transData)
a.axes = self
if a.mouseover:
self._mouseover_set.add(a)
def _gen_axes_patch(self):
"""
Returns
-------
Patch
The patch used to draw the background of the axes. It is also used
as the clipping path for any data elements on the axes.
In the standard axes, this is a rectangle, but in other projections
it may not be.
Notes
-----
Intended to be overridden by new projection types.
"""
return mpatches.Rectangle((0.0, 0.0), 1.0, 1.0)
def _gen_axes_spines(self, locations=None, offset=0.0, units='inches'):
"""
Returns
-------
dict
Mapping of spine names to `Line2D` or `Patch` instances that are
used to draw axes spines.
In the standard axes, spines are single line segments, but in other
projections they may not be.
Notes
-----
Intended to be overridden by new projection types.
"""
return OrderedDict((side, mspines.Spine.linear_spine(self, side))
for side in ['left', 'right', 'bottom', 'top'])
def cla(self):
"""Clear the current axes."""
# Note: this is called by Axes.__init__()
# stash the current visibility state
if hasattr(self, 'patch'):
patch_visible = self.patch.get_visible()
else:
patch_visible = True
xaxis_visible = self.xaxis.get_visible()
yaxis_visible = self.yaxis.get_visible()
self.xaxis.cla()
self.yaxis.cla()
for name, spine in self.spines.items():
spine.cla()
self.ignore_existing_data_limits = True
self.callbacks = cbook.CallbackRegistry()
if self._sharex is not None:
# major and minor are axis.Ticker class instances with
# locator and formatter attributes
self.xaxis.major = self._sharex.xaxis.major
self.xaxis.minor = self._sharex.xaxis.minor
x0, x1 = self._sharex.get_xlim()
self.set_xlim(x0, x1, emit=False,
auto=self._sharex.get_autoscalex_on())
self.xaxis._scale = self._sharex.xaxis._scale
else:
self.xaxis._set_scale('linear')
try:
self.set_xlim(0, 1)
except TypeError:
pass
if self._sharey is not None:
self.yaxis.major = self._sharey.yaxis.major
self.yaxis.minor = self._sharey.yaxis.minor
y0, y1 = self._sharey.get_ylim()
self.set_ylim(y0, y1, emit=False,
auto=self._sharey.get_autoscaley_on())
self.yaxis._scale = self._sharey.yaxis._scale
else:
self.yaxis._set_scale('linear')
try:
self.set_ylim(0, 1)
except TypeError:
pass
# update the minor locator for x and y axis based on rcParams
if rcParams['xtick.minor.visible']:
self.xaxis.set_minor_locator(mticker.AutoMinorLocator())
if rcParams['ytick.minor.visible']:
self.yaxis.set_minor_locator(mticker.AutoMinorLocator())
if self._sharex is None:
self._autoscaleXon = True
if self._sharey is None:
self._autoscaleYon = True
self._xmargin = rcParams['axes.xmargin']
self._ymargin = rcParams['axes.ymargin']
self._tight = None
self._use_sticky_edges = True
self._update_transScale() # needed?
self._get_lines = _process_plot_var_args(self)
self._get_patches_for_fill = _process_plot_var_args(self, 'fill')
self._gridOn = rcParams['axes.grid']
self.lines = []
self.patches = []
self.texts = []
self.tables = []
self.artists = []
self.images = []
self._mouseover_set = _OrderedSet()
self.child_axes = []
self._current_image = None # strictly for pyplot via _sci, _gci
self.legend_ = None
self.collections = [] # collection.Collection instances
self.containers = []
self.grid(False) # Disable grid on init to use rcParameter
self.grid(self._gridOn, which=rcParams['axes.grid.which'],
axis=rcParams['axes.grid.axis'])
props = font_manager.FontProperties(
size=rcParams['axes.titlesize'],
weight=rcParams['axes.titleweight'])
self.title = mtext.Text(
x=0.5, y=1.0, text='',
fontproperties=props,
verticalalignment='baseline',
horizontalalignment='center',
)
self._left_title = mtext.Text(
x=0.0, y=1.0, text='',
fontproperties=props.copy(),
verticalalignment='baseline',
horizontalalignment='left', )
self._right_title = mtext.Text(
x=1.0, y=1.0, text='',
fontproperties=props.copy(),
verticalalignment='baseline',
horizontalalignment='right',
)
title_offset_points = rcParams['axes.titlepad']
# refactor this out so it can be called in ax.set_title if
# pad argument used...
self._set_title_offset_trans(title_offset_points)
# determine if the title position has been set manually:
self._autotitlepos = None
for _title in (self.title, self._left_title, self._right_title):
self._set_artist_props(_title)
# The patch draws the background of the axes. We want this to be below
# the other artists. We use the frame to draw the edges so we are
# setting the edgecolor to None.
self.patch = self._gen_axes_patch()
self.patch.set_figure(self.figure)
self.patch.set_facecolor(self._facecolor)
self.patch.set_edgecolor('None')
self.patch.set_linewidth(0)
self.patch.set_transform(self.transAxes)
self.set_axis_on()
self.xaxis.set_clip_path(self.patch)
self.yaxis.set_clip_path(self.patch)
self._shared_x_axes.clean()
self._shared_y_axes.clean()
if self._sharex:
self.xaxis.set_visible(xaxis_visible)
self.patch.set_visible(patch_visible)
if self._sharey:
self.yaxis.set_visible(yaxis_visible)
self.patch.set_visible(patch_visible)
self.stale = True
@cbook.deprecated("3.0")
@property
def mouseover_set(self):
return frozenset(self._mouseover_set)
def clear(self):
"""Clear the axes."""
self.cla()
def get_facecolor(self):
"""Get the facecolor of the Axes."""
return self.patch.get_facecolor()
get_fc = get_facecolor
def set_facecolor(self, color):
"""
Set the facecolor of the Axes.
Parameters
----------
color : color
"""
self._facecolor = color
self.stale = True
return self.patch.set_facecolor(color)
set_fc = set_facecolor
def _set_title_offset_trans(self, title_offset_points):
"""
Set the offset for the title either from rcParams['axes.titlepad']
or from set_title kwarg ``pad``.
"""
self.titleOffsetTrans = mtransforms.ScaledTranslation(
0.0, title_offset_points / 72,
self.figure.dpi_scale_trans)
for _title in (self.title, self._left_title, self._right_title):
_title.set_transform(self.transAxes + self.titleOffsetTrans)
_title.set_clip_box(None)
def set_prop_cycle(self, *args, **kwargs):
"""
Set the property cycle of the Axes.
The property cycle controls the style properties such as color,
marker and linestyle of future plot commands. The style properties
of data already added to the Axes are not modified.
Call signatures::
set_prop_cycle(cycler)
set_prop_cycle(label=values[, label2=values2[, ...]])
set_prop_cycle(label, values)
Form 1 sets the given `~cycler.Cycler` object.
Form 2 creates a `~cycler.Cycler` which cycles over one or more
properties simultaneously and sets it as the property cycle of the
axes. If multiple properties are given, their value lists must have
the same length. This is just a shortcut for explicitly creating a
cycler and passing it to the function, i.e. it's short for
``set_prop_cycle(cycler(label=values, label2=values2, ...))``.
Form 3 creates a `~cycler.Cycler` for a single property and sets it
as the property cycle of the axes. This form exists for compatibility
with the original `cycler.cycler` interface. Its use is discouraged
in favor of the kwarg form, i.e. ``set_prop_cycle(label=values)``.
Parameters
----------
cycler : Cycler
Set the given Cycler. *None* resets to the cycle defined by the
current style.
label : str
The property key. Must be a valid `.Artist` property.
For example, 'color' or 'linestyle'. Aliases are allowed,
such as 'c' for 'color' and 'lw' for 'linewidth'.
values : iterable
Finite-length iterable of the property values. These values
are validated and will raise a ValueError if invalid.
Examples
--------
Setting the property cycle for a single property:
>>> ax.set_prop_cycle(color=['red', 'green', 'blue'])
Setting the property cycle for simultaneously cycling over multiple
properties (e.g. red circle, green plus, blue cross):
>>> ax.set_prop_cycle(color=['red', 'green', 'blue'],
... marker=['o', '+', 'x'])
See Also
--------
matplotlib.rcsetup.cycler
Convenience function for creating validated cyclers for properties.
cycler.cycler
The original function for creating unvalidated cyclers.
"""
if args and kwargs:
raise TypeError("Cannot supply both positional and keyword "
"arguments to this method.")
# Can't do `args == (None,)` as that crashes cycler.
if len(args) == 1 and args[0] is None:
prop_cycle = None
else:
prop_cycle = cycler(*args, **kwargs)
self._get_lines.set_prop_cycle(prop_cycle)
self._get_patches_for_fill.set_prop_cycle(prop_cycle)
def get_aspect(self):
return self._aspect
def set_aspect(self, aspect, adjustable=None, anchor=None, share=False):
"""
Set the aspect of the axis scaling, i.e. the ratio of y-unit to x-unit.
Parameters
----------
aspect : {'auto', 'equal'} or num
Possible values:
======== ================================================
value description
======== ================================================
'auto' automatic; fill the position rectangle with data
'equal' same scaling from data to plot units for x and y
num a circle will be stretched such that the height
is num times the width. aspect=1 is the same as
aspect='equal'.
======== ================================================
adjustable : None or {'box', 'datalim'}, optional
If not ``None``, this defines which parameter will be adjusted to
meet the required aspect. See `.set_adjustable` for further
details.
anchor : None or str or 2-tuple of float, optional
If not ``None``, this defines where the Axes will be drawn if there
is extra space due to aspect constraints. The most common way to
specify the anchor is by an abbreviation of a cardinal direction:
===== =====================
value description
===== =====================
'C' centered
'SW' lower left corner
'S' middle of bottom edge
'SE' lower right corner
etc.
===== =====================
See `.set_anchor` for further details.
share : bool, optional
If ``True``, apply the settings to all shared Axes.
Default is ``False``.
See Also
--------
matplotlib.axes.Axes.set_adjustable
defining the parameter to adjust in order to meet the required
aspect.
matplotlib.axes.Axes.set_anchor
defining the position in case of extra space.
"""
if not (cbook._str_equal(aspect, 'equal')
or cbook._str_equal(aspect, 'auto')):
aspect = float(aspect) # raise ValueError if necessary
if (not cbook._str_equal(aspect, 'auto')) and self.name == '3d':
raise NotImplementedError(
'It is not currently possible to manually set the aspect '
'on 3D axes')
if share:
axes = set(self._shared_x_axes.get_siblings(self)
+ self._shared_y_axes.get_siblings(self))
else:
axes = [self]
for ax in axes:
ax._aspect = aspect
if adjustable is None:
adjustable = self._adjustable
self.set_adjustable(adjustable, share=share) # Handle sharing.
if anchor is not None:
self.set_anchor(anchor, share=share)
self.stale = True
def get_adjustable(self):
return self._adjustable
def set_adjustable(self, adjustable, share=False):
"""
Define which parameter the Axes will change to achieve a given aspect.
Parameters
----------
adjustable : {'box', 'datalim'}
If 'box', change the physical dimensions of the Axes.
If 'datalim', change the ``x`` or ``y`` data limits.
share : bool, optional
If ``True``, apply the settings to all shared Axes.
Default is ``False``.
See Also
--------
matplotlib.axes.Axes.set_aspect
for a description of aspect handling.
Notes
-----
Shared Axes (of which twinned Axes are a special case)
impose restrictions on how aspect ratios can be imposed.
For twinned Axes, use 'datalim'. For Axes that share both
x and y, use 'box'. Otherwise, either 'datalim' or 'box'
may be used. These limitations are partly a requirement
to avoid over-specification, and partly a result of the
particular implementation we are currently using, in
which the adjustments for aspect ratios are done sequentially
and independently on each Axes as it is drawn.
"""
cbook._check_in_list(["box", "datalim"], adjustable=adjustable)
if share:
axes = set(self._shared_x_axes.get_siblings(self)
+ self._shared_y_axes.get_siblings(self))
else:
axes = [self]
for ax in axes:
ax._adjustable = adjustable
self.stale = True
def get_anchor(self):
"""
Get the anchor location.
See Also
--------
matplotlib.axes.Axes.set_anchor
for a description of the anchor.
matplotlib.axes.Axes.set_aspect
for a description of aspect handling.
"""
return self._anchor
def set_anchor(self, anchor, share=False):
"""
Define the anchor location.
The actual drawing area (active position) of the Axes may be smaller
than the Bbox (original position) when a fixed aspect is required. The
anchor defines where the drawing area will be located within the
available space.
Parameters
----------
anchor : 2-tuple of floats or {'C', 'SW', 'S', 'SE', ...}
The anchor position may be either:
- a sequence (*cx*, *cy*). *cx* and *cy* may range from 0
to 1, where 0 is left or bottom and 1 is right or top.
- a string using cardinal directions as abbreviation:
- 'C' for centered
- 'S' (south) for bottom-center
- 'SW' (south west) for bottom-left
- etc.
Here is an overview of the possible positions:
+------+------+------+
| 'NW' | 'N' | 'NE' |
+------+------+------+
| 'W' | 'C' | 'E' |
+------+------+------+
| 'SW' | 'S' | 'SE' |
+------+------+------+
share : bool, optional
If ``True``, apply the settings to all shared Axes.
Default is ``False``.
See Also
--------
matplotlib.axes.Axes.set_aspect
for a description of aspect handling.
"""
if not (anchor in mtransforms.Bbox.coefs or len(anchor) == 2):
raise ValueError('argument must be among %s' %
', '.join(mtransforms.Bbox.coefs))
if share:
axes = set(self._shared_x_axes.get_siblings(self)
+ self._shared_y_axes.get_siblings(self))
else:
axes = [self]
for ax in axes:
ax._anchor = anchor
self.stale = True
def get_data_ratio(self):
"""
Return the aspect ratio of the raw data.
Notes
-----
This method is intended to be overridden by new projection types.
"""
xmin, xmax = self.get_xbound()
ymin, ymax = self.get_ybound()
xsize = max(abs(xmax - xmin), 1e-30)
ysize = max(abs(ymax - ymin), 1e-30)
return ysize / xsize
def get_data_ratio_log(self):
"""
Return the aspect ratio of the raw data in log scale.
Notes
-----
Will be used when both axes are in log scale.
"""
xmin, xmax = self.get_xbound()
ymin, ymax = self.get_ybound()
xsize = max(abs(math.log10(xmax) - math.log10(xmin)), 1e-30)
ysize = max(abs(math.log10(ymax) - math.log10(ymin)), 1e-30)
return ysize / xsize
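# Illustrative worked example (not part of the original code): with
# xlim=(1, 1000) and ylim=(1, 100) on log-log axes,
#     ysize / xsize = (log10(100) - log10(1)) / (log10(1000) - log10(1)) = 2 / 3
# i.e. the data ratio is measured in decades rather than in data units, which
# is what apply_aspect() uses when both scales are 'log'.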
def apply_aspect(self, position=None):
"""
Adjust the Axes for a specified data aspect ratio.
Depending on `.get_adjustable` this will modify either the Axes box
(position) or the view limits. In the former case, `.get_anchor`
will affect the position.
Notes
-----
This is called automatically when each Axes is drawn. You may need
to call it yourself if you need to update the Axes position and/or
view limits before the Figure is drawn.
See Also
--------
matplotlib.axes.Axes.set_aspect
for a description of aspect ratio handling.
matplotlib.axes.Axes.set_adjustable
defining the parameter to adjust in order to meet the required
aspect.
matplotlib.axes.Axes.set_anchor
defining the position in case of extra space.
"""
if position is None:
position = self.get_position(original=True)
aspect = self.get_aspect()
if self.name != 'polar':
xscale, yscale = self.get_xscale(), self.get_yscale()
if xscale == "linear" and yscale == "linear":
aspect_scale_mode = "linear"
elif xscale == "log" and yscale == "log":
aspect_scale_mode = "log"
elif ((xscale == "linear" and yscale == "log") or
(xscale == "log" and yscale == "linear")):
if aspect != "auto":
cbook._warn_external(
'aspect is not supported for Axes with xscale=%s, '
'yscale=%s' % (xscale, yscale))
aspect = "auto"
else: # some custom projections have their own scales.
pass
else:
aspect_scale_mode = "linear"
if aspect == 'auto':
self._set_position(position, which='active')
return
if aspect == 'equal':
A = 1
else:
A = aspect
figW, figH = self.get_figure().get_size_inches()
fig_aspect = figH / figW
if self._adjustable == 'box':
if self in self._twinned_axes:
raise RuntimeError("Adjustable 'box' is not allowed in a"
" twinned Axes. Use 'datalim' instead.")
if aspect_scale_mode == "log":
box_aspect = A * self.get_data_ratio_log()
else:
box_aspect = A * self.get_data_ratio()
pb = position.frozen()
pb1 = pb.shrunk_to_aspect(box_aspect, pb, fig_aspect)
self._set_position(pb1.anchored(self.get_anchor(), pb), 'active')
return
# reset active to original in case it had been changed
# by prior use of 'box'
self._set_position(position, which='active')
xmin, xmax = self.get_xbound()
ymin, ymax = self.get_ybound()
if aspect_scale_mode == "log":
xmin, xmax = math.log10(xmin), math.log10(xmax)
ymin, ymax = math.log10(ymin), math.log10(ymax)
xsize = max(abs(xmax - xmin), 1e-30)
ysize = max(abs(ymax - ymin), 1e-30)
l, b, w, h = position.bounds
box_aspect = fig_aspect * (h / w)
data_ratio = box_aspect / A
y_expander = (data_ratio * xsize / ysize - 1.0)
# If y_expander > 0, the dy/dx viewLim ratio needs to increase
if abs(y_expander) < 0.005:
return
if aspect_scale_mode == "log":
dL = self.dataLim
dL_width = math.log10(dL.x1) - math.log10(dL.x0)
dL_height = math.log10(dL.y1) - math.log10(dL.y0)
xr = 1.05 * dL_width
yr = 1.05 * dL_height
else:
dL = self.dataLim
xr = 1.05 * dL.width
yr = 1.05 * dL.height
xmarg = xsize - xr
ymarg = ysize - yr
Ysize = data_ratio * xsize
Xsize = ysize / data_ratio
Xmarg = Xsize - xr
Ymarg = Ysize - yr
# Setting these targets to, e.g., 0.05*xr does not seem to
# help.
xm = 0
ym = 0
shared_x = self in self._shared_x_axes
shared_y = self in self._shared_y_axes
# Not sure whether we need this check:
if shared_x and shared_y:
raise RuntimeError("adjustable='datalim' is not allowed when both"
" axes are shared.")
# If y is shared, then we are only allowed to change x, etc.
if shared_y:
adjust_y = False
else:
if xmarg > xm and ymarg > ym:
adjy = ((Ymarg > 0 and y_expander < 0) or
(Xmarg < 0 and y_expander > 0))
else:
adjy = y_expander > 0
adjust_y = shared_x or adjy # (Ymarg > xmarg)
if adjust_y:
yc = 0.5 * (ymin + ymax)
y0 = yc - Ysize / 2.0
y1 = yc + Ysize / 2.0
if aspect_scale_mode == "log":
self.set_ybound((10. ** y0, 10. ** y1))
else:
self.set_ybound((y0, y1))
else:
xc = 0.5 * (xmin + xmax)
x0 = xc - Xsize / 2.0
x1 = xc + Xsize / 2.0
if aspect_scale_mode == "log":
self.set_xbound((10. ** x0, 10. ** x1))
else:
self.set_xbound((x0, x1))
def axis(self, *args, **kwargs):
"""
Convenience method to get or set some axis properties.
Call signatures::
xmin, xmax, ymin, ymax = axis()
xmin, xmax, ymin, ymax = axis([xmin, xmax, ymin, ymax])
xmin, xmax, ymin, ymax = axis(option)
xmin, xmax, ymin, ymax = axis(**kwargs)
Parameters
----------
xmin, ymin, xmax, ymax : float, optional
The axis limits to be set. Either none or all of the limits must
be given.
option : bool or str
If a bool, turns axis lines and labels on or off. If a string,
possible values are:
======== ==========================================================
Value Description
======== ==========================================================
'on' Turn on axis lines and labels. Same as ``True``.
'off' Turn off axis lines and labels. Same as ``False``.
'equal' Set equal scaling (i.e., make circles circular) by
changing axis limits.
'scaled' Set equal scaling (i.e., make circles circular) by
changing dimensions of the plot box.
'tight' Set limits just large enough to show all data.
'auto' Automatic scaling (fill plot box with data).
'normal' Same as 'auto'; deprecated.
'image' 'scaled' with axis limits equal to data limits.
'square' Square plot; similar to 'scaled', but initially forcing
``xmax-xmin = ymax-ymin``.
======== ==========================================================
emit : bool, optional, default *True*
Whether observers are notified of the axis limit change.
This option is passed on to `~.Axes.set_xlim` and
`~.Axes.set_ylim`.
Returns
-------
xmin, xmax, ymin, ymax : float
The axis limits.
See also
--------
matplotlib.axes.Axes.set_xlim
matplotlib.axes.Axes.set_ylim
"""
if len(args) == len(kwargs) == 0:
xmin, xmax = self.get_xlim()
ymin, ymax = self.get_ylim()
return xmin, xmax, ymin, ymax
emit = kwargs.get('emit', True)
if len(args) == 1 and isinstance(args[0], str):
s = args[0].lower()
if s == 'on':
self.set_axis_on()
elif s == 'off':
self.set_axis_off()
elif s in ('equal', 'tight', 'scaled', 'normal',
'auto', 'image', 'square'):
if s == 'normal':
cbook.warn_deprecated(
"3.1", message="Passing 'normal' to axis() is "
"deprecated since %(since)s; use 'auto' instead.")
self.set_autoscale_on(True)
self.set_aspect('auto')
self.autoscale_view(tight=False)
# self.apply_aspect()
if s == 'equal':
self.set_aspect('equal', adjustable='datalim')
elif s == 'scaled':
self.set_aspect('equal', adjustable='box', anchor='C')
self.set_autoscale_on(False) # Req. by Mark Bakker
elif s == 'tight':
self.autoscale_view(tight=True)
self.set_autoscale_on(False)
elif s == 'image':
self.autoscale_view(tight=True)
self.set_autoscale_on(False)
self.set_aspect('equal', adjustable='box', anchor='C')
elif s == 'square':
self.set_aspect('equal', adjustable='box', anchor='C')
self.set_autoscale_on(False)
xlim = self.get_xlim()
ylim = self.get_ylim()
edge_size = max(np.diff(xlim), np.diff(ylim))
self.set_xlim([xlim[0], xlim[0] + edge_size],
emit=emit, auto=False)
self.set_ylim([ylim[0], ylim[0] + edge_size],
emit=emit, auto=False)
else:
raise ValueError('Unrecognized string %s to axis; '
'try on or off' % s)
xmin, xmax = self.get_xlim()
ymin, ymax = self.get_ylim()
return xmin, xmax, ymin, ymax
try:
args[0]
except IndexError:
xmin = kwargs.get('xmin', None)
xmax = kwargs.get('xmax', None)
auto = False # turn off autoscaling, unless...
if xmin is None and xmax is None:
auto = None # leave autoscaling state alone
xmin, xmax = self.set_xlim(xmin, xmax, emit=emit, auto=auto)
ymin = kwargs.get('ymin', None)
ymax = kwargs.get('ymax', None)
auto = False # turn off autoscaling, unless...
if ymin is None and ymax is None:
auto = None # leave autoscaling state alone
ymin, ymax = self.set_ylim(ymin, ymax, emit=emit, auto=auto)
return xmin, xmax, ymin, ymax
v = args[0]
if isinstance(v, bool):
if v:
self.set_axis_on()
else:
self.set_axis_off()
xmin, xmax = self.get_xlim()
ymin, ymax = self.get_ylim()
return xmin, xmax, ymin, ymax
if len(v) != 4:
raise ValueError('args must contain [xmin xmax ymin ymax]')
self.set_xlim([v[0], v[1]], emit=emit, auto=False)
self.set_ylim([v[2], v[3]], emit=emit, auto=False)
return v
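# Illustrative usage (not part of the original code):
#
#     ax.axis([0, 10, -1, 1])  # set all four limits, disable autoscaling
#     ax.axis('equal')         # equal scaling by adjusting the data limits
#     ax.axis('off')           # hide axis lines and labels
#     ax.axis()                # -> (xmin, xmax, ymin, ymax)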
def get_legend(self):
"""Return the `Legend` instance, or None if no legend is defined."""
return self.legend_
def get_images(self):
"""return a list of Axes images contained by the Axes"""
return cbook.silent_list('AxesImage', self.images)
def get_lines(self):
"""Return a list of lines contained by the Axes"""
return cbook.silent_list('Line2D', self.lines)
def get_xaxis(self):
"""Return the XAxis instance."""
return self.xaxis
def get_xgridlines(self):
"""Get the x grid lines as a list of `Line2D` instances."""
return cbook.silent_list('Line2D xgridline',
self.xaxis.get_gridlines())
def get_xticklines(self):
"""Get the x tick lines as a list of `Line2D` instances."""
return cbook.silent_list('Line2D xtickline',
self.xaxis.get_ticklines())
def get_yaxis(self):
"""Return the YAxis instance."""
return self.yaxis
def get_ygridlines(self):
"""Get the y grid lines as a list of `Line2D` instances."""
return cbook.silent_list('Line2D ygridline',
self.yaxis.get_gridlines())
def get_yticklines(self):
"""Get the y tick lines as a list of `Line2D` instances."""
return cbook.silent_list('Line2D ytickline',
self.yaxis.get_ticklines())
# Adding and tracking artists
def _sci(self, im):
"""Set the current image.
This image will be the target of colormap functions like
`~.pyplot.viridis`, and other functions such as `~.pyplot.clim`. The
current image is an attribute of the current axes.
"""
if isinstance(im, mpl.contour.ContourSet):
if im.collections[0] not in self.collections:
raise ValueError("ContourSet must be in current Axes")
elif im not in self.images and im not in self.collections:
raise ValueError("Argument must be an image, collection, or "
"ContourSet in this Axes")
self._current_image = im
def _gci(self):
"""
Helper for :func:`~matplotlib.pyplot.gci`;
do not use elsewhere.
"""
return self._current_image
def has_data(self):
"""
Return *True* if any artists have been added to axes.
This should not be used to determine whether the *dataLim*
        needs to be updated, and may not actually be useful for
anything.
"""
return (
len(self.collections) +
len(self.images) +
len(self.lines) +
len(self.patches)) > 0
def add_artist(self, a):
"""
Add an `~.Artist` to the axes, and return the artist.
Use `add_artist` only for artists for which there is no dedicated
"add" method; and if necessary, use a method such as `update_datalim`
to manually update the dataLim if the artist is to be included in
autoscaling.
If no ``transform`` has been specified when creating the artist (e.g.
``artist.get_transform() == None``) then the transform is set to
``ax.transData``.
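
        For example, a minimal sketch, assuming ``ax`` is an existing
        `~matplotlib.axes.Axes`::

            import matplotlib.lines as mlines

            # any Artist works here; Line2D is used purely for illustration
            line = mlines.Line2D([0, 1], [0, 1])
            ax.add_artist(line)   # transform defaults to ax.transData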
"""
a.axes = self
self.artists.append(a)
a._remove_method = self.artists.remove
self._set_artist_props(a)
a.set_clip_path(self.patch)
self.stale = True
return a
def add_child_axes(self, ax):
"""
Add an `~.AxesBase` to the axes' children; return the child axes.
        This is the low-level version. See `.axes.Axes.inset_axes`.
"""
# normally axes have themselves as the axes, but these need to have
# their parent...
# Need to bypass the getter...
ax._axes = self
ax.stale_callback = martist._stale_axes_callback
self.child_axes.append(ax)
ax._remove_method = self.child_axes.remove
self.stale = True
return ax
def add_collection(self, collection, autolim=True):
"""
Add a `~.Collection` to the axes' collections; return the collection.
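
        For example, a minimal sketch, assuming ``ax`` is an existing
        `~matplotlib.axes.Axes`::

            from matplotlib.collections import LineCollection

            segments = [[(0, 0), (1, 1)], [(0, 1), (1, 0)]]
            lc = LineCollection(segments)
            ax.add_collection(lc)   # autolim=True also updates the data limits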
"""
label = collection.get_label()
if not label:
collection.set_label('_collection%d' % len(self.collections))
self.collections.append(collection)
collection._remove_method = self.collections.remove
self._set_artist_props(collection)
if collection.get_clip_path() is None:
collection.set_clip_path(self.patch)
if autolim:
self.update_datalim(collection.get_datalim(self.transData))
self.stale = True
return collection
def add_image(self, image):
"""
Add an `~.AxesImage` to the axes' images; return the image.
"""
self._set_artist_props(image)
if not image.get_label():
image.set_label('_image%d' % len(self.images))
self.images.append(image)
image._remove_method = self.images.remove
self.stale = True
return image
def _update_image_limits(self, image):
xmin, xmax, ymin, ymax = image.get_extent()
self.axes.update_datalim(((xmin, ymin), (xmax, ymax)))
def add_line(self, line):
"""
Add a `~.Line2D` to the axes' lines; return the line.
"""
self._set_artist_props(line)
if line.get_clip_path() is None:
line.set_clip_path(self.patch)
self._update_line_limits(line)
if not line.get_label():
line.set_label('_line%d' % len(self.lines))
self.lines.append(line)
line._remove_method = self.lines.remove
self.stale = True
return line
def _add_text(self, txt):
"""
Add a `~.Text` to the axes' texts; return the text.
"""
self._set_artist_props(txt)
self.texts.append(txt)
txt._remove_method = self.texts.remove
self.stale = True
return txt
def _update_line_limits(self, line):
"""
Figures out the data limit of the given line, updating self.dataLim.
"""
path = line.get_path()
if path.vertices.size == 0:
return
line_trans = line.get_transform()
if line_trans == self.transData:
data_path = path
elif any(line_trans.contains_branch_seperately(self.transData)):
# identify the transform to go from line's coordinates
# to data coordinates
trans_to_data = line_trans - self.transData
# if transData is affine we can use the cached non-affine component
# of line's path. (since the non-affine part of line_trans is
# entirely encapsulated in trans_to_data).
if self.transData.is_affine:
line_trans_path = line._get_transformed_path()
na_path, _ = line_trans_path.get_transformed_path_and_affine()
data_path = trans_to_data.transform_path_affine(na_path)
else:
data_path = trans_to_data.transform_path(path)
else:
# for backwards compatibility we update the dataLim with the
# coordinate range of the given path, even though the coordinate
# systems are completely different. This may occur in situations
# such as when ax.transAxes is passed through for absolute
# positioning.
data_path = path
if data_path.vertices.size > 0:
updatex, updatey = line_trans.contains_branch_seperately(
self.transData)
self.dataLim.update_from_path(data_path,
self.ignore_existing_data_limits,
updatex=updatex,
updatey=updatey)
self.ignore_existing_data_limits = False
def add_patch(self, p):
"""
Add a `~.Patch` to the axes' patches; return the patch.
"""
self._set_artist_props(p)
if p.get_clip_path() is None:
p.set_clip_path(self.patch)
self._update_patch_limits(p)
self.patches.append(p)
p._remove_method = self.patches.remove
return p
def _update_patch_limits(self, patch):
"""update the data limits for patch *p*"""
# hist can add zero height Rectangles, which is useful to keep
# the bins, counts and patches lined up, but it throws off log
# scaling. We'll ignore rects with zero height or width in
# the auto-scaling
# cannot check for '==0' since unitized data may not compare to zero
# issue #2150 - we update the limits if patch has non zero width
# or height.
if (isinstance(patch, mpatches.Rectangle) and
((not patch.get_width()) and (not patch.get_height()))):
return
vertices = patch.get_path().vertices
if vertices.size > 0:
xys = patch.get_patch_transform().transform(vertices)
if patch.get_data_transform() != self.transData:
patch_to_data = (patch.get_data_transform() -
self.transData)
xys = patch_to_data.transform(xys)
updatex, updatey = patch.get_transform().\
contains_branch_seperately(self.transData)
self.update_datalim(xys, updatex=updatex,
updatey=updatey)
def add_table(self, tab):
"""
Add a `~.Table` to the axes' tables; return the table.
"""
self._set_artist_props(tab)
self.tables.append(tab)
tab.set_clip_path(self.patch)
tab._remove_method = self.tables.remove
return tab
def add_container(self, container):
"""
Add a `~.Container` to the axes' containers; return the container.
"""
label = container.get_label()
if not label:
container.set_label('_container%d' % len(self.containers))
self.containers.append(container)
container._remove_method = self.containers.remove
return container
def _on_units_changed(self, scalex=False, scaley=False):
"""
Callback for processing changes to axis units.
Currently forces updates of data limits and view limits.
"""
self.relim()
self.autoscale_view(scalex=scalex, scaley=scaley)
def relim(self, visible_only=False):
"""
Recompute the data limits based on current artists.
At present, `~.Collection` instances are not supported.
Parameters
----------
visible_only : bool
Whether to exclude invisible artists. Defaults to False.
"""
# Collections are deliberately not supported (yet); see
# the TODO note in artists.py.
self.dataLim.ignore(True)
self.dataLim.set_points(mtransforms.Bbox.null().get_points())
self.ignore_existing_data_limits = True
for line in self.lines:
if not visible_only or line.get_visible():
self._update_line_limits(line)
for p in self.patches:
if not visible_only or p.get_visible():
self._update_patch_limits(p)
for image in self.images:
if not visible_only or image.get_visible():
self._update_image_limits(image)
def update_datalim(self, xys, updatex=True, updatey=True):
"""
Extend the `~.Axes.dataLim` BBox to include the given points.
If no data is set currently, the BBox will ignore its limits and set
the bound to be the bounds of the xydata (*xys*). Otherwise, it will
compute the bounds of the union of its current data and the data in
*xys*.
Parameters
----------
xys : 2D array-like
The points to include in the data limits BBox. This can be either
a list of (x, y) tuples or a Nx2 array.
updatex, updatey : bool, optional, default *True*
Whether to update the x/y limits.
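
        Examples
        --------
        A minimal sketch; ``ax`` is assumed to be an existing
        `~matplotlib.axes.Axes`::

            ax.update_datalim([(0, 0), (10, 5)])  # extend dataLim to these points
            ax.autoscale_view()                   # then update the view limits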
"""
xys = np.asarray(xys)
if not len(xys):
return
self.dataLim.update_from_data_xy(xys, self.ignore_existing_data_limits,
updatex=updatex, updatey=updatey)
self.ignore_existing_data_limits = False
def update_datalim_bounds(self, bounds):
"""
Extend the `~.Axes.datalim` BBox to include the given
`~matplotlib.transforms.Bbox`.
Parameters
----------
bounds : `~matplotlib.transforms.Bbox`
"""
self.dataLim.set(mtransforms.Bbox.union([self.dataLim, bounds]))
def _process_unit_info(self, xdata=None, ydata=None, kwargs=None):
"""Look for unit *kwargs* and update the axis instances as necessary"""
def _process_single_axis(data, axis, unit_name, kwargs):
# Return if there's no axis set
if axis is None:
return kwargs
if data is not None:
# We only need to update if there is nothing set yet.
if not axis.have_units():
axis.update_units(data)
# Check for units in the kwargs, and if present update axis
if kwargs is not None:
units = kwargs.pop(unit_name, axis.units)
if self.name == 'polar':
polar_units = {'xunits': 'thetaunits', 'yunits': 'runits'}
units = kwargs.pop(polar_units[unit_name], units)
if units != axis.units:
axis.set_units(units)
# If the units being set imply a different converter,
# we need to update.
if data is not None:
axis.update_units(data)
return kwargs
kwargs = _process_single_axis(xdata, self.xaxis, 'xunits', kwargs)
kwargs = _process_single_axis(ydata, self.yaxis, 'yunits', kwargs)
return kwargs
def in_axes(self, mouseevent):
"""
Return *True* if the given *mouseevent* (in display coords)
is in the Axes
"""
return self.patch.contains(mouseevent)[0]
def get_autoscale_on(self):
"""
Get whether autoscaling is applied for both axes on plot commands
"""
return self._autoscaleXon and self._autoscaleYon
def get_autoscalex_on(self):
"""
Get whether autoscaling for the x-axis is applied on plot commands
"""
return self._autoscaleXon
def get_autoscaley_on(self):
"""
Get whether autoscaling for the y-axis is applied on plot commands
"""
return self._autoscaleYon
def set_autoscale_on(self, b):
"""
Set whether autoscaling is applied on plot commands
Parameters
----------
b : bool
"""
self._autoscaleXon = b
self._autoscaleYon = b
def set_autoscalex_on(self, b):
"""
Set whether autoscaling for the x-axis is applied on plot commands
Parameters
----------
b : bool
"""
self._autoscaleXon = b
def set_autoscaley_on(self, b):
"""
Set whether autoscaling for the y-axis is applied on plot commands
Parameters
----------
b : bool
"""
self._autoscaleYon = b
@property
def use_sticky_edges(self):
"""
When autoscaling, whether to obey all `Artist.sticky_edges`.
Default is ``True``.
Setting this to ``False`` ensures that the specified margins
will be applied, even if the plot includes an image, for
example, which would otherwise force a view limit to coincide
with its data limit.
        Changing this property does not change the plot until
        `autoscale` or `autoscale_view` is called.
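
        For example, a minimal sketch, assuming ``ax`` is an existing
        `~matplotlib.axes.Axes` that contains an image::

            ax.use_sticky_edges = False
            ax.margins(0.05)   # margins are now applied around the image too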
"""
return self._use_sticky_edges
@use_sticky_edges.setter
def use_sticky_edges(self, b):
self._use_sticky_edges = bool(b)
# No effect until next autoscaling, which will mark the axes as stale.
def set_xmargin(self, m):
"""
Set padding of X data limits prior to autoscaling.
*m* times the data interval will be added to each
end of that interval before it is used in autoscaling.
For example, if your data is in the range [0, 2], a factor of
``m = 0.1`` will result in a range [-0.2, 2.2].
Negative values -0.5 < m < 0 will result in clipping of the data range.
I.e. for a data range [0, 2], a factor of ``m = -0.1`` will result in
a range [0.2, 1.8].
Parameters
----------
m : float greater than -0.5
"""
if m <= -0.5:
raise ValueError("margin must be greater than -0.5")
self._xmargin = m
self.stale = True
def set_ymargin(self, m):
"""
Set padding of Y data limits prior to autoscaling.
*m* times the data interval will be added to each
end of that interval before it is used in autoscaling.
For example, if your data is in the range [0, 2], a factor of
``m = 0.1`` will result in a range [-0.2, 2.2].
Negative values -0.5 < m < 0 will result in clipping of the data range.
I.e. for a data range [0, 2], a factor of ``m = -0.1`` will result in
a range [0.2, 1.8].
Parameters
----------
m : float greater than -0.5
"""
if m <= -0.5:
raise ValueError("margin must be greater than -0.5")
self._ymargin = m
self.stale = True
def margins(self, *margins, x=None, y=None, tight=True):
"""
Set or retrieve autoscaling margins.
The padding added to each limit of the axes is the *margin*
times the data interval. All input parameters must be floats
within the range [0, 1]. Passing both positional and keyword
arguments is invalid and will raise a TypeError. If no
arguments (positional or otherwise) are provided, the current
margins will remain in place and simply be returned.
Specifying any margin changes only the autoscaling; for example,
if *xmargin* is not None, then *xmargin* times the X data
interval will be added to each end of that interval before
it is used in autoscaling.
Parameters
----------
*margins : float, optional
If a single positional argument is provided, it specifies
both margins of the x-axis and y-axis limits. If two
positional arguments are provided, they will be interpreted
as *xmargin*, *ymargin*. If setting the margin on a single
axis is desired, use the keyword arguments described below.
x, y : float, optional
Specific margin values for the x-axis and y-axis,
respectively. These cannot be used with positional
arguments, but can be used individually to alter on e.g.,
only the y-axis.
tight : bool or None, default is True
The *tight* parameter is passed to :meth:`autoscale_view`,
which is executed after a margin is changed; the default
here is *True*, on the assumption that when margins are
specified, no additional padding to match tick marks is
            usually desired. Setting *tight* to *None* preserves
            the previous setting.
Returns
-------
xmargin, ymargin : float
Notes
-----
If a previously used Axes method such as :meth:`pcolor` has set
:attr:`use_sticky_edges` to `True`, only the limits not set by
the "sticky artists" will be modified. To force all of the
margins to be set, set :attr:`use_sticky_edges` to `False`
before calling :meth:`margins`.
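
        Examples
        --------
        A minimal sketch; ``ax`` is assumed to be an existing
        `~matplotlib.axes.Axes`::

            ax.margins(0.05)          # 5% padding on both axes
            ax.margins(x=0, y=0.1)    # no x padding, 10% y padding
            ax.margins()              # no arguments: return (xmargin, ymargin)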
"""
if margins and x is not None and y is not None:
raise TypeError('Cannot pass both positional and keyword '
'arguments for x and/or y.')
elif len(margins) == 1:
x = y = margins[0]
elif len(margins) == 2:
x, y = margins
elif margins:
raise TypeError('Must pass a single positional argument for all '
'margins, or one for each margin (x, y).')
if x is None and y is None:
if tight is not True:
cbook._warn_external(f'ignoring tight={tight!r} in get mode')
return self._xmargin, self._ymargin
if x is not None:
self.set_xmargin(x)
if y is not None:
self.set_ymargin(y)
self.autoscale_view(
tight=tight, scalex=(x is not None), scaley=(y is not None)
)
def set_rasterization_zorder(self, z):
"""
Parameters
----------
z : float or None
zorder below which artists are rasterized. ``None`` means that
artists do not get rasterized based on zorder.
"""
self._rasterization_zorder = z
self.stale = True
def get_rasterization_zorder(self):
"""Return the zorder value below which artists will be rasterized."""
return self._rasterization_zorder
def autoscale(self, enable=True, axis='both', tight=None):
"""
Autoscale the axis view to the data (toggle).
Convenience method for simple axis view autoscaling.
It turns autoscaling on or off, and then,
if autoscaling for either axis is on, it performs
the autoscaling on the specified axis or axes.
Parameters
----------
enable : bool or None, optional
True (default) turns autoscaling on, False turns it off.
None leaves the autoscaling state unchanged.
axis : {'both', 'x', 'y'}, optional
which axis to operate on; default is 'both'
tight : bool or None, optional
If True, set view limits to data limits;
if False, let the locator and margins expand the view limits;
if None, use tight scaling if the only artist is an image,
otherwise treat *tight* as False.
The *tight* setting is retained for future autoscaling
until it is explicitly changed.
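
        Examples
        --------
        A minimal sketch; ``ax`` is assumed to be an existing
        `~matplotlib.axes.Axes`::

            ax.autoscale(enable=True, axis='x', tight=True)  # fit x view to data
            ax.autoscale(False)                              # freeze both axes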
"""
if enable is None:
scalex = True
scaley = True
else:
scalex = False
scaley = False
if axis in ['x', 'both']:
self._autoscaleXon = bool(enable)
scalex = self._autoscaleXon
if axis in ['y', 'both']:
self._autoscaleYon = bool(enable)
scaley = self._autoscaleYon
if tight and scalex:
self._xmargin = 0
if tight and scaley:
self._ymargin = 0
self.autoscale_view(tight=tight, scalex=scalex, scaley=scaley)
def autoscale_view(self, tight=None, scalex=True, scaley=True):
"""
Autoscale the view limits using the data limits.
You can selectively autoscale only a single axis, e.g., the xaxis by
setting *scaley* to *False*. The autoscaling preserves any
axis direction reversal that has already been done.
If *tight* is *False*, the axis major locator will be used
to expand the view limits if rcParams['axes.autolimit_mode']
is 'round_numbers'. Note that any margins that are in effect
will be applied first, regardless of whether *tight* is
*True* or *False*. Specifying *tight* as *True* or *False*
saves the setting as a private attribute of the Axes; specifying
it as *None* (the default) applies the previously saved value.
The data limits are not updated automatically when artist data are
changed after the artist has been added to an Axes instance. In that
case, use :meth:`matplotlib.axes.Axes.relim` prior to calling
autoscale_view.
"""
if tight is not None:
self._tight = bool(tight)
if self.use_sticky_edges and (
(self._xmargin and scalex and self._autoscaleXon) or
(self._ymargin and scaley and self._autoscaleYon)):
stickies = [artist.sticky_edges for artist in self.get_children()]
x_stickies = np.array([x for sticky in stickies for x in sticky.x])
y_stickies = np.array([y for sticky in stickies for y in sticky.y])
if self.get_xscale().lower() == 'log':
x_stickies = x_stickies[x_stickies > 0]
if self.get_yscale().lower() == 'log':
y_stickies = y_stickies[y_stickies > 0]
else: # Small optimization.
x_stickies, y_stickies = [], []
def handle_single_axis(scale, autoscaleon, shared_axes, interval,
minpos, axis, margin, stickies, set_bound):
if not (scale and autoscaleon):
return # nothing to do...
shared = shared_axes.get_siblings(self)
dl = [ax.dataLim for ax in shared]
# ignore non-finite data limits if good limits exist
finite_dl = [d for d in dl if np.isfinite(d).all()]
if len(finite_dl):
                # if finite limits exist for at least one axis (and the
# other is infinite), restore the finite limits
x_finite = [d for d in dl
if (np.isfinite(d.intervalx).all() and
(d not in finite_dl))]
y_finite = [d for d in dl
if (np.isfinite(d.intervaly).all() and
(d not in finite_dl))]
dl = finite_dl
dl.extend(x_finite)
dl.extend(y_finite)
bb = mtransforms.BboxBase.union(dl)
# fall back on the viewlimits if this is not finite:
vl = None
if not np.isfinite(bb.intervalx).all():
vl = mtransforms.BboxBase.union([ax.viewLim for ax in shared])
bb.intervalx = vl.intervalx
if not np.isfinite(bb.intervaly).all():
if vl is None:
vl = mtransforms.BboxBase.union(
[ax.viewLim for ax in shared])
bb.intervaly = vl.intervaly
x0, x1 = getattr(bb, interval)
locator = axis.get_major_locator()
x0, x1 = locator.nonsingular(x0, x1)
# Add the margin in figure space and then transform back, to handle
# non-linear scales.
minpos = getattr(bb, minpos)
transform = axis.get_transform()
inverse_trans = transform.inverted()
# We cannot use exact equality due to floating point issues e.g.
# with streamplot.
do_lower_margin = not np.any(np.isclose(x0, stickies))
do_upper_margin = not np.any(np.isclose(x1, stickies))
x0, x1 = axis._scale.limit_range_for_scale(x0, x1, minpos)
x0t, x1t = transform.transform([x0, x1])
if np.isfinite(x1t) and np.isfinite(x0t):
delta = (x1t - x0t) * margin
else:
# If at least one bound isn't finite, set margin to zero
delta = 0
if do_lower_margin:
x0t -= delta
if do_upper_margin:
x1t += delta
x0, x1 = inverse_trans.transform([x0t, x1t])
if not self._tight:
x0, x1 = locator.view_limits(x0, x1)
set_bound(x0, x1)
# End of definition of internal function 'handle_single_axis'.
handle_single_axis(
scalex, self._autoscaleXon, self._shared_x_axes, 'intervalx',
'minposx', self.xaxis, self._xmargin, x_stickies, self.set_xbound)
handle_single_axis(
scaley, self._autoscaleYon, self._shared_y_axes, 'intervaly',
'minposy', self.yaxis, self._ymargin, y_stickies, self.set_ybound)
def _get_axis_list(self):
return (self.xaxis, self.yaxis)
def _update_title_position(self, renderer):
"""
Update the title position based on the bounding box enclosing
all the ticklabels and x-axis spine and xlabel...
"""
if self._autotitlepos is not None and not self._autotitlepos:
_log.debug('title position was updated manually, not adjusting')
return
titles = (self.title, self._left_title, self._right_title)
if self._autotitlepos is None:
for title in titles:
x, y = title.get_position()
if not np.isclose(y, 1.0):
self._autotitlepos = False
_log.debug('not adjusting title pos because a title was'
' already placed manually: %f', y)
return
self._autotitlepos = True
for title in titles:
x, _ = title.get_position()
# need to start again in case of window resizing
title.set_position((x, 1.0))
# need to check all our twins too...
axs = self._twinned_axes.get_siblings(self)
# and all the children
for ax in self.child_axes:
if ax is not None:
locator = ax.get_axes_locator()
if locator:
pos = locator(self, renderer)
ax.apply_aspect(pos)
else:
ax.apply_aspect()
axs = axs + [ax]
top = 0
for ax in axs:
if (ax.xaxis.get_ticks_position() in ['top', 'unknown']
or ax.xaxis.get_label_position() == 'top'):
bb = ax.xaxis.get_tightbbox(renderer)
else:
bb = ax.get_window_extent(renderer)
if bb is not None:
top = max(top, bb.ymax)
if title.get_window_extent(renderer).ymin < top:
_, y = self.transAxes.inverted().transform((0, top))
title.set_position((x, y))
# empirically, this doesn't always get the min to top,
# so we need to adjust again.
if title.get_window_extent(renderer).ymin < top:
_, y = self.transAxes.inverted().transform(
(0., 2 * top - title.get_window_extent(renderer).ymin))
title.set_position((x, y))
ymax = max(title.get_position()[1] for title in titles)
for title in titles:
# now line up all the titles at the highest baseline.
x, _ = title.get_position()
title.set_position((x, ymax))
# Drawing
@martist.allow_rasterization
def draw(self, renderer=None, inframe=False):
"""Draw everything (plot lines, axes, labels)"""
if renderer is None:
renderer = self.figure._cachedRenderer
if renderer is None:
raise RuntimeError('No renderer defined')
if not self.get_visible():
return
renderer.open_group('axes')
# prevent triggering call backs during the draw process
self._stale = True
# loop over self and child axes...
locator = self.get_axes_locator()
if locator:
pos = locator(self, renderer)
self.apply_aspect(pos)
else:
self.apply_aspect()
artists = self.get_children()
artists.remove(self.patch)
# the frame draws the edges around the axes patch -- we
# decouple these so the patch can be in the background and the
# frame in the foreground. Do this before drawing the axis
# objects so that the spine has the opportunity to update them.
if not (self.axison and self._frameon):
for spine in self.spines.values():
artists.remove(spine)
self._update_title_position(renderer)
if not self.axison or inframe:
for _axis in self._get_axis_list():
artists.remove(_axis)
if inframe:
artists.remove(self.title)
artists.remove(self._left_title)
artists.remove(self._right_title)
if not self.figure.canvas.is_saving():
artists = [a for a in artists
if not a.get_animated() or a in self.images]
artists = sorted(artists, key=attrgetter('zorder'))
# rasterize artists with negative zorder
# if the minimum zorder is negative, start rasterization
rasterization_zorder = self._rasterization_zorder
if (rasterization_zorder is not None and
artists and artists[0].zorder < rasterization_zorder):
renderer.start_rasterizing()
artists_rasterized = [a for a in artists
if a.zorder < rasterization_zorder]
artists = [a for a in artists
if a.zorder >= rasterization_zorder]
else:
artists_rasterized = []
# the patch draws the background rectangle -- the frame below
# will draw the edges
if self.axison and self._frameon:
self.patch.draw(renderer)
if artists_rasterized:
for a in artists_rasterized:
a.draw(renderer)
renderer.stop_rasterizing()
mimage._draw_list_compositing_images(renderer, self, artists)
renderer.close_group('axes')
self.stale = False
def draw_artist(self, a):
"""
This method can only be used after an initial draw which
caches the renderer. It is used to efficiently update Axes
data (axis ticks, labels, etc are not updated)
"""
if self.figure._cachedRenderer is None:
raise AttributeError("draw_artist can only be used after an "
"initial draw which caches the renderer")
a.draw(self.figure._cachedRenderer)
def redraw_in_frame(self):
"""
This method can only be used after an initial draw which
caches the renderer. It is used to efficiently update Axes
data (axis ticks, labels, etc are not updated)
"""
if self.figure._cachedRenderer is None:
raise AttributeError("redraw_in_frame can only be used after an "
"initial draw which caches the renderer")
self.draw(self.figure._cachedRenderer, inframe=True)
def get_renderer_cache(self):
return self.figure._cachedRenderer
# Axes rectangle characteristics
def get_frame_on(self):
"""Get whether the axes rectangle patch is drawn."""
return self._frameon
def set_frame_on(self, b):
"""
Set whether the axes rectangle patch is drawn.
Parameters
----------
b : bool
"""
self._frameon = b
self.stale = True
def get_axisbelow(self):
"""
Get whether axis ticks and gridlines are above or below most artists.
Returns
-------
axisbelow : bool or 'line'
See Also
--------
set_axisbelow
"""
return self._axisbelow
def set_axisbelow(self, b):
"""
Set whether axis ticks and gridlines are above or below most artists.
This controls the zorder of the ticks and gridlines. For more
information on the zorder see :doc:`/gallery/misc/zorder_demo`.
Parameters
----------
b : bool or 'line'
Possible values:
- *True* (zorder = 0.5): Ticks and gridlines are below all Artists.
            - 'line' (zorder = 1.5): Ticks and gridlines are above patches
              (e.g. rectangles) but still below lines / markers.
- *False* (zorder = 2.5): Ticks and gridlines are above patches
and lines / markers.
See Also
--------
get_axisbelow
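
        Examples
        --------
        A minimal sketch; ``ax`` is assumed to be an existing
        `~matplotlib.axes.Axes`::

            ax.set_axisbelow(True)     # grid and ticks behind all artists
            ax.set_axisbelow('line')   # above patches, below lines / markers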
"""
self._axisbelow = axisbelow = validate_axisbelow(b)
if axisbelow is True:
zorder = 0.5
elif axisbelow is False:
zorder = 2.5
elif axisbelow == "line":
zorder = 1.5
else:
raise ValueError("Unexpected axisbelow value")
for axis in self._get_axis_list():
axis.set_zorder(zorder)
self.stale = True
@docstring.dedent_interpd
def grid(self, b=None, which='major', axis='both', **kwargs):
"""
Configure the grid lines.
Parameters
----------
b : bool or None
Whether to show the grid lines. If any *kwargs* are supplied,
it is assumed you want the grid on and *b* will be set to True.
If *b* is *None* and there are no *kwargs*, this toggles the
visibility of the lines.
which : {'major', 'minor', 'both'}
The grid lines to apply the changes on.
axis : {'both', 'x', 'y'}
The axis to apply the changes on.
**kwargs : `.Line2D` properties
Define the line properties of the grid, e.g.::
grid(color='r', linestyle='-', linewidth=2)
Valid *kwargs* are
%(_Line2D_docstr)s
Notes
-----
The grid will be drawn according to the axes' zorder and not its own.
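
        For example, a minimal sketch, assuming ``ax`` is an existing
        `~matplotlib.axes.Axes`::

            ax.grid(True, which='major', axis='y', linestyle=':')
            ax.grid(False)   # turn the major grid lines off again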
"""
if len(kwargs):
b = True
cbook._check_in_list(['x', 'y', 'both'], axis=axis)
if axis in ['x', 'both']:
self.xaxis.grid(b, which=which, **kwargs)
if axis in ['y', 'both']:
self.yaxis.grid(b, which=which, **kwargs)
def ticklabel_format(self, *, axis='both', style='', scilimits=None,
useOffset=None, useLocale=None, useMathText=None):
r"""
Change the `~matplotlib.ticker.ScalarFormatter` used by
default for linear axes.
Optional keyword arguments:
============== =========================================
Keyword Description
============== =========================================
*axis* [ 'x' | 'y' | 'both' ]
*style* [ 'sci' (or 'scientific') | 'plain' ]
plain turns off scientific notation
*scilimits* (m, n), pair of integers; if *style*
is 'sci', scientific notation will
be used for numbers outside the range
10\ :sup:`m` to 10\ :sup:`n`.
Use (0,0) to include all numbers.
Use (m,m) where m <> 0 to fix the order
of magnitude to 10\ :sup:`m`.
*useOffset* [ bool | offset ]; if True,
the offset will be calculated as needed;
if False, no offset will be used; if a
numeric offset is specified, it will be
used.
*useLocale* If True, format the number according to
the current locale. This affects things
such as the character used for the
decimal separator. If False, use
C-style (English) formatting. The
default setting is controlled by the
axes.formatter.use_locale rcparam.
*useMathText* If True, render the offset and scientific
notation in mathtext
============== =========================================
Only the major ticks are affected.
If the method is called when the `~matplotlib.ticker.ScalarFormatter`
is not the `~matplotlib.ticker.Formatter` being used, an
`AttributeError` will be raised.
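
        For example, a minimal sketch, assuming ``ax`` is an existing
        `~matplotlib.axes.Axes` with linear scales::

            ax.ticklabel_format(axis='y', style='sci', scilimits=(0, 3))
            ax.ticklabel_format(useOffset=False)   # never use an offset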
"""
style = style.lower()
axis = axis.lower()
if scilimits is not None:
try:
m, n = scilimits
m + n + 1 # check that both are numbers
except (ValueError, TypeError):
raise ValueError("scilimits must be a sequence of 2 integers")
if style[:3] == 'sci':
sb = True
elif style == 'plain':
sb = False
elif style == '':
sb = None
else:
raise ValueError("%s is not a valid style value")
try:
if sb is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_scientific(sb)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_scientific(sb)
if scilimits is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_powerlimits(scilimits)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_powerlimits(scilimits)
if useOffset is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_useOffset(useOffset)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_useOffset(useOffset)
if useLocale is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_useLocale(useLocale)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_useLocale(useLocale)
if useMathText is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_useMathText(useMathText)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_useMathText(useMathText)
except AttributeError:
raise AttributeError(
"This method only works with the ScalarFormatter.")
def locator_params(self, axis='both', tight=None, **kwargs):
"""
Control behavior of tick locators.
Parameters
----------
axis : {'both', 'x', 'y'}, optional
The axis on which to operate.
tight : bool or None, optional
Parameter passed to :meth:`autoscale_view`.
Default is None, for no change.
Other Parameters
----------------
        **kwargs
            Remaining keyword arguments are passed directly to the
:meth:`~matplotlib.ticker.MaxNLocator.set_params` method.
Typically one might want to reduce the maximum number of ticks and use
tight bounds when plotting small subplots, for example::
ax.locator_params(tight=True, nbins=4)
Because the locator is involved in autoscaling, :meth:`autoscale_view`
is called automatically after the parameters are changed.
This presently works only for the `~matplotlib.ticker.MaxNLocator` used
by default on linear axes, but it may be generalized.
"""
_x = axis in ['x', 'both']
_y = axis in ['y', 'both']
if _x:
self.xaxis.get_major_locator().set_params(**kwargs)
if _y:
self.yaxis.get_major_locator().set_params(**kwargs)
self.autoscale_view(tight=tight, scalex=_x, scaley=_y)
def tick_params(self, axis='both', **kwargs):
"""Change the appearance of ticks, tick labels, and gridlines.
Parameters
----------
axis : {'x', 'y', 'both'}, optional
Which axis to apply the parameters to.
Other Parameters
----------------
axis : {'x', 'y', 'both'}
Axis on which to operate; default is 'both'.
reset : bool
If *True*, set all parameters to defaults
before processing other keyword arguments. Default is
*False*.
which : {'major', 'minor', 'both'}
Default is 'major'; apply arguments to *which* ticks.
direction : {'in', 'out', 'inout'}
Puts ticks inside the axes, outside the axes, or both.
length : float
Tick length in points.
width : float
Tick width in points.
color : color
Tick color; accepts any mpl color spec.
pad : float
Distance in points between tick and label.
labelsize : float or str
Tick label font size in points or as a string (e.g., 'large').
labelcolor : color
Tick label color; mpl color spec.
colors : color
Changes the tick color and the label color to the same value:
mpl color spec.
zorder : float
Tick and label zorder.
bottom, top, left, right : bool
Whether to draw the respective ticks.
labelbottom, labeltop, labelleft, labelright : bool
Whether to draw the respective tick labels.
labelrotation : float
Tick label rotation
grid_color : color
Changes the gridline color to the given mpl color spec.
grid_alpha : float
Transparency of gridlines: 0 (transparent) to 1 (opaque).
grid_linewidth : float
Width of gridlines in points.
grid_linestyle : string
Any valid `~matplotlib.lines.Line2D` line style spec.
Examples
--------
Usage ::
ax.tick_params(direction='out', length=6, width=2, colors='r',
grid_color='r', grid_alpha=0.5)
This will make all major ticks be red, pointing out of the box,
and with dimensions 6 points by 2 points. Tick labels will
also be red. Gridlines will be red and translucent.
"""
cbook._check_in_list(['x', 'y', 'both'], axis=axis)
if axis in ['x', 'both']:
xkw = dict(kwargs)
xkw.pop('left', None)
xkw.pop('right', None)
xkw.pop('labelleft', None)
xkw.pop('labelright', None)
self.xaxis.set_tick_params(**xkw)
if axis in ['y', 'both']:
ykw = dict(kwargs)
ykw.pop('top', None)
ykw.pop('bottom', None)
ykw.pop('labeltop', None)
ykw.pop('labelbottom', None)
self.yaxis.set_tick_params(**ykw)
def set_axis_off(self):
"""
Turn the x- and y-axis off.
This affects the axis lines, ticks, ticklabels, grid and axis labels.
"""
self.axison = False
self.stale = True
def set_axis_on(self):
"""
Turn the x- and y-axis on.
This affects the axis lines, ticks, ticklabels, grid and axis labels.
"""
self.axison = True
self.stale = True
# data limits, ticks, tick labels, and formatting
def invert_xaxis(self):
"""
Invert the x-axis.
See Also
--------
xaxis_inverted
get_xlim, set_xlim
get_xbound, set_xbound
"""
self.xaxis.set_inverted(not self.xaxis.get_inverted())
def xaxis_inverted(self):
"""
Return whether the x-axis is inverted.
The axis is inverted if the left value is larger than the right value.
See Also
--------
invert_xaxis
get_xlim, set_xlim
get_xbound, set_xbound
"""
return self.xaxis.get_inverted()
def get_xbound(self):
"""
Return the lower and upper x-axis bounds, in increasing order.
See Also
--------
set_xbound
get_xlim, set_xlim
invert_xaxis, xaxis_inverted
"""
left, right = self.get_xlim()
if left < right:
return left, right
else:
return right, left
def set_xbound(self, lower=None, upper=None):
"""
Set the lower and upper numerical bounds of the x-axis.
This method will honor axes inversion regardless of parameter order.
It will not change the autoscaling setting (``Axes._autoscaleXon``).
Parameters
----------
lower, upper : float or None
The lower and upper bounds. If *None*, the respective axis bound
is not modified.
See Also
--------
get_xbound
get_xlim, set_xlim
invert_xaxis, xaxis_inverted
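
        Examples
        --------
        A minimal sketch; ``ax`` is assumed to be an existing
        `~matplotlib.axes.Axes`::

            ax.set_xbound(0, 10)      # bounds honor any existing inversion
            ax.set_xbound(lower=-1)   # change only the lower bound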
"""
if upper is None and np.iterable(lower):
lower, upper = lower
old_lower, old_upper = self.get_xbound()
if lower is None:
lower = old_lower
if upper is None:
upper = old_upper
if self.xaxis_inverted():
if lower < upper:
self.set_xlim(upper, lower, auto=None)
else:
self.set_xlim(lower, upper, auto=None)
else:
if lower < upper:
self.set_xlim(lower, upper, auto=None)
else:
self.set_xlim(upper, lower, auto=None)
def get_xlim(self):
"""
Return the x-axis view limits.
Returns
-------
left, right : (float, float)
The current x-axis limits in data coordinates.
See Also
--------
set_xlim
set_xbound, get_xbound
invert_xaxis, xaxis_inverted
Notes
-----
The x-axis may be inverted, in which case the *left* value will
be greater than the *right* value.
"""
return tuple(self.viewLim.intervalx)
def _validate_converted_limits(self, limit, convert):
"""
Raise ValueError if converted limits are non-finite.
Note that this function also accepts None as a limit argument.
Returns
-------
        The limit value after the call to convert(), or None if limit is None.
"""
if limit is not None:
converted_limit = convert(limit)
if (isinstance(converted_limit, Real)
and not np.isfinite(converted_limit)):
raise ValueError("Axis limits cannot be NaN or Inf")
return converted_limit
def set_xlim(self, left=None, right=None, emit=True, auto=False,
*, xmin=None, xmax=None):
"""
Set the x-axis view limits.
.. ACCEPTS: (left: float, right: float)
Parameters
----------
left : scalar, optional
The left xlim in data coordinates. Passing *None* leaves the
limit unchanged.
The left and right xlims may be passed as the tuple
(*left*, *right*) as the first positional argument (or as
the *left* keyword argument).
right : scalar, optional
The right xlim in data coordinates. Passing *None* leaves the
limit unchanged.
emit : bool, optional
Whether to notify observers of limit change (default: True).
auto : bool or None, optional
Whether to turn on autoscaling of the x-axis. True turns on,
False turns off (default action), None leaves unchanged.
xmin, xmax : scalar, optional
They are equivalent to left and right respectively,
and it is an error to pass both *xmin* and *left* or
*xmax* and *right*.
Returns
-------
left, right : (float, float)
The new x-axis limits in data coordinates.
See Also
--------
get_xlim
set_xbound, get_xbound
invert_xaxis, xaxis_inverted
Notes
-----
The *left* value may be greater than the *right* value, in which
case the x-axis values will decrease from left to right.
Examples
--------
>>> set_xlim(left, right)
>>> set_xlim((left, right))
>>> left, right = set_xlim(left, right)
One limit may be left unchanged.
>>> set_xlim(right=right_lim)
Limits may be passed in reverse order to flip the direction of
the x-axis. For example, suppose *x* represents the number of
years before present. The x-axis limits might be set like the
following so 5000 years ago is on the left of the plot and the
present is on the right.
>>> set_xlim(5000, 0)
"""
if right is None and np.iterable(left):
left, right = left
if xmin is not None:
if left is not None:
raise TypeError('Cannot pass both `xmin` and `left`')
left = xmin
if xmax is not None:
if right is not None:
raise TypeError('Cannot pass both `xmax` and `right`')
right = xmax
self._process_unit_info(xdata=(left, right))
left = self._validate_converted_limits(left, self.convert_xunits)
right = self._validate_converted_limits(right, self.convert_xunits)
old_left, old_right = self.get_xlim()
if left is None:
left = old_left
if right is None:
right = old_right
if self.get_xscale() == 'log':
if left <= 0:
cbook._warn_external(
'Attempted to set non-positive left xlim on a '
'log-scaled axis.\n'
'Invalid limit will be ignored.')
left = old_left
if right <= 0:
cbook._warn_external(
'Attempted to set non-positive right xlim on a '
'log-scaled axis.\n'
'Invalid limit will be ignored.')
right = old_right
if left == right:
cbook._warn_external(
f"Attempting to set identical left == right == {left} results "
f"in singular transformations; automatically expanding.")
left, right = self.xaxis.get_major_locator().nonsingular(left, right)
left, right = self.xaxis.limit_range_for_scale(left, right)
self.viewLim.intervalx = (left, right)
if auto is not None:
self._autoscaleXon = bool(auto)
if emit:
self.callbacks.process('xlim_changed', self)
# Call all of the other x-axes that are shared with this one
for other in self._shared_x_axes.get_siblings(self):
if other is not self:
other.set_xlim(self.viewLim.intervalx,
emit=False, auto=auto)
if other.figure != self.figure:
other.figure.canvas.draw_idle()
self.stale = True
return left, right
def get_xscale(self):
"""
Return the x-axis scale as string.
See Also
--------
set_xscale
"""
return self.xaxis.get_scale()
def set_xscale(self, value, **kwargs):
"""
Set the x-axis scale.
Parameters
----------
value : {"linear", "log", "symlog", "logit", ...}
The axis scale type to apply.
**kwargs
Different keyword arguments are accepted, depending on the scale.
See the respective class keyword arguments:
- `matplotlib.scale.LinearScale`
- `matplotlib.scale.LogScale`
- `matplotlib.scale.SymmetricalLogScale`
- `matplotlib.scale.LogitScale`
Notes
-----
By default, Matplotlib supports the above mentioned scales.
Additionally, custom scales may be registered using
`matplotlib.scale.register_scale`. These scales can then also
be used here.
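
        Examples
        --------
        A minimal sketch; ``ax`` is assumed to be an existing
        `~matplotlib.axes.Axes`::

            ax.set_xscale('log')
            ax.set_xscale('linear')   # back to the default scale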
"""
g = self.get_shared_x_axes()
for ax in g.get_siblings(self):
ax.xaxis._set_scale(value, **kwargs)
ax._update_transScale()
ax.stale = True
self.autoscale_view(scaley=False)
def get_xticks(self, minor=False):
"""Return the x ticks as a list of locations"""
return self.xaxis.get_ticklocs(minor=minor)
def set_xticks(self, ticks, minor=False):
"""
Set the x ticks with list of *ticks*
Parameters
----------
ticks : list
List of x-axis tick locations.
minor : bool, optional
If ``False`` sets major ticks, if ``True`` sets minor ticks.
Default is ``False``.
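
        Examples
        --------
        A minimal sketch; ``ax`` is assumed to be an existing
        `~matplotlib.axes.Axes`::

            ax.set_xticks([0, 1, 2, 3])                 # major tick locations
            ax.set_xticks([0.5, 1.5, 2.5], minor=True)  # minor tick locations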
"""
ret = self.xaxis.set_ticks(ticks, minor=minor)
self.stale = True
return ret
def get_xmajorticklabels(self):
"""
Get the major x tick labels.
Returns
-------
labels : list
List of `~matplotlib.text.Text` instances
"""
return cbook.silent_list('Text xticklabel',
self.xaxis.get_majorticklabels())
def get_xminorticklabels(self):
"""
Get the minor x tick labels.
Returns
-------
labels : list
List of `~matplotlib.text.Text` instances
"""
return cbook.silent_list('Text xticklabel',
self.xaxis.get_minorticklabels())
def get_xticklabels(self, minor=False, which=None):
"""
Get the x tick labels as a list of `~matplotlib.text.Text` instances.
Parameters
----------
minor : bool, optional
If True return the minor ticklabels,
else return the major ticklabels.
which : None, ('minor', 'major', 'both')
Overrides `minor`.
Selects which ticklabels to return
Returns
-------
ret : list
List of `~matplotlib.text.Text` instances.
"""
return cbook.silent_list('Text xticklabel',
self.xaxis.get_ticklabels(minor=minor,
which=which))
def set_xticklabels(self, labels, fontdict=None, minor=False, **kwargs):
"""
        Set the x-tick labels with a list of string labels.
Parameters
----------
labels : List[str]
List of string labels.
fontdict : dict, optional
A dictionary controlling the appearance of the ticklabels.
The default `fontdict` is::
{'fontsize': rcParams['axes.titlesize'],
'fontweight': rcParams['axes.titleweight'],
'verticalalignment': 'baseline',
'horizontalalignment': loc}
minor : bool, optional
Whether to set the minor ticklabels rather than the major ones.
Returns
-------
A list of `~.text.Text` instances.
        Other Parameters
        ----------------
**kwargs : `~.text.Text` properties.
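
        Examples
        --------
        A minimal sketch; ``ax`` is assumed to be an existing
        `~matplotlib.axes.Axes`. Set the tick locations first so the labels
        line up with the ticks::

            ax.set_xticks([0, 1, 2])
            ax.set_xticklabels(['low', 'mid', 'high'], rotation=45)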
"""
if fontdict is not None:
kwargs.update(fontdict)
ret = self.xaxis.set_ticklabels(labels,
minor=minor, **kwargs)
self.stale = True
return ret
def invert_yaxis(self):
"""
Invert the y-axis.
See Also
--------
yaxis_inverted
get_ylim, set_ylim
get_ybound, set_ybound
"""
self.yaxis.set_inverted(not self.yaxis.get_inverted())
def yaxis_inverted(self):
"""
Return whether the y-axis is inverted.
The axis is inverted if the bottom value is larger than the top value.
See Also
--------
invert_yaxis
get_ylim, set_ylim
get_ybound, set_ybound
"""
return self.yaxis.get_inverted()
def get_ybound(self):
"""
Return the lower and upper y-axis bounds, in increasing order.
See Also
--------
set_ybound
get_ylim, set_ylim
invert_yaxis, yaxis_inverted
"""
bottom, top = self.get_ylim()
if bottom < top:
return bottom, top
else:
return top, bottom
def set_ybound(self, lower=None, upper=None):
"""
Set the lower and upper numerical bounds of the y-axis.
This method will honor axes inversion regardless of parameter order.
It will not change the autoscaling setting (``Axes._autoscaleYon``).
Parameters
----------
lower, upper : float or None
The lower and upper bounds. If *None*, the respective axis bound
is not modified.
See Also
--------
get_ybound
get_ylim, set_ylim
invert_yaxis, yaxis_inverted
"""
if upper is None and np.iterable(lower):
lower, upper = lower
old_lower, old_upper = self.get_ybound()
if lower is None:
lower = old_lower
if upper is None:
upper = old_upper
if self.yaxis_inverted():
if lower < upper:
self.set_ylim(upper, lower, auto=None)
else:
self.set_ylim(lower, upper, auto=None)
else:
if lower < upper:
self.set_ylim(lower, upper, auto=None)
else:
self.set_ylim(upper, lower, auto=None)
def get_ylim(self):
"""
Return the y-axis view limits.
Returns
-------
bottom, top : (float, float)
The current y-axis limits in data coordinates.
See Also
--------
set_ylim
set_ybound, get_ybound
invert_yaxis, yaxis_inverted
Notes
-----
The y-axis may be inverted, in which case the *bottom* value
will be greater than the *top* value.
"""
return tuple(self.viewLim.intervaly)
def set_ylim(self, bottom=None, top=None, emit=True, auto=False,
*, ymin=None, ymax=None):
"""
Set the y-axis view limits.
.. ACCEPTS: (bottom: float, top: float)
Parameters
----------
bottom : scalar, optional
The bottom ylim in data coordinates. Passing *None* leaves the
limit unchanged.
The bottom and top ylims may be passed as the tuple
(*bottom*, *top*) as the first positional argument (or as
the *bottom* keyword argument).
top : scalar, optional
The top ylim in data coordinates. Passing *None* leaves the
limit unchanged.
emit : bool, optional
Whether to notify observers of limit change (default: ``True``).
auto : bool or None, optional
Whether to turn on autoscaling of the y-axis. *True* turns on,
*False* turns off (default action), *None* leaves unchanged.
ymin, ymax : scalar, optional
They are equivalent to bottom and top respectively,
and it is an error to pass both *ymin* and *bottom* or
*ymax* and *top*.
Returns
-------
bottom, top : (float, float)
The new y-axis limits in data coordinates.
See Also
--------
get_ylim
set_ybound, get_ybound
invert_yaxis, yaxis_inverted
Notes
-----
The *bottom* value may be greater than the *top* value, in which
case the y-axis values will decrease from *bottom* to *top*.
Examples
--------
>>> set_ylim(bottom, top)
>>> set_ylim((bottom, top))
>>> bottom, top = set_ylim(bottom, top)
One limit may be left unchanged.
>>> set_ylim(top=top_lim)
Limits may be passed in reverse order to flip the direction of
the y-axis. For example, suppose ``y`` represents depth of the
ocean in m. The y-axis limits might be set like the following
so 5000 m depth is at the bottom of the plot and the surface,
0 m, is at the top.
>>> set_ylim(5000, 0)
"""
if top is None and np.iterable(bottom):
bottom, top = bottom
if ymin is not None:
if bottom is not None:
raise TypeError('Cannot pass both `ymin` and `bottom`')
bottom = ymin
if ymax is not None:
if top is not None:
raise TypeError('Cannot pass both `ymax` and `top`')
top = ymax
self._process_unit_info(ydata=(bottom, top))
bottom = self._validate_converted_limits(bottom, self.convert_yunits)
top = self._validate_converted_limits(top, self.convert_yunits)
old_bottom, old_top = self.get_ylim()
if bottom is None:
bottom = old_bottom
if top is None:
top = old_top
if self.get_yscale() == 'log':
if bottom <= 0:
cbook._warn_external(
'Attempted to set non-positive bottom ylim on a '
'log-scaled axis.\n'
'Invalid limit will be ignored.')
bottom = old_bottom
if top <= 0:
cbook._warn_external(
'Attempted to set non-positive top ylim on a '
'log-scaled axis.\n'
'Invalid limit will be ignored.')
top = old_top
if bottom == top:
cbook._warn_external(
f"Attempting to set identical bottom == top == {bottom} "
f"results in singular transformations; automatically "
f"expanding.")
bottom, top = self.yaxis.get_major_locator().nonsingular(bottom, top)
bottom, top = self.yaxis.limit_range_for_scale(bottom, top)
self.viewLim.intervaly = (bottom, top)
if auto is not None:
self._autoscaleYon = bool(auto)
if emit:
self.callbacks.process('ylim_changed', self)
# Call all of the other y-axes that are shared with this one
for other in self._shared_y_axes.get_siblings(self):
if other is not self:
other.set_ylim(self.viewLim.intervaly,
emit=False, auto=auto)
if other.figure != self.figure:
other.figure.canvas.draw_idle()
self.stale = True
return bottom, top
def get_yscale(self):
"""
Return the y-axis scale as string.
See Also
--------
set_yscale
"""
return self.yaxis.get_scale()
def set_yscale(self, value, **kwargs):
"""
Set the y-axis scale.
Parameters
----------
value : {"linear", "log", "symlog", "logit", ...}
The axis scale type to apply.
**kwargs
Different keyword arguments are accepted, depending on the scale.
See the respective class keyword arguments:
- `matplotlib.scale.LinearScale`
- `matplotlib.scale.LogScale`
- `matplotlib.scale.SymmetricalLogScale`
- `matplotlib.scale.LogitScale`
Notes
-----
By default, Matplotlib supports the above mentioned scales.
Additionally, custom scales may be registered using
`matplotlib.scale.register_scale`. These scales can then also
be used here.
"""
g = self.get_shared_y_axes()
for ax in g.get_siblings(self):
ax.yaxis._set_scale(value, **kwargs)
ax._update_transScale()
ax.stale = True
self.autoscale_view(scalex=False)
def get_yticks(self, minor=False):
"""Return the y ticks as a list of locations"""
return self.yaxis.get_ticklocs(minor=minor)
def set_yticks(self, ticks, minor=False):
"""
Set the y ticks with list of *ticks*
Parameters
----------
ticks : list
List of y-axis tick locations
minor : bool, optional
If ``False`` sets major ticks, if ``True`` sets minor ticks.
Default is ``False``.
"""
ret = self.yaxis.set_ticks(ticks, minor=minor)
return ret
def get_ymajorticklabels(self):
"""
Get the major y tick labels.
Returns
-------
labels : list
List of `~matplotlib.text.Text` instances
"""
return cbook.silent_list('Text yticklabel',
self.yaxis.get_majorticklabels())
def get_yminorticklabels(self):
"""
Get the minor y tick labels.
Returns
-------
labels : list
List of `~matplotlib.text.Text` instances
"""
return cbook.silent_list('Text yticklabel',
self.yaxis.get_minorticklabels())
def get_yticklabels(self, minor=False, which=None):
"""
Get the y tick labels as a list of `~matplotlib.text.Text` instances.
Parameters
----------
minor : bool
If True return the minor ticklabels,
else return the major ticklabels
which : None, ('minor', 'major', 'both')
Overrides `minor`.
Selects which ticklabels to return
Returns
-------
ret : list
List of `~matplotlib.text.Text` instances.
"""
return cbook.silent_list('Text yticklabel',
self.yaxis.get_ticklabels(minor=minor,
which=which))
def set_yticklabels(self, labels, fontdict=None, minor=False, **kwargs):
"""
        Set the y-tick labels with a list of string labels.
Parameters
----------
        labels : List[str]
            List of string labels.
fontdict : dict, optional
A dictionary controlling the appearance of the ticklabels.
The default `fontdict` is::
{'fontsize': rcParams['axes.titlesize'],
'fontweight': rcParams['axes.titleweight'],
'verticalalignment': 'baseline',
'horizontalalignment': loc}
minor : bool, optional
Whether to set the minor ticklabels rather than the major ones.
Returns
-------
A list of `~.text.Text` instances.
Other Parameters
----------------
**kwargs : `~.text.Text` properties.
"""
if fontdict is not None:
kwargs.update(fontdict)
return self.yaxis.set_ticklabels(labels,
minor=minor, **kwargs)
def xaxis_date(self, tz=None):
"""
Sets up x-axis ticks and labels that treat the x data as dates.
Parameters
----------
tz : string or `tzinfo` instance, optional
Timezone. Defaults to :rc:`timezone`.
"""
# should be enough to inform the unit conversion interface
# dates are coming in
self.xaxis.axis_date(tz)
def yaxis_date(self, tz=None):
"""
Sets up y-axis ticks and labels that treat the y data as dates.
Parameters
----------
tz : string or `tzinfo` instance, optional
Timezone. Defaults to :rc:`timezone`.
"""
self.yaxis.axis_date(tz)
def format_xdata(self, x):
"""
Return *x* formatted as an x-value.
This function will use the `.fmt_xdata` attribute if it is not None,
else will fall back on the xaxis major formatter.
"""
return (self.fmt_xdata if self.fmt_xdata is not None
else self.xaxis.get_major_formatter().format_data_short)(x)
def format_ydata(self, y):
"""
        Return *y* formatted as a y-value.
This function will use the `.fmt_ydata` attribute if it is not None,
else will fall back on the yaxis major formatter.
"""
return (self.fmt_ydata if self.fmt_ydata is not None
else self.yaxis.get_major_formatter().format_data_short)(y)
def format_coord(self, x, y):
"""Return a format string formatting the *x*, *y* coordinates."""
if x is None:
xs = '???'
else:
xs = self.format_xdata(x)
if y is None:
ys = '???'
else:
ys = self.format_ydata(y)
return 'x=%s y=%s' % (xs, ys)
def minorticks_on(self):
"""
Display minor ticks on the axes.
Displaying minor ticks may reduce performance; you may turn them off
using `minorticks_off()` if drawing speed is a problem.
"""
for ax in (self.xaxis, self.yaxis):
scale = ax.get_scale()
if scale == 'log':
s = ax._scale
ax.set_minor_locator(mticker.LogLocator(s.base, s.subs))
elif scale == 'symlog':
s = ax._scale
ax.set_minor_locator(
mticker.SymmetricalLogLocator(s._transform, s.subs))
else:
ax.set_minor_locator(mticker.AutoMinorLocator())
def minorticks_off(self):
"""Remove minor ticks from the axes."""
self.xaxis.set_minor_locator(mticker.NullLocator())
self.yaxis.set_minor_locator(mticker.NullLocator())
# Interactive manipulation
def can_zoom(self):
"""
Return *True* if this axes supports the zoom box button functionality.
"""
return True
def can_pan(self):
"""
Return *True* if this axes supports any pan/zoom button functionality.
"""
return True
def get_navigate(self):
"""
Get whether the axes responds to navigation commands
"""
return self._navigate
def set_navigate(self, b):
"""
Set whether the axes responds to navigation toolbar commands
Parameters
----------
b : bool
"""
self._navigate = b
def get_navigate_mode(self):
"""
Get the navigation toolbar button status: 'PAN', 'ZOOM', or None
"""
return self._navigate_mode
def set_navigate_mode(self, b):
"""
Set the navigation toolbar button status;
.. warning::
this is not a user-API function.
"""
self._navigate_mode = b
def _get_view(self):
"""
Save information required to reproduce the current view.
Called before a view is changed, such as during a pan or zoom
initiated by the user. You may return any information you deem
necessary to describe the view.
.. note::
Intended to be overridden by new projection types, but if not, the
default implementation saves the view limits. You *must* implement
:meth:`_set_view` if you implement this method.
"""
xmin, xmax = self.get_xlim()
ymin, ymax = self.get_ylim()
return (xmin, xmax, ymin, ymax)
def _set_view(self, view):
"""
Apply a previously saved view.
Called when restoring a view, such as with the navigation buttons.
.. note::
Intended to be overridden by new projection types, but if not, the
default implementation restores the view limits. You *must*
implement :meth:`_get_view` if you implement this method.
"""
xmin, xmax, ymin, ymax = view
self.set_xlim((xmin, xmax))
self.set_ylim((ymin, ymax))
def _set_view_from_bbox(self, bbox, direction='in',
mode=None, twinx=False, twiny=False):
"""
Update view from a selection bbox.
.. note::
Intended to be overridden by new projection types, but if not, the
default implementation sets the view limits to the bbox directly.
Parameters
----------
        bbox : 4-tuple or 3-tuple
            * If bbox is a 4-tuple, it is the selected bounding box limits,
              in *display* coordinates.
            * If bbox is a 3-tuple, it is an (xp, yp, scl) triple, where
              (xp, yp) is the center of zooming and scl is the scale factor
              to zoom by.
direction : str
The direction to apply the bounding box.
* `'in'` - The bounding box describes the view directly, i.e.,
it zooms in.
* `'out'` - The bounding box describes the size to make the
existing view, i.e., it zooms out.
mode : str or None
The selection mode, whether to apply the bounding box in only the
`'x'` direction, `'y'` direction or both (`None`).
twinx : bool
Whether this axis is twinned in the *x*-direction.
twiny : bool
Whether this axis is twinned in the *y*-direction.
"""
Xmin, Xmax = self.get_xlim()
Ymin, Ymax = self.get_ylim()
if len(bbox) == 3:
# Zooming code
xp, yp, scl = bbox
# Should not happen
if scl == 0:
scl = 1.
# direction = 'in'
if scl > 1:
direction = 'in'
else:
direction = 'out'
scl = 1/scl
# get the limits of the axes
tranD2C = self.transData.transform
xmin, ymin = tranD2C((Xmin, Ymin))
xmax, ymax = tranD2C((Xmax, Ymax))
# set the range
xwidth = xmax - xmin
ywidth = ymax - ymin
xcen = (xmax + xmin)*.5
ycen = (ymax + ymin)*.5
xzc = (xp*(scl - 1) + xcen)/scl
yzc = (yp*(scl - 1) + ycen)/scl
bbox = [xzc - xwidth/2./scl, yzc - ywidth/2./scl,
xzc + xwidth/2./scl, yzc + ywidth/2./scl]
elif len(bbox) != 4:
# should be len 3 or 4 but nothing else
cbook._warn_external(
"Warning in _set_view_from_bbox: bounding box is not a tuple "
"of length 3 or 4. Ignoring the view change.")
return
# Just grab bounding box
lastx, lasty, x, y = bbox
# zoom to rect
inverse = self.transData.inverted()
lastx, lasty = inverse.transform_point((lastx, lasty))
x, y = inverse.transform_point((x, y))
if twinx:
x0, x1 = Xmin, Xmax
else:
if Xmin < Xmax:
if x < lastx:
x0, x1 = x, lastx
else:
x0, x1 = lastx, x
if x0 < Xmin:
x0 = Xmin
if x1 > Xmax:
x1 = Xmax
else:
if x > lastx:
x0, x1 = x, lastx
else:
x0, x1 = lastx, x
if x0 > Xmin:
x0 = Xmin
if x1 < Xmax:
x1 = Xmax
if twiny:
y0, y1 = Ymin, Ymax
else:
if Ymin < Ymax:
if y < lasty:
y0, y1 = y, lasty
else:
y0, y1 = lasty, y
if y0 < Ymin:
y0 = Ymin
if y1 > Ymax:
y1 = Ymax
else:
if y > lasty:
y0, y1 = y, lasty
else:
y0, y1 = lasty, y
if y0 > Ymin:
y0 = Ymin
if y1 < Ymax:
y1 = Ymax
if direction == 'in':
if mode == 'x':
self.set_xlim((x0, x1))
elif mode == 'y':
self.set_ylim((y0, y1))
else:
self.set_xlim((x0, x1))
self.set_ylim((y0, y1))
elif direction == 'out':
if self.get_xscale() == 'log':
alpha = np.log(Xmax / Xmin) / np.log(x1 / x0)
rx1 = pow(Xmin / x0, alpha) * Xmin
rx2 = pow(Xmax / x0, alpha) * Xmin
else:
alpha = (Xmax - Xmin) / (x1 - x0)
rx1 = alpha * (Xmin - x0) + Xmin
rx2 = alpha * (Xmax - x0) + Xmin
if self.get_yscale() == 'log':
alpha = np.log(Ymax / Ymin) / np.log(y1 / y0)
ry1 = pow(Ymin / y0, alpha) * Ymin
ry2 = pow(Ymax / y0, alpha) * Ymin
else:
alpha = (Ymax - Ymin) / (y1 - y0)
ry1 = alpha * (Ymin - y0) + Ymin
ry2 = alpha * (Ymax - y0) + Ymin
if mode == 'x':
self.set_xlim((rx1, rx2))
elif mode == 'y':
self.set_ylim((ry1, ry2))
else:
self.set_xlim((rx1, rx2))
self.set_ylim((ry1, ry2))
def start_pan(self, x, y, button):
"""
Called when a pan operation has started.
*x*, *y* are the mouse coordinates in display coords.
button is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
.. note::
Intended to be overridden by new projection types.
"""
self._pan_start = types.SimpleNamespace(
lim=self.viewLim.frozen(),
trans=self.transData.frozen(),
trans_inverse=self.transData.inverted().frozen(),
bbox=self.bbox.frozen(),
x=x,
y=y)
def end_pan(self):
"""
Called when a pan operation completes (when the mouse button
is up.)
.. note::
Intended to be overridden by new projection types.
"""
del self._pan_start
def drag_pan(self, button, key, x, y):
"""
Called when the mouse moves during a pan operation.
*button* is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
*key* is a "shift" key
*x*, *y* are the mouse coordinates in display coords.
.. note::
Intended to be overridden by new projection types.
"""
def format_deltas(key, dx, dy):
if key == 'control':
if abs(dx) > abs(dy):
dy = dx
else:
dx = dy
elif key == 'x':
dy = 0
elif key == 'y':
dx = 0
elif key == 'shift':
if 2 * abs(dx) < abs(dy):
dx = 0
elif 2 * abs(dy) < abs(dx):
dy = 0
elif abs(dx) > abs(dy):
dy = dy / abs(dy) * abs(dx)
else:
dx = dx / abs(dx) * abs(dy)
return dx, dy
p = self._pan_start
dx = x - p.x
dy = y - p.y
if dx == dy == 0:
return
if button == 1:
dx, dy = format_deltas(key, dx, dy)
result = p.bbox.translated(-dx, -dy).transformed(p.trans_inverse)
elif button == 3:
try:
dx = -dx / self.bbox.width
dy = -dy / self.bbox.height
dx, dy = format_deltas(key, dx, dy)
if self.get_aspect() != 'auto':
dx = dy = 0.5 * (dx + dy)
alpha = np.power(10.0, (dx, dy))
start = np.array([p.x, p.y])
oldpoints = p.lim.transformed(p.trans)
newpoints = start + alpha * (oldpoints - start)
result = (mtransforms.Bbox(newpoints)
.transformed(p.trans_inverse))
except OverflowError:
cbook._warn_external('Overflow while panning')
return
else:
return
valid = np.isfinite(result.transformed(p.trans))
points = result.get_points().astype(object)
# Just ignore invalid limits (typically, underflow in log-scale).
points[~valid] = None
self.set_xlim(points[:, 0])
self.set_ylim(points[:, 1])
def get_children(self):
# docstring inherited.
return [
*self.collections,
*self.patches,
*self.lines,
*self.texts,
*self.artists,
*self.spines.values(),
*self._get_axis_list(),
self.title, self._left_title, self._right_title,
*self.tables,
*self.images,
*self.child_axes,
*([self.legend_] if self.legend_ is not None else []),
self.patch,
]
def contains(self, mouseevent):
# docstring inherited.
if self._contains is not None:
return self._contains(self, mouseevent)
return self.patch.contains(mouseevent)
def contains_point(self, point):
"""
Return whether *point* (pair of pixel coordinates) is inside the axes
patch.
"""
return self.patch.contains_point(point, radius=1.0)
def get_default_bbox_extra_artists(self):
"""
Return a default list of artists that are used for the bounding box
calculation.
        Artists are excluded if they are not visible or if
        ``artist.set_in_layout(False)`` has been called on them.
"""
artists = self.get_children()
if not (self.axison and self._frameon):
# don't do bbox on spines if frame not on.
for spine in self.spines.values():
artists.remove(spine)
if not self.axison:
for _axis in self._get_axis_list():
artists.remove(_axis)
return [artist for artist in artists
if (artist.get_visible() and artist.get_in_layout())]
def get_tightbbox(self, renderer, call_axes_locator=True,
bbox_extra_artists=None):
"""
Return the tight bounding box of the axes, including axis and their
decorators (xlabel, title, etc).
Artists that have ``artist.set_in_layout(False)`` are not included
in the bbox.
Parameters
----------
renderer : `.RendererBase` instance
renderer that will be used to draw the figures (i.e.
``fig.canvas.get_renderer()``)
bbox_extra_artists : list of `.Artist` or ``None``
List of artists to include in the tight bounding box. If
``None`` (default), then all artist children of the axes are
included in the tight bounding box.
call_axes_locator : boolean (default ``True``)
If *call_axes_locator* is ``False``, it does not call the
``_axes_locator`` attribute, which is necessary to get the correct
bounding box. ``call_axes_locator=False`` can be used if the
caller is only interested in the relative size of the tightbbox
compared to the axes bbox.
Returns
-------
bbox : `.BboxBase`
bounding box in figure pixel coordinates.
See Also
--------
        matplotlib.axes.Axes.get_window_extent
        matplotlib.axis.Axis.get_tightbbox
        matplotlib.spines.Spine.get_window_extent
"""
bb = []
if not self.get_visible():
return None
locator = self.get_axes_locator()
if locator and call_axes_locator:
pos = locator(self, renderer)
self.apply_aspect(pos)
else:
self.apply_aspect()
if self.axison:
bb_xaxis = self.xaxis.get_tightbbox(renderer)
if bb_xaxis:
bb.append(bb_xaxis)
bb_yaxis = self.yaxis.get_tightbbox(renderer)
if bb_yaxis:
bb.append(bb_yaxis)
self._update_title_position(renderer)
bb.append(self.get_window_extent(renderer))
if self.title.get_visible():
bb.append(self.title.get_window_extent(renderer))
if self._left_title.get_visible():
bb.append(self._left_title.get_window_extent(renderer))
if self._right_title.get_visible():
bb.append(self._right_title.get_window_extent(renderer))
bbox_artists = bbox_extra_artists
if bbox_artists is None:
bbox_artists = self.get_default_bbox_extra_artists()
for a in bbox_artists:
bbox = a.get_tightbbox(renderer)
if (bbox is not None
and 0 < bbox.width < np.inf
and 0 < bbox.height < np.inf):
bb.append(bbox)
_bbox = mtransforms.Bbox.union(
[b for b in bb if b.width != 0 or b.height != 0])
return _bbox
def _make_twin_axes(self, *kl, **kwargs):
"""
Make a twinx axes of self. This is used for twinx and twiny.
"""
# Typically, SubplotBase._make_twin_axes is called instead of this.
# There is also an override in axes_grid1/axes_divider.py.
if 'sharex' in kwargs and 'sharey' in kwargs:
raise ValueError("Twinned Axes may share only one axis.")
ax2 = self.figure.add_axes(self.get_position(True), *kl, **kwargs)
self.set_adjustable('datalim')
ax2.set_adjustable('datalim')
self._twinned_axes.join(self, ax2)
return ax2
def twinx(self):
"""
Create a twin Axes sharing the xaxis
Create a new Axes instance with an invisible x-axis and an independent
y-axis positioned opposite to the original one (i.e. at right). The
x-axis autoscale setting will be inherited from the original Axes.
To ensure that the tick marks of both y-axes align, see
`~matplotlib.ticker.LinearLocator`
Returns
-------
ax_twin : Axes
The newly created Axes instance
Notes
-----
For those who are 'picking' artists while using twinx, pick
events are only called for the artists in the top-most axes.
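        Examples
        --------
        A minimal sketch (assuming ``plt`` refers to ``matplotlib.pyplot``)::

            fig, ax = plt.subplots()
            ax.plot([0, 1, 2], [10, 20, 15], color='tab:blue')
            ax2 = ax.twinx()   # shares the x-axis; its y-axis is on the right
            ax2.plot([0, 1, 2], [0.1, 0.4, 0.2], color='tab:red')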
"""
ax2 = self._make_twin_axes(sharex=self)
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position('right')
ax2.yaxis.set_offset_position('right')
ax2.set_autoscalex_on(self.get_autoscalex_on())
self.yaxis.tick_left()
ax2.xaxis.set_visible(False)
ax2.patch.set_visible(False)
return ax2
def twiny(self):
"""
Create a twin Axes sharing the yaxis
Create a new Axes instance with an invisible y-axis and an independent
x-axis positioned opposite to the original one (i.e. at top). The
y-axis autoscale setting will be inherited from the original Axes.
To ensure that the tick marks of both x-axes align, see
`~matplotlib.ticker.LinearLocator`
Returns
-------
ax_twin : Axes
The newly created Axes instance
Notes
-----
For those who are 'picking' artists while using twiny, pick
events are only called for the artists in the top-most axes.
"""
ax2 = self._make_twin_axes(sharey=self)
ax2.xaxis.tick_top()
ax2.xaxis.set_label_position('top')
ax2.set_autoscaley_on(self.get_autoscaley_on())
self.xaxis.tick_bottom()
ax2.yaxis.set_visible(False)
ax2.patch.set_visible(False)
return ax2
def get_shared_x_axes(self):
"""Return a reference to the shared axes Grouper object for x axes."""
return self._shared_x_axes
def get_shared_y_axes(self):
"""Return a reference to the shared axes Grouper object for y axes."""
return self._shared_y_axes
|
9e942e06fb3fc440441943f6f81059cc2ade96c01062007553dea44994d2588b
|
from ._subplots import *
from ._axes import *
|
6c4d3e43e8b4842976f388f41a131f1269c5f9131f8ab688a4e98760117c7e83
|
import collections.abc
import functools
import itertools
import logging
import math
import operator
from numbers import Number
import numpy as np
from numpy import ma
from matplotlib import _preprocess_data, rcParams
import matplotlib.cbook as cbook
import matplotlib.collections as mcoll
import matplotlib.colors as mcolors
import matplotlib.contour as mcontour
import matplotlib.category as _ # <-registers a category unit converter
import matplotlib.dates as _ # <-registers a date unit converter
import matplotlib.docstring as docstring
import matplotlib.image as mimage
import matplotlib.legend as mlegend
import matplotlib.lines as mlines
import matplotlib.markers as mmarkers
import matplotlib.mlab as mlab
import matplotlib.path as mpath
import matplotlib.patches as mpatches
import matplotlib.quiver as mquiver
import matplotlib.stackplot as mstack
import matplotlib.streamplot as mstream
import matplotlib.table as mtable
import matplotlib.text as mtext
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
import matplotlib.tri as mtri
from matplotlib.container import BarContainer, ErrorbarContainer, StemContainer
from matplotlib.axes._base import _AxesBase, _process_plot_format
from matplotlib.axes._secondary_axes import SecondaryAxis
try:
from numpy.lib.histograms import histogram_bin_edges
except ImportError:
# this function is new in np 1.15
def histogram_bin_edges(arr, bins, range=None, weights=None):
        # this is True for 1D arrays, and False for None and str
if np.ndim(bins) == 1:
return bins
if isinstance(bins, str):
# rather than backporting the internals, just do the full
# computation. If this is too slow for users, they can
# update numpy, or pick a manual number of bins
return np.histogram(arr, bins, range, weights)[1]
else:
if bins is None:
# hard-code numpy's default
bins = 10
if range is None:
range = np.min(arr), np.max(arr)
return np.linspace(*range, bins + 1)
_log = logging.getLogger(__name__)
def _make_inset_locator(bounds, trans, parent):
"""
Helper function to locate inset axes, used in
`.Axes.inset_axes`.
A locator gets used in `Axes.set_aspect` to override the default
locations... It is a function that takes an axes object and
a renderer and tells `set_aspect` where it is to be placed.
Here *rect* is a rectangle [l, b, w, h] that specifies the
location for the axes in the transform given by *trans* on the
*parent*.
"""
_bounds = mtransforms.Bbox.from_bounds(*bounds)
_trans = trans
_parent = parent
def inset_locator(ax, renderer):
bbox = _bounds
bb = mtransforms.TransformedBbox(bbox, _trans)
tr = _parent.figure.transFigure.inverted()
bb = mtransforms.TransformedBbox(bb, tr)
return bb
return inset_locator
# The axes module contains all the wrappers to plotting functions.
# All the other methods should go in the _AxesBase class.
class Axes(_AxesBase):
"""
The `Axes` contains most of the figure elements: `~.axis.Axis`,
`~.axis.Tick`, `~.lines.Line2D`, `~.text.Text`, `~.patches.Polygon`, etc.,
and sets the coordinate system.
The `Axes` instance supports callbacks through a callbacks attribute which
is a `~.cbook.CallbackRegistry` instance. The events you can connect to
are 'xlim_changed' and 'ylim_changed' and the callback will be called with
func(*ax*) where *ax* is the `Axes` instance.
Attributes
----------
dataLim : `.BBox`
The bounding box enclosing all data displayed in the Axes.
viewLim : `.BBox`
The view limits in data coordinates.
"""
### Labelling, legend and texts
@cbook.deprecated("3.1")
@property
def aname(self):
return 'Axes'
def get_title(self, loc="center"):
"""
Get an axes title.
Get one of the three available axes titles. The available titles
are positioned above the axes in the center, flush with the left
edge, and flush with the right edge.
Parameters
----------
loc : {'center', 'left', 'right'}, str, optional
Which title to get, defaults to 'center'.
Returns
-------
title : str
The title text string.
"""
try:
title = {'left': self._left_title,
'center': self.title,
'right': self._right_title}[loc.lower()]
except KeyError:
raise ValueError("'%s' is not a valid location" % loc)
return title.get_text()
def set_title(self, label, fontdict=None, loc=None, pad=None,
**kwargs):
"""
Set a title for the axes.
Set one of the three available axes titles. The available titles
are positioned above the axes in the center, flush with the left
edge, and flush with the right edge.
Parameters
----------
label : str
Text to use for the title
fontdict : dict
A dictionary controlling the appearance of the title text,
the default `fontdict` is::
{'fontsize': rcParams['axes.titlesize'],
'fontweight' : rcParams['axes.titleweight'],
'verticalalignment': 'baseline',
'horizontalalignment': loc}
loc : {'center', 'left', 'right'}, str, optional
Which title to set, defaults to rcParams['axes.titlelocation']
pad : float
The offset of the title from the top of the axes, in points.
Default is ``None`` to use rcParams['axes.titlepad'].
Returns
-------
text : :class:`~matplotlib.text.Text`
The matplotlib text instance representing the title
Other Parameters
----------------
**kwargs : `~matplotlib.text.Text` properties
Other keyword arguments are text properties, see
:class:`~matplotlib.text.Text` for a list of valid text
properties.
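        Examples
        --------
        A minimal sketch (assuming ``ax`` is an existing `Axes`)::

            ax.set_title('Left-aligned title', loc='left', pad=12)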
"""
try:
if loc is None:
loc = rcParams['axes.titlelocation']
title = {'left': self._left_title,
'center': self.title,
'right': self._right_title}[loc.lower()]
except KeyError:
raise ValueError("'%s' is not a valid location" % loc)
default = {
'fontsize': rcParams['axes.titlesize'],
'fontweight': rcParams['axes.titleweight'],
'verticalalignment': 'baseline',
'horizontalalignment': loc.lower()}
if pad is None:
pad = rcParams['axes.titlepad']
self._set_title_offset_trans(float(pad))
title.set_text(label)
title.update(default)
if fontdict is not None:
title.update(fontdict)
title.update(kwargs)
return title
def get_xlabel(self):
"""
Get the xlabel text string.
"""
label = self.xaxis.get_label()
return label.get_text()
def set_xlabel(self, xlabel, fontdict=None, labelpad=None, **kwargs):
"""
Set the label for the x-axis.
Parameters
----------
xlabel : str
The label text.
labelpad : scalar, optional, default: None
Spacing in points from the axes bounding box including ticks
and tick labels.
Other Parameters
----------------
**kwargs : `.Text` properties
`.Text` properties control the appearance of the label.
See also
--------
        text : for information on how the override mechanism and the optional
            arguments work
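        Examples
        --------
        A minimal sketch (assuming ``ax`` is an existing `Axes`)::

            ax.set_xlabel('time [s]', labelpad=10, fontsize=12)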
"""
if labelpad is not None:
self.xaxis.labelpad = labelpad
return self.xaxis.set_label_text(xlabel, fontdict, **kwargs)
def get_ylabel(self):
"""
Get the ylabel text string.
"""
label = self.yaxis.get_label()
return label.get_text()
def set_ylabel(self, ylabel, fontdict=None, labelpad=None, **kwargs):
"""
Set the label for the y-axis.
Parameters
----------
ylabel : str
The label text.
labelpad : scalar, optional, default: None
Spacing in points from the axes bounding box including ticks
and tick labels.
Other Parameters
----------------
**kwargs : `.Text` properties
`.Text` properties control the appearance of the label.
See also
--------
        text : for information on how the override mechanism and the optional
            arguments work
"""
if labelpad is not None:
self.yaxis.labelpad = labelpad
return self.yaxis.set_label_text(ylabel, fontdict, **kwargs)
def get_legend_handles_labels(self, legend_handler_map=None):
"""
Return handles and labels for legend
``ax.legend()`` is equivalent to ::
h, l = ax.get_legend_handles_labels()
ax.legend(h, l)
"""
# pass through to legend.
handles, labels = mlegend._get_legend_handles_labels([self],
legend_handler_map)
return handles, labels
@docstring.dedent_interpd
def legend(self, *args, **kwargs):
"""
Place a legend on the axes.
Call signatures::
legend()
legend(labels)
legend(handles, labels)
        The call signatures correspond to three different ways of using
this method.
**1. Automatic detection of elements to be shown in the legend**
The elements to be added to the legend are automatically determined,
when you do not pass in any extra arguments.
In this case, the labels are taken from the artist. You can specify
them either at artist creation or by calling the
:meth:`~.Artist.set_label` method on the artist::
line, = ax.plot([1, 2, 3], label='Inline label')
ax.legend()
or::
line.set_label('Label via method')
line, = ax.plot([1, 2, 3])
ax.legend()
Specific lines can be excluded from the automatic legend element
selection by defining a label starting with an underscore.
        This is the default for all artists, so calling `Axes.legend` without
any arguments and without setting the labels manually will result in
no legend being drawn.
**2. Labeling existing plot elements**
To make a legend for lines which already exist on the axes
(via plot for instance), simply call this function with an iterable
of strings, one for each legend item. For example::
ax.plot([1, 2, 3])
ax.legend(['A simple line'])
        Note: This way of using the method is discouraged, because the
        relation between plot elements and labels is only implicit by their
        order and can
easily be mixed up.
**3. Explicitly defining the elements in the legend**
For full control of which artists have a legend entry, it is possible
to pass an iterable of legend artists followed by an iterable of
legend labels respectively::
legend((line1, line2, line3), ('label1', 'label2', 'label3'))
Parameters
----------
handles : sequence of `.Artist`, optional
A list of Artists (lines, patches) to be added to the legend.
Use this together with *labels*, if you need full control on what
is shown in the legend and the automatic mechanism described above
is not sufficient.
The length of handles and labels should be the same in this
case. If they are not, they are truncated to the smaller length.
labels : sequence of strings, optional
A list of labels to show next to the artists.
Use this together with *handles*, if you need full control on what
is shown in the legend and the automatic mechanism described above
is not sufficient.
Other Parameters
----------------
%(_legend_kw_doc)s
Returns
-------
:class:`matplotlib.legend.Legend` instance
Notes
-----
Not all kinds of artist are supported by the legend command. See
:doc:`/tutorials/intermediate/legend_guide` for details.
Examples
--------
.. plot:: gallery/text_labels_and_annotations/legend.py
"""
handles, labels, extra_args, kwargs = mlegend._parse_legend_args(
[self],
*args,
**kwargs)
if len(extra_args):
raise TypeError('legend only accepts two non-keyword arguments')
self.legend_ = mlegend.Legend(self, handles, labels, **kwargs)
self.legend_._remove_method = self._remove_legend
return self.legend_
def _remove_legend(self, legend):
self.legend_ = None
def inset_axes(self, bounds, *, transform=None, zorder=5,
**kwargs):
"""
Add a child inset axes to this existing axes.
Warnings
--------
This method is experimental as of 3.0, and the API may change.
Parameters
----------
bounds : [x0, y0, width, height]
Lower-left corner of inset axes, and its width and height.
transform : `.Transform`
            Defaults to `ax.transAxes`, i.e. the units of *bounds* are in
axes-relative coordinates.
zorder : number
Defaults to 5 (same as `.Axes.legend`). Adjust higher or lower
to change whether it is above or below data plotted on the
parent axes.
**kwargs
Other *kwargs* are passed on to the `axes.Axes` child axes.
Returns
-------
Axes
The created `.axes.Axes` instance.
Examples
--------
This example makes two inset axes, the first is in axes-relative
coordinates, and the second in data-coordinates::
fig, ax = plt.subplots()
ax.plot(range(10))
axin1 = ax.inset_axes([0.8, 0.1, 0.15, 0.15])
axin2 = ax.inset_axes(
[5, 7, 2.3, 2.3], transform=ax.transData)
"""
if transform is None:
transform = self.transAxes
label = kwargs.pop('label', 'inset_axes')
# This puts the rectangle into figure-relative coordinates.
inset_locator = _make_inset_locator(bounds, transform, self)
bb = inset_locator(None, None)
inset_ax = Axes(self.figure, bb.bounds, zorder=zorder,
label=label, **kwargs)
# this locator lets the axes move if in data coordinates.
        # it gets called in `ax.apply_aspect()` (of all places).
inset_ax.set_axes_locator(inset_locator)
self.add_child_axes(inset_ax)
return inset_ax
def indicate_inset(self, bounds, inset_ax=None, *, transform=None,
facecolor='none', edgecolor='0.5', alpha=0.5,
zorder=4.99, **kwargs):
"""
Add an inset indicator to the axes. This is a rectangle on the plot
at the position indicated by *bounds* that optionally has lines that
connect the rectangle to an inset axes
(`.Axes.inset_axes`).
Warnings
--------
This method is experimental as of 3.0, and the API may change.
Parameters
----------
bounds : [x0, y0, width, height]
Lower-left corner of rectangle to be marked, and its width
and height.
inset_ax : `.Axes`
An optional inset axes to draw connecting lines to. Two lines are
drawn connecting the indicator box to the inset axes on corners
chosen so as to not overlap with the indicator box.
transform : `.Transform`
            Transform for the rectangle coordinates. Defaults to
            `ax.transAxes`, i.e. the units of *bounds* are in axes-relative
coordinates.
facecolor : Matplotlib color
Facecolor of the rectangle (default 'none').
edgecolor : Matplotlib color
Color of the rectangle and color of the connecting lines. Default
is '0.5'.
alpha : number
Transparency of the rectangle and connector lines. Default is 0.5.
zorder : number
Drawing order of the rectangle and connector lines. Default is 4.99
(just below the default level of inset axes).
**kwargs
Other *kwargs* are passed on to the rectangle patch.
Returns
-------
        rectangle_patch : `.patches.Rectangle`
            Rectangle artist.
        connector_lines : 4-tuple of `.patches.ConnectionPatch`
One for each of four connector lines. Two are set with visibility
to *False*, but the user can set the visibility to True if the
automatic choice is not deemed correct.
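        Examples
        --------
        A short sketch (assuming ``plt`` refers to ``matplotlib.pyplot``)::

            fig, ax = plt.subplots()
            ax.plot(range(10))
            axins = ax.inset_axes([0.6, 0.6, 0.35, 0.35])
            rect, connectors = ax.indicate_inset(
                [2, 2, 3, 3], inset_ax=axins)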
"""
# to make the axes connectors work, we need to apply the aspect to
# the parent axes.
self.apply_aspect()
if transform is None:
transform = self.transData
label = kwargs.pop('label', 'indicate_inset')
xy = (bounds[0], bounds[1])
rectpatch = mpatches.Rectangle(xy, bounds[2], bounds[3],
facecolor=facecolor, edgecolor=edgecolor, alpha=alpha,
zorder=zorder, label=label, transform=transform, **kwargs)
self.add_patch(rectpatch)
        connects = []
        if inset_ax is not None:
            # want to connect the indicator to the rect....
xr = [bounds[0], bounds[0]+bounds[2]]
yr = [bounds[1], bounds[1]+bounds[3]]
for xc in range(2):
for yc in range(2):
xyA = (xc, yc)
xyB = (xr[xc], yr[yc])
connects += [mpatches.ConnectionPatch(xyA, xyB,
'axes fraction', 'data',
axesA=inset_ax, axesB=self, arrowstyle="-",
zorder=zorder, edgecolor=edgecolor, alpha=alpha)]
self.add_patch(connects[-1])
# decide which two of the lines to keep visible....
pos = inset_ax.get_position()
bboxins = pos.transformed(self.figure.transFigure)
rectbbox = mtransforms.Bbox.from_bounds(
*bounds).transformed(transform)
x0 = rectbbox.x0 < bboxins.x0
x1 = rectbbox.x1 < bboxins.x1
y0 = rectbbox.y0 < bboxins.y0
y1 = rectbbox.y1 < bboxins.y1
connects[0].set_visible(x0 ^ y0)
connects[1].set_visible(x0 == y1)
connects[2].set_visible(x1 == y0)
connects[3].set_visible(x1 ^ y1)
return rectpatch, connects
def indicate_inset_zoom(self, inset_ax, **kwargs):
"""
Add an inset indicator rectangle to the axes based on the axis
limits for an *inset_ax* and draw connectors between *inset_ax*
and the rectangle.
Warnings
--------
This method is experimental as of 3.0, and the API may change.
Parameters
----------
inset_ax : `.Axes`
Inset axes to draw connecting lines to. Two lines are
drawn connecting the indicator box to the inset axes on corners
chosen so as to not overlap with the indicator box.
**kwargs
            Other *kwargs* are passed on to `.Axes.indicate_inset`.
Returns
-------
        rectangle_patch : `.patches.Rectangle`
            Rectangle artist.
        connector_lines : 4-tuple of `.patches.ConnectionPatch`
One for each of four connector lines. Two are set with visibility
to *False*, but the user can set the visibility to True if the
automatic choice is not deemed correct.
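        Examples
        --------
        A short sketch (assuming ``plt`` refers to ``matplotlib.pyplot``)::

            fig, ax = plt.subplots()
            ax.plot(range(10))
            axins = ax.inset_axes([0.5, 0.5, 0.45, 0.45])
            axins.plot(range(10))
            axins.set_xlim(1, 3)
            axins.set_ylim(1, 3)
            ax.indicate_inset_zoom(axins)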
"""
xlim = inset_ax.get_xlim()
ylim = inset_ax.get_ylim()
rect = [xlim[0], ylim[0], xlim[1] - xlim[0], ylim[1] - ylim[0]]
rectpatch, connects = self.indicate_inset(
rect, inset_ax, **kwargs)
return rectpatch, connects
@docstring.dedent_interpd
def secondary_xaxis(self, location, *, functions=None, **kwargs):
"""
Add a second x-axis to this axes.
For example if we want to have a second scale for the data plotted on
the xaxis.
%(_secax_docstring)s
Examples
--------
The main axis shows frequency, and the secondary axis shows period.
.. plot::
fig, ax = plt.subplots()
ax.loglog(range(1, 360, 5), range(1, 360, 5))
ax.set_xlabel('frequency [Hz]')
def invert(x):
return 1 / x
secax = ax.secondary_xaxis('top', functions=(invert, invert))
secax.set_xlabel('Period [s]')
plt.show()
"""
if (location in ['top', 'bottom'] or isinstance(location, Number)):
secondary_ax = SecondaryAxis(self, 'x', location, functions,
**kwargs)
self.add_child_axes(secondary_ax)
return secondary_ax
else:
raise ValueError('secondary_xaxis location must be either '
'a float or "top"/"bottom"')
def secondary_yaxis(self, location, *, functions=None, **kwargs):
"""
Add a second y-axis to this axes.
For example if we want to have a second scale for the data plotted on
the yaxis.
%(_secax_docstring)s
Examples
--------
Add a secondary axes that converts from radians to degrees
.. plot::
fig, ax = plt.subplots()
ax.plot(range(1, 360, 5), range(1, 360, 5))
ax.set_ylabel('degrees')
secax = ax.secondary_yaxis('right', functions=(np.deg2rad,
np.rad2deg))
secax.set_ylabel('radians')
"""
if location in ['left', 'right'] or isinstance(location, Number):
secondary_ax = SecondaryAxis(self, 'y', location,
functions, **kwargs)
self.add_child_axes(secondary_ax)
return secondary_ax
else:
raise ValueError('secondary_yaxis location must be either '
'a float or "left"/"right"')
@cbook._delete_parameter("3.1", "withdash")
def text(self, x, y, s, fontdict=None, withdash=False, **kwargs):
"""
Add text to the axes.
Add the text *s* to the axes at location *x*, *y* in data coordinates.
Parameters
----------
x, y : scalars
The position to place the text. By default, this is in data
coordinates. The coordinate system can be changed using the
*transform* parameter.
s : str
The text.
fontdict : dictionary, optional, default: None
A dictionary to override the default text properties. If fontdict
is None, the defaults are determined by your rc parameters.
withdash : boolean, optional, default: False
Creates a `~matplotlib.text.TextWithDash` instance instead of a
`~matplotlib.text.Text` instance.
Returns
-------
text : `.Text`
The created `.Text` instance.
Other Parameters
----------------
**kwargs : `~matplotlib.text.Text` properties.
Other miscellaneous text parameters.
Examples
--------
Individual keyword arguments can be used to override any given
parameter::
>>> text(x, y, s, fontsize=12)
The default transform specifies that text is in data coords,
alternatively, you can specify text in axis coords (0,0 is
lower-left and 1,1 is upper-right). The example below places
text in the center of the axes::
>>> text(0.5, 0.5, 'matplotlib', horizontalalignment='center',
... verticalalignment='center', transform=ax.transAxes)
You can put a rectangular box around the text instance (e.g., to
set a background color) by using the keyword `bbox`. `bbox` is
a dictionary of `~matplotlib.patches.Rectangle`
properties. For example::
>>> text(x, y, s, bbox=dict(facecolor='red', alpha=0.5))
"""
if fontdict is None:
fontdict = {}
effective_kwargs = {
'verticalalignment': 'baseline',
'horizontalalignment': 'left',
'transform': self.transData,
'clip_on': False,
**fontdict,
**kwargs,
}
# At some point if we feel confident that TextWithDash
# is robust as a drop-in replacement for Text and that
# the performance impact of the heavier-weight class
# isn't too significant, it may make sense to eliminate
# the withdash kwarg and simply delegate whether there's
# a dash to TextWithDash and dashlength.
if (withdash
and withdash is not cbook.deprecation._deprecated_parameter):
t = mtext.TextWithDash(x, y, text=s)
else:
t = mtext.Text(x, y, text=s)
t.update(effective_kwargs)
t.set_clip_path(self.patch)
self._add_text(t)
return t
@docstring.dedent_interpd
def annotate(self, s, xy, *args, **kwargs):
a = mtext.Annotation(s, xy, *args, **kwargs)
a.set_transform(mtransforms.IdentityTransform())
if 'clip_on' in kwargs:
a.set_clip_path(self.patch)
self._add_text(a)
return a
annotate.__doc__ = mtext.Annotation.__init__.__doc__
#### Lines and spans
@docstring.dedent_interpd
def axhline(self, y=0, xmin=0, xmax=1, **kwargs):
"""
Add a horizontal line across the axis.
Parameters
----------
y : scalar, optional, default: 0
y position in data coordinates of the horizontal line.
xmin : scalar, optional, default: 0
Should be between 0 and 1, 0 being the far left of the plot, 1 the
far right of the plot.
xmax : scalar, optional, default: 1
Should be between 0 and 1, 0 being the far left of the plot, 1 the
far right of the plot.
Returns
-------
line : :class:`~matplotlib.lines.Line2D`
Other Parameters
----------------
**kwargs
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties,
with the exception of 'transform':
%(_Line2D_docstr)s
See also
--------
hlines : Add horizontal lines in data coordinates.
axhspan : Add a horizontal span (rectangle) across the axis.
Examples
--------
* draw a thick red hline at 'y' = 0 that spans the xrange::
>>> axhline(linewidth=4, color='r')
* draw a default hline at 'y' = 1 that spans the xrange::
>>> axhline(y=1)
* draw a default hline at 'y' = .5 that spans the middle half of
the xrange::
>>> axhline(y=.5, xmin=0.25, xmax=0.75)
"""
if "transform" in kwargs:
raise ValueError(
"'transform' is not allowed as a kwarg;"
+ "axhline generates its own transform.")
ymin, ymax = self.get_ybound()
# We need to strip away the units for comparison with
# non-unitized bounds
self._process_unit_info(ydata=y, kwargs=kwargs)
yy = self.convert_yunits(y)
scaley = (yy < ymin) or (yy > ymax)
trans = self.get_yaxis_transform(which='grid')
l = mlines.Line2D([xmin, xmax], [y, y], transform=trans, **kwargs)
self.add_line(l)
self.autoscale_view(scalex=False, scaley=scaley)
return l
@docstring.dedent_interpd
def axvline(self, x=0, ymin=0, ymax=1, **kwargs):
"""
Add a vertical line across the axes.
Parameters
----------
x : scalar, optional, default: 0
x position in data coordinates of the vertical line.
ymin : scalar, optional, default: 0
Should be between 0 and 1, 0 being the bottom of the plot, 1 the
top of the plot.
ymax : scalar, optional, default: 1
Should be between 0 and 1, 0 being the bottom of the plot, 1 the
top of the plot.
Returns
-------
line : :class:`~matplotlib.lines.Line2D`
Other Parameters
----------------
**kwargs
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties,
with the exception of 'transform':
%(_Line2D_docstr)s
Examples
--------
* draw a thick red vline at *x* = 0 that spans the yrange::
>>> axvline(linewidth=4, color='r')
* draw a default vline at *x* = 1 that spans the yrange::
>>> axvline(x=1)
* draw a default vline at *x* = .5 that spans the middle half of
the yrange::
>>> axvline(x=.5, ymin=0.25, ymax=0.75)
See also
--------
vlines : Add vertical lines in data coordinates.
axvspan : Add a vertical span (rectangle) across the axis.
"""
if "transform" in kwargs:
raise ValueError(
"'transform' is not allowed as a kwarg;"
+ "axvline generates its own transform.")
xmin, xmax = self.get_xbound()
# We need to strip away the units for comparison with
# non-unitized bounds
self._process_unit_info(xdata=x, kwargs=kwargs)
xx = self.convert_xunits(x)
scalex = (xx < xmin) or (xx > xmax)
trans = self.get_xaxis_transform(which='grid')
l = mlines.Line2D([x, x], [ymin, ymax], transform=trans, **kwargs)
self.add_line(l)
self.autoscale_view(scalex=scalex, scaley=False)
return l
@docstring.dedent_interpd
def axhspan(self, ymin, ymax, xmin=0, xmax=1, **kwargs):
"""
Add a horizontal span (rectangle) across the axis.
Draw a horizontal span (rectangle) from *ymin* to *ymax*.
With the default values of *xmin* = 0 and *xmax* = 1, this
always spans the xrange, regardless of the xlim settings, even
if you change them, e.g., with the :meth:`set_xlim` command.
That is, the horizontal extent is in axes coords: 0=left,
0.5=middle, 1.0=right but the *y* location is in data
coordinates.
Parameters
----------
ymin : float
Lower limit of the horizontal span in data units.
ymax : float
Upper limit of the horizontal span in data units.
xmin : float, optional, default: 0
Lower limit of the vertical span in axes (relative
0-1) units.
xmax : float, optional, default: 1
Upper limit of the vertical span in axes (relative
0-1) units.
Returns
-------
Polygon : `~matplotlib.patches.Polygon`
Other Parameters
----------------
**kwargs : `~matplotlib.patches.Polygon` properties.
%(Polygon)s
See Also
--------
axvspan : Add a vertical span across the axes.
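        Examples
        --------
        Draw a gray, translucent band from *y* = 0.25 to *y* = 0.75 that
        spans the full x-range (a minimal sketch):

        >>> axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5)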
"""
trans = self.get_yaxis_transform(which='grid')
# process the unit information
self._process_unit_info([xmin, xmax], [ymin, ymax], kwargs=kwargs)
# first we need to strip away the units
xmin, xmax = self.convert_xunits([xmin, xmax])
ymin, ymax = self.convert_yunits([ymin, ymax])
verts = (xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
self.add_patch(p)
self.autoscale_view(scalex=False)
return p
def axvspan(self, xmin, xmax, ymin=0, ymax=1, **kwargs):
"""
Add a vertical span (rectangle) across the axes.
Draw a vertical span (rectangle) from `xmin` to `xmax`. With
        the default values of `ymin` = 0 and `ymax` = 1, this always
spans the yrange, regardless of the ylim settings, even if you
change them, e.g., with the :meth:`set_ylim` command. That is,
the vertical extent is in axes coords: 0=bottom, 0.5=middle,
1.0=top but the x location is in data coordinates.
Parameters
----------
xmin : scalar
Number indicating the first X-axis coordinate of the vertical
span rectangle in data units.
xmax : scalar
Number indicating the second X-axis coordinate of the vertical
span rectangle in data units.
ymin : scalar, optional
Number indicating the first Y-axis coordinate of the vertical
span rectangle in relative Y-axis units (0-1). Default to 0.
ymax : scalar, optional
Number indicating the second Y-axis coordinate of the vertical
span rectangle in relative Y-axis units (0-1). Default to 1.
Returns
-------
rectangle : matplotlib.patches.Polygon
Vertical span (rectangle) from (xmin, ymin) to (xmax, ymax).
Other Parameters
----------------
**kwargs
Optional parameters are properties of the class
matplotlib.patches.Polygon.
See Also
--------
axhspan : Add a horizontal span across the axes.
Examples
--------
Draw a vertical, green, translucent rectangle from x = 1.25 to
x = 1.55 that spans the yrange of the axes.
>>> axvspan(1.25, 1.55, facecolor='g', alpha=0.5)
"""
trans = self.get_xaxis_transform(which='grid')
# process the unit information
self._process_unit_info([xmin, xmax], [ymin, ymax], kwargs=kwargs)
# first we need to strip away the units
xmin, xmax = self.convert_xunits([xmin, xmax])
ymin, ymax = self.convert_yunits([ymin, ymax])
verts = [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)]
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
self.add_patch(p)
self.autoscale_view(scaley=False)
return p
@_preprocess_data(replace_names=["y", "xmin", "xmax", "colors"],
label_namer="y")
def hlines(self, y, xmin, xmax, colors='k', linestyles='solid',
label='', **kwargs):
"""
Plot horizontal lines at each *y* from *xmin* to *xmax*.
Parameters
----------
y : scalar or sequence of scalar
y-indexes where to plot the lines.
xmin, xmax : scalar or 1D array_like
Respective beginning and end of each line. If scalars are
provided, all lines will have same length.
colors : array_like of colors, optional, default: 'k'
linestyles : {'solid', 'dashed', 'dashdot', 'dotted'}, optional
label : string, optional, default: ''
Returns
-------
lines : `~matplotlib.collections.LineCollection`
Other Parameters
----------------
**kwargs : `~matplotlib.collections.LineCollection` properties.
See also
--------
vlines : vertical lines
axhline: horizontal line across the axes
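        Examples
        --------
        A minimal sketch (assuming ``ax`` is an existing `Axes`)::

            ax.hlines([1, 2, 3], xmin=0, xmax=[1, 2, 3], colors='C0',
                      linestyles='dashed')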
"""
# We do the conversion first since not all unitized data is uniform
# process the unit information
self._process_unit_info([xmin, xmax], y, kwargs=kwargs)
y = self.convert_yunits(y)
xmin = self.convert_xunits(xmin)
xmax = self.convert_xunits(xmax)
if not np.iterable(y):
y = [y]
if not np.iterable(xmin):
xmin = [xmin]
if not np.iterable(xmax):
xmax = [xmax]
y, xmin, xmax = cbook.delete_masked_points(y, xmin, xmax)
y = np.ravel(y)
xmin = np.resize(xmin, y.shape)
xmax = np.resize(xmax, y.shape)
verts = [((thisxmin, thisy), (thisxmax, thisy))
for thisxmin, thisxmax, thisy in zip(xmin, xmax, y)]
lines = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(lines, autolim=False)
lines.update(kwargs)
if len(y) > 0:
minx = min(xmin.min(), xmax.min())
maxx = max(xmin.max(), xmax.max())
miny = y.min()
maxy = y.max()
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return lines
@_preprocess_data(replace_names=["x", "ymin", "ymax", "colors"],
label_namer="x")
def vlines(self, x, ymin, ymax, colors='k', linestyles='solid',
label='', **kwargs):
"""
Plot vertical lines.
Plot vertical lines at each *x* from *ymin* to *ymax*.
Parameters
----------
x : scalar or 1D array_like
x-indexes where to plot the lines.
ymin, ymax : scalar or 1D array_like
Respective beginning and end of each line. If scalars are
provided, all lines will have same length.
colors : array_like of colors, optional, default: 'k'
linestyles : {'solid', 'dashed', 'dashdot', 'dotted'}, optional
label : string, optional, default: ''
Returns
-------
lines : `~matplotlib.collections.LineCollection`
Other Parameters
----------------
**kwargs : `~matplotlib.collections.LineCollection` properties.
See also
--------
hlines : horizontal lines
axvline: vertical line across the axes
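        Examples
        --------
        A minimal sketch (assuming ``ax`` is an existing `Axes`)::

            ax.vlines([1, 2, 3], ymin=0, ymax=[1, 4, 9], colors='C1',
                      linestyles='dotted')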
"""
self._process_unit_info(xdata=x, ydata=[ymin, ymax], kwargs=kwargs)
# We do the conversion first since not all unitized data is uniform
x = self.convert_xunits(x)
ymin = self.convert_yunits(ymin)
ymax = self.convert_yunits(ymax)
if not np.iterable(x):
x = [x]
if not np.iterable(ymin):
ymin = [ymin]
if not np.iterable(ymax):
ymax = [ymax]
x, ymin, ymax = cbook.delete_masked_points(x, ymin, ymax)
x = np.ravel(x)
ymin = np.resize(ymin, x.shape)
ymax = np.resize(ymax, x.shape)
verts = [((thisx, thisymin), (thisx, thisymax))
for thisx, thisymin, thisymax in zip(x, ymin, ymax)]
lines = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(lines, autolim=False)
lines.update(kwargs)
if len(x) > 0:
minx = x.min()
maxx = x.max()
miny = min(ymin.min(), ymax.min())
maxy = max(ymin.max(), ymax.max())
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return lines
@_preprocess_data(replace_names=["positions", "lineoffsets",
"linelengths", "linewidths",
"colors", "linestyles"])
@docstring.dedent_interpd
def eventplot(self, positions, orientation='horizontal', lineoffsets=1,
linelengths=1, linewidths=None, colors=None,
linestyles='solid', **kwargs):
"""
Plot identical parallel lines at the given positions.
*positions* should be a 1D or 2D array-like object, with each row
corresponding to a row or column of lines.
This type of plot is commonly used in neuroscience for representing
neural events, where it is usually called a spike raster, dot raster,
or raster plot.
However, it is useful in any situation where you wish to show the
timing or position of multiple sets of discrete events, such as the
arrival times of people to a business on each day of the month or the
date of hurricanes each year of the last century.
Parameters
----------
positions : 1D or 2D array-like object
Each value is an event. If *positions* is a 2D array-like, each
row corresponds to a row or a column of lines (depending on the
*orientation* parameter).
orientation : {'horizontal', 'vertical'}, optional
Controls the direction of the event collections:
- 'horizontal' : the lines are arranged horizontally in rows,
and are vertical.
- 'vertical' : the lines are arranged vertically in columns,
and are horizontal.
lineoffsets : scalar or sequence of scalars, optional, default: 1
The offset of the center of the lines from the origin, in the
direction orthogonal to *orientation*.
linelengths : scalar or sequence of scalars, optional, default: 1
            The total height of the lines (i.e. the lines stretch from
            ``lineoffset - linelength/2`` to ``lineoffset + linelength/2``).
linewidths : scalar, scalar sequence or None, optional, default: None
The line width(s) of the event lines, in points. If it is None,
defaults to its rcParams setting.
colors : color, sequence of colors or None, optional, default: None
The color(s) of the event lines. If it is None, defaults to its
rcParams setting.
linestyles : str or tuple or a sequence of such values, optional
Default is 'solid'. Valid strings are ['solid', 'dashed',
'dashdot', 'dotted', '-', '--', '-.', ':']. Dash tuples
should be of the form::
(offset, onoffseq),
where *onoffseq* is an even length tuple of on and off ink
in points.
**kwargs : optional
Other keyword arguments are line collection properties. See
:class:`~matplotlib.collections.LineCollection` for a list of
the valid properties.
Returns
-------
        list : A list of :class:`~.collections.EventCollection` objects.
            Contains the :class:`~.collections.EventCollection` objects that
            were added.
Notes
-----
For *linelengths*, *linewidths*, *colors*, and *linestyles*, if only
a single value is given, that value is applied to all lines. If an
array-like is given, it must have the same length as *positions*, and
each value will be applied to the corresponding row of the array.
Examples
--------
.. plot:: gallery/lines_bars_and_markers/eventplot_demo.py
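
        A minimal sketch (assuming ``plt`` refers to ``matplotlib.pyplot``)::

            import numpy as np
            spikes = [np.random.uniform(0, 10, size=20) for _ in range(3)]
            fig, ax = plt.subplots()
            ax.eventplot(spikes, lineoffsets=[0, 1, 2], linelengths=0.8,
                         colors=['C0', 'C1', 'C2'])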
"""
self._process_unit_info(xdata=positions,
ydata=[lineoffsets, linelengths],
kwargs=kwargs)
# We do the conversion first since not all unitized data is uniform
positions = self.convert_xunits(positions)
lineoffsets = self.convert_yunits(lineoffsets)
linelengths = self.convert_yunits(linelengths)
if not np.iterable(positions):
positions = [positions]
elif any(np.iterable(position) for position in positions):
positions = [np.asanyarray(position) for position in positions]
else:
positions = [np.asanyarray(positions)]
if len(positions) == 0:
return []
        # prevent 'singular' keys in the **kwargs dict from overriding the effect
# of 'plural' keyword arguments (e.g. 'color' overriding 'colors')
colors = cbook.local_over_kwdict(colors, kwargs, 'color')
linewidths = cbook.local_over_kwdict(linewidths, kwargs, 'linewidth')
linestyles = cbook.local_over_kwdict(linestyles, kwargs, 'linestyle')
if not np.iterable(lineoffsets):
lineoffsets = [lineoffsets]
if not np.iterable(linelengths):
linelengths = [linelengths]
if not np.iterable(linewidths):
linewidths = [linewidths]
if not np.iterable(colors):
colors = [colors]
if hasattr(linestyles, 'lower') or not np.iterable(linestyles):
linestyles = [linestyles]
lineoffsets = np.asarray(lineoffsets)
linelengths = np.asarray(linelengths)
linewidths = np.asarray(linewidths)
if len(lineoffsets) == 0:
lineoffsets = [None]
if len(linelengths) == 0:
linelengths = [None]
        if len(linewidths) == 0:
            linewidths = [None]
if len(colors) == 0:
colors = [None]
try:
# Early conversion of the colors into RGBA values to take care
# of cases like colors='0.5' or colors='C1'. (Issue #8193)
colors = mcolors.to_rgba_array(colors)
except ValueError:
# Will fail if any element of *colors* is None. But as long
# as len(colors) == 1 or len(positions), the rest of the
# code should process *colors* properly.
pass
if len(lineoffsets) == 1 and len(positions) != 1:
lineoffsets = np.tile(lineoffsets, len(positions))
lineoffsets[0] = 0
lineoffsets = np.cumsum(lineoffsets)
if len(linelengths) == 1:
linelengths = np.tile(linelengths, len(positions))
if len(linewidths) == 1:
linewidths = np.tile(linewidths, len(positions))
if len(colors) == 1:
colors = list(colors)
colors = colors * len(positions)
if len(linestyles) == 1:
linestyles = [linestyles] * len(positions)
if len(lineoffsets) != len(positions):
raise ValueError('lineoffsets and positions are unequal sized '
'sequences')
if len(linelengths) != len(positions):
raise ValueError('linelengths and positions are unequal sized '
'sequences')
if len(linewidths) != len(positions):
raise ValueError('linewidths and positions are unequal sized '
'sequences')
if len(colors) != len(positions):
raise ValueError('colors and positions are unequal sized '
'sequences')
if len(linestyles) != len(positions):
raise ValueError('linestyles and positions are unequal sized '
'sequences')
colls = []
for position, lineoffset, linelength, linewidth, color, linestyle in \
zip(positions, lineoffsets, linelengths, linewidths,
colors, linestyles):
coll = mcoll.EventCollection(position,
orientation=orientation,
lineoffset=lineoffset,
linelength=linelength,
linewidth=linewidth,
color=color,
linestyle=linestyle)
self.add_collection(coll, autolim=False)
coll.update(kwargs)
colls.append(coll)
if len(positions) > 0:
# try to get min/max
min_max = [(np.min(_p), np.max(_p)) for _p in positions
if len(_p) > 0]
# if we have any non-empty positions, try to autoscale
if len(min_max) > 0:
mins, maxes = zip(*min_max)
minpos = np.min(mins)
maxpos = np.max(maxes)
minline = (lineoffsets - linelengths).min()
maxline = (lineoffsets + linelengths).max()
if (orientation is not None and
orientation.lower() == "vertical"):
corners = (minline, minpos), (maxline, maxpos)
else: # "horizontal", None or "none" (see EventCollection)
corners = (minpos, minline), (maxpos, maxline)
self.update_datalim(corners)
self.autoscale_view()
return colls
#### Basic plotting
# Uses a custom implementation of data-kwarg handling in
# _process_plot_var_args.
@docstring.dedent_interpd
def plot(self, *args, scalex=True, scaley=True, data=None, **kwargs):
"""
Plot y versus x as lines and/or markers.
Call signatures::
plot([x], y, [fmt], *, data=None, **kwargs)
plot([x], y, [fmt], [x2], y2, [fmt2], ..., **kwargs)
The coordinates of the points or line nodes are given by *x*, *y*.
The optional parameter *fmt* is a convenient way for defining basic
formatting like color, marker and linestyle. It's a shortcut string
notation described in the *Notes* section below.
>>> plot(x, y) # plot x and y using default line style and color
>>> plot(x, y, 'bo') # plot x and y using blue circle markers
>>> plot(y) # plot y using x as index array 0..N-1
>>> plot(y, 'r+') # ditto, but with red plusses
You can use `.Line2D` properties as keyword arguments for more
control on the appearance. Line properties and *fmt* can be mixed.
The following two calls yield identical results:
>>> plot(x, y, 'go--', linewidth=2, markersize=12)
>>> plot(x, y, color='green', marker='o', linestyle='dashed',
... linewidth=2, markersize=12)
When conflicting with *fmt*, keyword arguments take precedence.
**Plotting labelled data**
There's a convenient way for plotting objects with labelled data (i.e.
data that can be accessed by index ``obj['y']``). Instead of giving
the data in *x* and *y*, you can provide the object in the *data*
parameter and just give the labels for *x* and *y*::
>>> plot('xlabel', 'ylabel', data=obj)
All indexable objects are supported. This could e.g. be a `dict`, a
        `pandas.DataFrame` or a structured numpy array.
**Plotting multiple sets of data**
There are various ways to plot multiple sets of data.
        - The most straightforward way is just to call `plot` multiple times.
Example:
>>> plot(x1, y1, 'bo')
>>> plot(x2, y2, 'go')
- Alternatively, if your data is already a 2d array, you can pass it
directly to *x*, *y*. A separate data set will be drawn for every
column.
Example: an array ``a`` where the first column represents the *x*
values and the other columns are the *y* columns::
>>> plot(a[0], a[1:])
- The third way is to specify multiple sets of *[x]*, *y*, *[fmt]*
groups::
>>> plot(x1, y1, 'g^', x2, y2, 'g-')
In this case, any additional keyword argument applies to all
datasets. Also this syntax cannot be combined with the *data*
parameter.
By default, each line is assigned a different style specified by a
'style cycle'. The *fmt* and line property parameters are only
necessary if you want explicit deviations from these defaults.
Alternatively, you can also change the style cycle using the
'axes.prop_cycle' rcParam.
Parameters
----------
x, y : array-like or scalar
The horizontal / vertical coordinates of the data points.
*x* values are optional and default to `range(len(y))`.
Commonly, these parameters are 1D arrays.
They can also be scalars, or two-dimensional (in that case, the
columns represent separate data sets).
These arguments cannot be passed as keywords.
fmt : str, optional
A format string, e.g. 'ro' for red circles. See the *Notes*
section for a full description of the format strings.
Format strings are just an abbreviation for quickly setting
basic line properties. All of these and more can also be
controlled by keyword arguments.
This argument cannot be passed as keyword.
data : indexable object, optional
An object with labelled data. If given, provide the label names to
plot in *x* and *y*.
.. note::
Technically there's a slight ambiguity in calls where the
second label is a valid *fmt*. `plot('n', 'o', data=obj)`
                could be `plot(x, y)` or `plot(y, fmt)`. In such cases,
the former interpretation is chosen, but a warning is issued.
You may suppress the warning by adding an empty format string
`plot('n', 'o', '', data=obj)`.
Other Parameters
----------------
scalex, scaley : bool, optional, default: True
            These parameters determine whether the view limits are adapted to
the data limits. The values are passed on to `autoscale_view`.
**kwargs : `.Line2D` properties, optional
*kwargs* are used to specify properties like a line label (for
auto legends), linewidth, antialiasing, marker face color.
Example::
>>> plot([1,2,3], [1,2,3], 'go-', label='line 1', linewidth=2)
>>> plot([1,2,3], [1,4,9], 'rs', label='line 2')
If you make multiple lines with one plot command, the kwargs
apply to all those lines.
Here is a list of available `.Line2D` properties:
%(_Line2D_docstr)s
Returns
-------
lines
A list of `.Line2D` objects representing the plotted data.
See Also
--------
scatter : XY scatter plot with markers of varying size and/or color (
sometimes also called bubble chart).
Notes
-----
**Format Strings**
A format string consists of a part for color, marker and line::
fmt = '[marker][line][color]'
Each of them is optional. If not provided, the value from the style
cycle is used. Exception: If ``line`` is given, but no ``marker``,
the data will be a line without markers.
Other combinations such as ``[color][marker][line]`` are also
supported, but note that their parsing may be ambiguous.
**Markers**
============= ===============================
character description
============= ===============================
``'.'`` point marker
``','`` pixel marker
``'o'`` circle marker
``'v'`` triangle_down marker
``'^'`` triangle_up marker
``'<'`` triangle_left marker
``'>'`` triangle_right marker
``'1'`` tri_down marker
``'2'`` tri_up marker
``'3'`` tri_left marker
``'4'`` tri_right marker
``'s'`` square marker
``'p'`` pentagon marker
``'*'`` star marker
``'h'`` hexagon1 marker
``'H'`` hexagon2 marker
``'+'`` plus marker
``'x'`` x marker
``'D'`` diamond marker
``'d'`` thin_diamond marker
``'|'`` vline marker
``'_'`` hline marker
============= ===============================
**Line Styles**
============= ===============================
character description
============= ===============================
``'-'`` solid line style
``'--'`` dashed line style
``'-.'`` dash-dot line style
``':'`` dotted line style
============= ===============================
Example format strings::
'b' # blue markers with default shape
'or' # red circles
'-g' # green solid line
'--' # dashed line with default color
'^k:' # black triangle_up markers connected by a dotted line
**Colors**
The supported color abbreviations are the single letter codes
============= ===============================
character color
============= ===============================
``'b'`` blue
``'g'`` green
``'r'`` red
``'c'`` cyan
``'m'`` magenta
``'y'`` yellow
``'k'`` black
``'w'`` white
============= ===============================
and the ``'CN'`` colors that index into the default property cycle.
If the color is the only part of the format string, you can
additionally use any `matplotlib.colors` spec, e.g. full names
(``'green'``) or hex strings (``'#008000'``).
"""
kwargs = cbook.normalize_kwargs(kwargs, mlines.Line2D)
lines = [*self._get_lines(*args, data=data, **kwargs)]
for line in lines:
self.add_line(line)
self.autoscale_view(scalex=scalex, scaley=scaley)
return lines
@_preprocess_data(replace_names=["x", "y"], label_namer="y")
@docstring.dedent_interpd
def plot_date(self, x, y, fmt='o', tz=None, xdate=True, ydate=False,
**kwargs):
"""
Plot data that contains dates.
Similar to `.plot`, this plots *y* vs. *x* as lines or markers.
However, the axis labels are formatted as dates depending on *xdate*
and *ydate*.
Parameters
----------
x, y : array-like
The coordinates of the data points. If *xdate* or *ydate* is
*True*, the respective values *x* or *y* are interpreted as
:ref:`Matplotlib dates <date-format>`.
fmt : str, optional
The plot format string. For details, see the corresponding
parameter in `.plot`.
tz : [ *None* | timezone string | :class:`tzinfo` instance]
The time zone to use in labeling dates. If *None*, defaults to
rcParam ``timezone``.
xdate : bool, optional, default: True
If *True*, the *x*-axis will be interpreted as Matplotlib dates.
ydate : bool, optional, default: False
If *True*, the *y*-axis will be interpreted as Matplotlib dates.
Returns
-------
lines
A list of `~.Line2D` objects representing the plotted data.
Other Parameters
----------------
**kwargs
Keyword arguments control the :class:`~matplotlib.lines.Line2D`
properties:
%(_Line2D_docstr)s
See Also
--------
matplotlib.dates : Helper functions on dates.
matplotlib.dates.date2num : Convert dates to num.
matplotlib.dates.num2date : Convert num to dates.
matplotlib.dates.drange : Create an equally spaced sequence of dates.
Notes
-----
If you are using custom date tickers and formatters, it may be
necessary to set the formatters/locators after the call to
`.plot_date`. `.plot_date` will set the default tick locator to
`.AutoDateLocator` (if the tick locator is not already set to a
`.DateLocator` instance) and the default tick formatter to
`.AutoDateFormatter` (if the tick formatter is not already set to a
`.DateFormatter` instance).
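Examples
--------
A minimal, illustrative sketch; the dates and values below are arbitrary::

    >>> import datetime
    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots()
    >>> days = [datetime.date(2019, 1, d) for d in range(1, 6)]
    >>> ax.plot_date(days, [1, 3, 2, 5, 4], fmt='o-')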
"""
if xdate:
self.xaxis_date(tz)
if ydate:
self.yaxis_date(tz)
ret = self.plot(x, y, fmt, **kwargs)
self.autoscale_view()
return ret
# @_preprocess_data() # let 'plot' do the unpacking..
@docstring.dedent_interpd
def loglog(self, *args, **kwargs):
"""
Make a plot with log scaling on both the x and y axis.
Call signatures::
loglog([x], y, [fmt], data=None, **kwargs)
loglog([x], y, [fmt], [x2], y2, [fmt2], ..., **kwargs)
This is just a thin wrapper around `.plot` which additionally changes
both the x-axis and the y-axis to log scaling. All of the concepts and
parameters of plot can be used here as well.
The additional parameters *basex/y*, *subsx/y* and *nonposx/y* control
the x/y-axis properties. They are just forwarded to `.Axes.set_xscale`
and `.Axes.set_yscale`.
Parameters
----------
basex, basey : scalar, optional, default 10
Base of the x/y logarithm.
subsx, subsy : sequence, optional
The location of the minor x/y ticks. If *None*, reasonable
locations are automatically chosen depending on the number of
decades in the plot.
See `.Axes.set_xscale` / `.Axes.set_yscale` for details.
nonposx, nonposy : {'mask', 'clip'}, optional, default 'mask'
Non-positive values in x or y can be masked as invalid, or clipped
to a very small positive number.
Returns
-------
lines
A list of `~.Line2D` objects representing the plotted data.
Other Parameters
----------------
**kwargs
All parameters supported by `.plot`.
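Examples
--------
A minimal, illustrative sketch with arbitrary data::

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots()
    >>> x = np.logspace(0, 3, 50)
    >>> ax.loglog(x, 1 / x, basex=10, basey=10)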
"""
dx = {k: kwargs.pop(k) for k in ['basex', 'subsx', 'nonposx']
if k in kwargs}
dy = {k: kwargs.pop(k) for k in ['basey', 'subsy', 'nonposy']
if k in kwargs}
self.set_xscale('log', **dx)
self.set_yscale('log', **dy)
l = self.plot(*args, **kwargs)
return l
# @_preprocess_data() # let 'plot' do the unpacking..
@docstring.dedent_interpd
def semilogx(self, *args, **kwargs):
"""
Make a plot with log scaling on the x axis.
Call signatures::
semilogx([x], y, [fmt], data=None, **kwargs)
semilogx([x], y, [fmt], [x2], y2, [fmt2], ..., **kwargs)
This is just a thin wrapper around `.plot` which additionally changes
the x-axis to log scaling. All of the concepts and parameters of plot
can be used here as well.
The additional parameters *basex*, *subsx* and *nonposx* control the
x-axis properties. They are just forwarded to `.Axes.set_xscale`.
Parameters
----------
basex : scalar, optional, default 10
Base of the x logarithm.
subsx : array_like, optional
The location of the minor xticks. If *None*, reasonable locations
are automatically chosen depending on the number of decades in the
plot. See `.Axes.set_xscale` for details.
nonposx : {'mask', 'clip'}, optional, default 'mask'
Non-positive values in x can be masked as invalid, or clipped to a
very small positive number.
Returns
-------
lines
A list of `~.Line2D` objects representing the plotted data.
Other Parameters
----------------
**kwargs
All parameters supported by `.plot`.
"""
d = {k: kwargs.pop(k) for k in ['basex', 'subsx', 'nonposx']
if k in kwargs}
self.set_xscale('log', **d)
l = self.plot(*args, **kwargs)
return l
# @_preprocess_data() # let 'plot' do the unpacking..
@docstring.dedent_interpd
def semilogy(self, *args, **kwargs):
"""
Make a plot with log scaling on the y axis.
Call signatures::
semilogy([x], y, [fmt], data=None, **kwargs)
semilogy([x], y, [fmt], [x2], y2, [fmt2], ..., **kwargs)
This is just a thin wrapper around `.plot` which additionally changes
the y-axis to log scaling. All of the concepts and parameters of plot
can be used here as well.
The additional parameters *basey*, *subsy* and *nonposy* control the
y-axis properties. They are just forwarded to `.Axes.set_yscale`.
Parameters
----------
basey : scalar, optional, default 10
Base of the y logarithm.
subsy : array_like, optional
The location of the minor yticks. If *None*, reasonable locations
are automatically chosen depending on the number of decades in the
plot. See `.Axes.set_yscale` for details.
nonposy : {'mask', 'clip'}, optional, default 'mask'
Non-positive values in y can be masked as invalid, or clipped to a
very small positive number.
Returns
-------
lines
A list of `~.Line2D` objects representing the plotted data.
Other Parameters
----------------
**kwargs
All parameters supported by `.plot`.
"""
d = {k: kwargs.pop(k) for k in ['basey', 'subsy', 'nonposy']
if k in kwargs}
self.set_yscale('log', **d)
l = self.plot(*args, **kwargs)
return l
@_preprocess_data(replace_names=["x"], label_namer="x")
def acorr(self, x, **kwargs):
"""
Plot the autocorrelation of *x*.
Parameters
----------
x : sequence of scalars
detrend : callable, optional, default: `mlab.detrend_none`
*x* is detrended by the *detrend* callable. The default applies
no detrending.
normed : bool, optional, default: True
If ``True``, input vectors are normalised to unit length.
usevlines : bool, optional, default: True
If ``True``, `Axes.vlines` is used to plot the vertical lines from
the origin to the acorr. Otherwise, `Axes.plot` is used.
maxlags : int, optional, default: 10
Number of lags to show. If ``None``, will return all
``2 * len(x) - 1`` lags.
Returns
-------
lags : array (length ``2*maxlags+1``)
lag vector.
c : array (length ``2*maxlags+1``)
auto correlation vector.
line : `.LineCollection` or `.Line2D`
`.Artist` added to the axes of the correlation.
`.LineCollection` if *usevlines* is True
`.Line2D` if *usevlines* is False
b : `.Line2D` or None
Horizontal line at 0 if *usevlines* is True.
None if *usevlines* is False.
Other Parameters
----------------
linestyle : `.Line2D` property, optional, default: None
Only used if usevlines is ``False``.
marker : str, optional, default: 'o'
Notes
-----
The cross correlation is performed with :func:`numpy.correlate` with
``mode = "full"``.
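Examples
--------
A minimal, illustrative sketch using random data::

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots()
    >>> x = np.random.randn(100)
    >>> lags, c, line, b = ax.acorr(x, maxlags=20)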
"""
return self.xcorr(x, x, **kwargs)
@_preprocess_data(replace_names=["x", "y"], label_namer="y")
def xcorr(self, x, y, normed=True, detrend=mlab.detrend_none,
usevlines=True, maxlags=10, **kwargs):
r"""
Plot the cross correlation between *x* and *y*.
The correlation with lag k is defined as
:math:`\sum_n x[n+k] \cdot y^*[n]`, where :math:`y^*` is the complex
conjugate of :math:`y`.
Parameters
----------
x : sequence of scalars of length n
y : sequence of scalars of length n
detrend : callable, optional, default: `mlab.detrend_none`
*x* and *y* are detrended by the *detrend* callable. The default
applies no detrending.
normed : bool, optional, default: True
If ``True``, input vectors are normalised to unit length.
usevlines : bool, optional, default: True
If ``True``, `Axes.vlines` is used to plot the vertical lines from
the origin to the cross correlations. Otherwise, `Axes.plot` is used.
maxlags : int, optional
Number of lags to show. If None, will return all ``2 * len(x) - 1``
lags. Default is 10.
Returns
-------
lags : array (length ``2*maxlags+1``)
lag vector.
c : array (length ``2*maxlags+1``)
cross correlation vector.
line : `.LineCollection` or `.Line2D`
`.Artist` added to the axes of the correlation
`.LineCollection` if *usevlines* is True
`.Line2D` if *usevlines* is False
b : `.Line2D` or None
Horizontal line at 0 if *usevlines* is True.
None if *usevlines* is False.
Other Parameters
----------------
linestyle : `.Line2D` property, optional
Only used if usevlines is ``False``.
marker : string, optional
Default is 'o'.
Notes
-----
The cross correlation is performed with :func:`numpy.correlate` with
``mode = "full"``.
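Examples
--------
A minimal, illustrative sketch using random data::

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots()
    >>> x, y = np.random.randn(2, 100)
    >>> lags, c, line, b = ax.xcorr(x, y, maxlags=20)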
"""
Nx = len(x)
if Nx != len(y):
raise ValueError('x and y must be equal length')
x = detrend(np.asarray(x))
y = detrend(np.asarray(y))
correls = np.correlate(x, y, mode="full")
if normed:
correls /= np.sqrt(np.dot(x, x) * np.dot(y, y))
if maxlags is None:
maxlags = Nx - 1
if maxlags >= Nx or maxlags < 1:
raise ValueError('maxlags must be None or strictly '
'positive < %d' % Nx)
lags = np.arange(-maxlags, maxlags + 1)
correls = correls[Nx - 1 - maxlags:Nx + maxlags]
if usevlines:
a = self.vlines(lags, [0], correls, **kwargs)
# Make label empty so only vertical lines get a legend entry
kwargs.pop('label', '')
b = self.axhline(**kwargs)
else:
kwargs.setdefault('marker', 'o')
kwargs.setdefault('linestyle', 'None')
a, = self.plot(lags, correls, **kwargs)
b = None
return lags, correls, a, b
#### Specialized plotting
# @_preprocess_data() # let 'plot' do the unpacking..
def step(self, x, y, *args, where='pre', data=None, **kwargs):
"""
Make a step plot.
Call signatures::
step(x, y, [fmt], *, data=None, where='pre', **kwargs)
step(x, y, [fmt], x2, y2, [fmt2], ..., *, where='pre', **kwargs)
This is just a thin wrapper around `.plot` which changes some
formatting options. Most of the concepts and parameters of plot can be
used here as well.
Parameters
----------
x : array_like
1-D sequence of x positions. It is assumed, but not checked, that
it is uniformly increasing.
y : array_like
1-D sequence of y levels.
fmt : str, optional
A format string, e.g. 'g' for a green line. See `.plot` for a more
detailed description.
Note: While full format strings are accepted, it is recommended to
only specify the color. Line styles are currently ignored (use
the keyword argument *linestyle* instead). Markers are accepted
and plotted on the given positions, however, this is a rarely
needed feature for step plots.
data : indexable object, optional
An object with labelled data. If given, provide the label names to
plot in *x* and *y*.
where : {'pre', 'post', 'mid'}, optional, default 'pre'
Define where the steps should be placed:
- 'pre': The y value is continued constantly to the left from
every *x* position, i.e. the interval ``(x[i-1], x[i]]`` has the
value ``y[i]``.
- 'post': The y value is continued constantly to the right from
every *x* position, i.e. the interval ``[x[i], x[i+1])`` has the
value ``y[i]``.
- 'mid': Steps occur half-way between the *x* positions.
Returns
-------
lines
A list of `.Line2D` objects representing the plotted data.
Other Parameters
----------------
**kwargs
Additional parameters are the same as those for `.plot`.
Notes
-----
.. [notes section required to get data note injection right]
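Examples
--------
A minimal, illustrative sketch; the data below are arbitrary::

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots()
    >>> x = np.arange(8)
    >>> ax.step(x, x ** 2, where='mid')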
"""
if where not in ('pre', 'post', 'mid'):
raise ValueError("'where' argument to step must be "
"'pre', 'post' or 'mid'")
kwargs['drawstyle'] = 'steps-' + where
return self.plot(x, y, *args, data=data, **kwargs)
@staticmethod
def _convert_dx(dx, x0, xconv, convert):
"""
Small helper to do logic of width conversion flexibly.
*dx* and *x0* have units, but *xconv* has already been converted
to unitless (and is an ndarray). This allows the *dx* to have units
that are different from *x0*, but are still accepted by the
``__add__`` operator of *x0*.
"""
# x should be an array...
assert type(xconv) is np.ndarray
if xconv.size == 0:
# xconv has already been converted, but maybe empty...
return convert(dx)
try:
# attempt to add the width to x0; this works for
# datetime+timedelta, for instance
# only use the first element of x and x0. This saves
# having to be sure addition works across the whole
# vector. This is particularly an issue if
# x0 and dx are lists so x0 + dx just concatenates the lists.
# We can't just cast x0 and dx to numpy arrays because that
# removes the units from unit packages like `pint` that
# wrap numpy arrays.
try:
x0 = x0[0]
except (TypeError, IndexError, KeyError):
x0 = x0
try:
x = xconv[0]
except (TypeError, IndexError, KeyError):
x = xconv
delist = False
if not np.iterable(dx):
dx = [dx]
delist = True
dx = [convert(x0 + ddx) - x for ddx in dx]
if delist:
dx = dx[0]
except (ValueError, TypeError, AttributeError):
# if the above fails (for any reason) just fallback to what
# we do by default and convert dx by itself.
dx = convert(dx)
return dx
@_preprocess_data()
@docstring.dedent_interpd
def bar(self, x, height, width=0.8, bottom=None, *, align="center",
**kwargs):
r"""
Make a bar plot.
The bars are positioned at *x* with the given *align*\ment. Their
dimensions are given by *width* and *height*. The vertical baseline
is *bottom* (default 0).
Each of *x*, *height*, *width*, and *bottom* may either be a scalar
applying to all bars, or it may be a sequence of length N providing a
separate value for each bar.
Parameters
----------
x : sequence of scalars
The x coordinates of the bars. See also *align* for the
alignment of the bars to the coordinates.
height : scalar or sequence of scalars
The height(s) of the bars.
width : scalar or array-like, optional
The width(s) of the bars (default: 0.8).
bottom : scalar or array-like, optional
The y coordinate(s) of the bars bases (default: 0).
align : {'center', 'edge'}, optional, default: 'center'
Alignment of the bars to the *x* coordinates:
- 'center': Center the base on the *x* positions.
- 'edge': Align the left edges of the bars with the *x* positions.
To align the bars on the right edge pass a negative *width* and
``align='edge'``.
Returns
-------
container : `.BarContainer`
Container with all the bars and optionally errorbars.
Other Parameters
----------------
color : scalar or array-like, optional
The colors of the bar faces.
edgecolor : scalar or array-like, optional
The colors of the bar edges.
linewidth : scalar or array-like, optional
Width of the bar edge(s). If 0, don't draw edges.
tick_label : string or array-like, optional
The tick labels of the bars.
Default: None (Use default numeric labels.)
xerr, yerr : scalar or array-like of shape(N,) or shape(2,N), optional
If not *None*, add horizontal / vertical errorbars to the bar tips.
The values are +/- sizes relative to the data:
- scalar: symmetric +/- values for all bars
- shape(N,): symmetric +/- values for each bar
- shape(2,N): Separate - and + values for each bar. First row
contains the lower errors, the second row contains the
upper errors.
- *None*: No errorbar. (Default)
See :doc:`/gallery/statistics/errorbar_features`
for an example on the usage of ``xerr`` and ``yerr``.
ecolor : scalar or array-like, optional, default: 'black'
The line color of the errorbars.
capsize : scalar, optional
The length of the error bar caps in points.
Default: None, which will take the value from
:rc:`errorbar.capsize`.
error_kw : dict, optional
Dictionary of kwargs to be passed to the `~.Axes.errorbar`
method. Values of *ecolor* or *capsize* defined here take
precedence over the independent kwargs.
log : bool, optional, default: False
If *True*, set the y-axis to be log scale.
orientation : {'vertical', 'horizontal'}, optional
*This is for internal use only.* Please use `barh` for
horizontal bar plots. Default: 'vertical'.
See also
--------
barh: Plot a horizontal bar plot.
Notes
-----
The optional arguments *color*, *edgecolor*, *linewidth*,
*xerr*, and *yerr* can be either scalars or sequences of
length equal to the number of bars. This enables you to use
bar as the basis for stacked bar charts, or candlestick plots.
Detail: *xerr* and *yerr* are passed directly to
:meth:`errorbar`, so they can also have shape 2xN for
independent specification of lower and upper errors.
Other optional kwargs:
%(Rectangle)s
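Examples
--------
A minimal, illustrative sketch; the heights and errors below are
arbitrary::

    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots()
    >>> ax.bar(range(3), [3, 1, 2], yerr=[0.3, 0.2, 0.4],
    ...        tick_label=['a', 'b', 'c'])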
"""
kwargs = cbook.normalize_kwargs(kwargs, mpatches.Patch)
color = kwargs.pop('color', None)
if color is None:
color = self._get_patches_for_fill.get_next_color()
edgecolor = kwargs.pop('edgecolor', None)
linewidth = kwargs.pop('linewidth', None)
# Because xerr and yerr will be passed to errorbar, most dimension
# checking and processing will be left to the errorbar method.
xerr = kwargs.pop('xerr', None)
yerr = kwargs.pop('yerr', None)
error_kw = kwargs.pop('error_kw', {})
ecolor = kwargs.pop('ecolor', 'k')
capsize = kwargs.pop('capsize', rcParams["errorbar.capsize"])
error_kw.setdefault('ecolor', ecolor)
error_kw.setdefault('capsize', capsize)
orientation = kwargs.pop('orientation', 'vertical')
log = kwargs.pop('log', False)
label = kwargs.pop('label', '')
tick_labels = kwargs.pop('tick_label', None)
adjust_ylim = False
adjust_xlim = False
y = bottom # Matches barh call signature.
if orientation == 'vertical':
if bottom is None:
if self.get_yscale() == 'log':
adjust_ylim = True
y = 0
elif orientation == 'horizontal':
if x is None:
if self.get_xscale() == 'log':
adjust_xlim = True
x = 0
if orientation == 'vertical':
self._process_unit_info(xdata=x, ydata=height, kwargs=kwargs)
if log:
self.set_yscale('log', nonposy='clip')
elif orientation == 'horizontal':
self._process_unit_info(xdata=width, ydata=y, kwargs=kwargs)
if log:
self.set_xscale('log', nonposx='clip')
else:
raise ValueError('invalid orientation: %s' % orientation)
# lets do some conversions now since some types cannot be
# subtracted uniformly
if self.xaxis is not None:
x0 = x
x = np.asarray(self.convert_xunits(x))
width = self._convert_dx(width, x0, x, self.convert_xunits)
if xerr is not None:
xerr = self._convert_dx(xerr, x0, x, self.convert_xunits)
if self.yaxis is not None:
y0 = y
y = np.asarray(self.convert_yunits(y))
height = self._convert_dx(height, y0, y, self.convert_yunits)
if yerr is not None:
yerr = self._convert_dx(yerr, y0, y, self.convert_yunits)
x, height, width, y, linewidth = np.broadcast_arrays(
# Make args iterable too.
np.atleast_1d(x), height, width, y, linewidth)
# Now that units have been converted, set the tick locations.
if orientation == 'vertical':
tick_label_axis = self.xaxis
tick_label_position = x
elif orientation == 'horizontal':
tick_label_axis = self.yaxis
tick_label_position = y
linewidth = itertools.cycle(np.atleast_1d(linewidth))
color = itertools.chain(itertools.cycle(mcolors.to_rgba_array(color)),
# Fallback if color == "none".
itertools.repeat('none'))
if edgecolor is None:
edgecolor = itertools.repeat(None)
else:
edgecolor = itertools.chain(
itertools.cycle(mcolors.to_rgba_array(edgecolor)),
# Fallback if edgecolor == "none".
itertools.repeat('none'))
# We will now resolve the alignment and really have
# left, bottom, width, height vectors
if align == 'center':
if orientation == 'vertical':
try:
left = x - width / 2
except TypeError as e:
raise TypeError(f'the dtypes of parameters x ({x.dtype}) '
f'and width ({width.dtype}) '
f'are incompatible') from e
bottom = y
elif orientation == 'horizontal':
try:
bottom = y - height / 2
except TypeError as e:
raise TypeError(f'the dtypes of parameters y ({y.dtype}) '
f'and height ({height.dtype}) '
f'are incompatible') from e
left = x
elif align == 'edge':
left = x
bottom = y
else:
raise ValueError('invalid alignment: %s' % align)
patches = []
args = zip(left, bottom, width, height, color, edgecolor, linewidth)
for l, b, w, h, c, e, lw in args:
r = mpatches.Rectangle(
xy=(l, b), width=w, height=h,
facecolor=c,
edgecolor=e,
linewidth=lw,
label='_nolegend_',
)
r.update(kwargs)
r.get_path()._interpolation_steps = 100
if orientation == 'vertical':
r.sticky_edges.y.append(b)
elif orientation == 'horizontal':
r.sticky_edges.x.append(l)
self.add_patch(r)
patches.append(r)
if xerr is not None or yerr is not None:
if orientation == 'vertical':
# using list comps rather than arrays to preserve unit info
ex = [l + 0.5 * w for l, w in zip(left, width)]
ey = [b + h for b, h in zip(bottom, height)]
elif orientation == 'horizontal':
# using list comps rather than arrays to preserve unit info
ex = [l + w for l, w in zip(left, width)]
ey = [b + 0.5 * h for b, h in zip(bottom, height)]
error_kw.setdefault("label", '_nolegend_')
errorbar = self.errorbar(ex, ey,
yerr=yerr, xerr=xerr,
fmt='none', **error_kw)
else:
errorbar = None
if adjust_xlim:
xmin, xmax = self.dataLim.intervalx
xmin = min(w for w in width if w > 0)
if xerr is not None:
xmin = xmin - np.max(xerr)
xmin = max(xmin * 0.9, 1e-100)
self.dataLim.intervalx = (xmin, xmax)
if adjust_ylim:
ymin, ymax = self.dataLim.intervaly
ymin = min(h for h in height if h > 0)
if yerr is not None:
ymin = ymin - np.max(yerr)
ymin = max(ymin * 0.9, 1e-100)
self.dataLim.intervaly = (ymin, ymax)
self.autoscale_view()
bar_container = BarContainer(patches, errorbar, label=label)
self.add_container(bar_container)
if tick_labels is not None:
tick_labels = np.broadcast_to(tick_labels, len(patches))
tick_label_axis.set_ticks(tick_label_position)
tick_label_axis.set_ticklabels(tick_labels)
return bar_container
@docstring.dedent_interpd
def barh(self, y, width, height=0.8, left=None, *, align="center",
**kwargs):
r"""
Make a horizontal bar plot.
The bars are positioned at *y* with the given *align*\ment. Their
dimensions are given by *width* and *height*. The horizontal baseline
is *left* (default 0).
Each of *y*, *width*, *height*, and *left* may either be a scalar
applying to all bars, or it may be a sequence of length N providing a
separate value for each bar.
Parameters
----------
y : scalar or array-like
The y coordinates of the bars. See also *align* for the
alignment of the bars to the coordinates.
width : scalar or array-like
The width(s) of the bars.
height : sequence of scalars, optional, default: 0.8
The heights of the bars.
left : sequence of scalars
The x coordinates of the left sides of the bars (default: 0).
align : {'center', 'edge'}, optional, default: 'center'
Alignment of the bars to the *y* coordinates:
- 'center': Center the bars on the *y* positions.
- 'edge': Align the bottom edges of the bars with the *y*
positions.
To align the bars on the top edge pass a negative *height* and
``align='edge'``.
Returns
-------
container : `.BarContainer`
Container with all the bars and optionally errorbars.
Other Parameters
----------------
color : scalar or array-like, optional
The colors of the bar faces.
edgecolor : scalar or array-like, optional
The colors of the bar edges.
linewidth : scalar or array-like, optional
Width of the bar edge(s). If 0, don't draw edges.
tick_label : string or array-like, optional
The tick labels of the bars.
Default: None (Use default numeric labels.)
xerr, yerr : scalar or array-like of shape(N,) or shape(2,N), optional
If not ``None``, add horizontal / vertical errorbars to the
bar tips. The values are +/- sizes relative to the data:
- scalar: symmetric +/- values for all bars
- shape(N,): symmetric +/- values for each bar
- shape(2,N): Separate - and + values for each bar. First row
contains the lower errors, the second row contains the
upper errors.
- *None*: No errorbar. (default)
See :doc:`/gallery/statistics/errorbar_features`
for an example on the usage of ``xerr`` and ``yerr``.
ecolor : scalar or array-like, optional, default: 'black'
The line color of the errorbars.
capsize : scalar, optional
The length of the error bar caps in points.
Default: None, which will take the value from
:rc:`errorbar.capsize`.
error_kw : dict, optional
Dictionary of kwargs to be passed to the `~.Axes.errorbar`
method. Values of *ecolor* or *capsize* defined here take
precedence over the independent kwargs.
log : bool, optional, default: False
If ``True``, set the x-axis to be log scale.
See also
--------
bar: Plot a vertical bar plot.
Notes
-----
The optional arguments *color*, *edgecolor*, *linewidth*,
*xerr*, and *yerr* can be either scalars or sequences of
length equal to the number of bars. This enables you to use
bar as the basis for stacked bar charts, or candlestick plots.
Detail: *xerr* and *yerr* are passed directly to
:meth:`errorbar`, so they can also have shape 2xN for
independent specification of lower and upper errors.
Other optional kwargs:
%(Rectangle)s
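Examples
--------
A minimal, illustrative sketch; the widths below are arbitrary::

    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots()
    >>> ax.barh(range(3), [3, 1, 2], tick_label=['a', 'b', 'c'])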
"""
kwargs.setdefault('orientation', 'horizontal')
patches = self.bar(x=left, height=height, width=width, bottom=y,
align=align, **kwargs)
return patches
@_preprocess_data()
@docstring.dedent_interpd
def broken_barh(self, xranges, yrange, **kwargs):
"""
Plot a horizontal sequence of rectangles.
A rectangle is drawn for each element of *xranges*. All rectangles
have the same vertical position and size defined by *yrange*.
This is a convenience function for instantiating a
`.BrokenBarHCollection`, adding it to the axes and autoscaling the
view.
Parameters
----------
xranges : sequence of tuples (*xmin*, *xwidth*)
The x-positions and extents of the rectangles. For each tuple
(*xmin*, *xwidth*) a rectangle is drawn from *xmin* to *xmin* +
*xwidth*.
yrange : (*ymin*, *yheight*)
The y-position and extent for all the rectangles.
Other Parameters
----------------
**kwargs : :class:`.BrokenBarHCollection` properties
Each *kwarg* can be either a single argument applying to all
rectangles, e.g.::
facecolors='black'
or a sequence of arguments over which is cycled, e.g.::
facecolors=('black', 'blue')
would create interleaving black and blue rectangles.
Supported keywords:
%(BrokenBarHCollection)s
Returns
-------
collection : A :class:`~.collections.BrokenBarHCollection`
Notes
-----
.. [Notes section required for data comment. See #10189.]
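Examples
--------
A minimal, illustrative sketch; the ranges below are arbitrary::

    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots()
    >>> ax.broken_barh([(10, 5), (20, 3)], (0, 1), facecolors='tab:blue')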
"""
# process the unit information
if len(xranges):
xdata = cbook.safe_first_element(xranges)
else:
xdata = None
if len(yrange):
ydata = cbook.safe_first_element(yrange)
else:
ydata = None
self._process_unit_info(xdata=xdata,
ydata=ydata,
kwargs=kwargs)
xranges_conv = []
for xr in xranges:
if len(xr) != 2:
raise ValueError('each range in xranges must be a sequence '
'with two elements (i.e. an Nx2 array)')
# convert the absolute values, not the x and dx...
x_conv = np.asarray(self.convert_xunits(xr[0]))
x1 = self._convert_dx(xr[1], xr[0], x_conv, self.convert_xunits)
xranges_conv.append((x_conv, x1))
yrange_conv = self.convert_yunits(yrange)
col = mcoll.BrokenBarHCollection(xranges_conv, yrange_conv, **kwargs)
self.add_collection(col, autolim=True)
self.autoscale_view()
return col
@_preprocess_data()
def stem(self, *args, linefmt=None, markerfmt=None, basefmt=None, bottom=0,
label=None, use_line_collection=False):
"""
Create a stem plot.
A stem plot plots vertical lines at each *x* location from the baseline
to *y*, and places a marker there.
Call signature::
stem([x,] y, linefmt=None, markerfmt=None, basefmt=None)
The x-positions are optional. The formats may be provided either as
positional or as keyword-arguments.
Parameters
----------
x : array-like, optional
The x-positions of the stems. Default: (0, 1, ..., len(y) - 1).
y : array-like
The y-values of the stem heads.
linefmt : str, optional
A string defining the properties of the vertical lines. Usually,
this will be a color or a color and a linestyle:
========= =============
Character Line Style
========= =============
``'-'`` solid line
``'--'`` dashed line
``'-.'`` dash-dot line
``':'`` dotted line
========= =============
Default: 'C0-', i.e. solid line with the first color of the color
cycle.
Note: While it is technically possible to specify valid formats
other than color or color and linestyle (e.g. 'rx' or '-.'), this
is beyond the intention of the method and will most likely not
result in a reasonable plot.
markerfmt : str, optional
A string defining the properties of the markers at the stem heads.
Default: 'C0o', i.e. filled circles with the first color of the
color cycle.
basefmt : str, optional
A format string defining the properties of the baseline.
Default: 'C3-' ('C2-' in classic mode).
bottom : float, optional, default: 0
The y-position of the baseline.
label : str, optional, default: None
The label to use for the stems in legends.
use_line_collection : bool, optional, default: False
If ``True``, store and plot the stem lines as a
`~.collections.LineCollection` instead of individual lines. This
significantly increases performance, and will become the default
option in Matplotlib 3.3. If ``False``, defaults to the old
behavior of using a list of `.Line2D` objects.
Returns
-------
container : :class:`~matplotlib.container.StemContainer`
The container may be treated like a tuple
(*markerline*, *stemlines*, *baseline*)
Notes
-----
.. seealso::
The MATLAB function
`stem <http://www.mathworks.com/help/techdoc/ref/stem.html>`_
which inspired this method.
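Examples
--------
A minimal, illustrative sketch (``use_line_collection=True`` opts in to
the faster LineCollection-based drawing described above)::

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots()
    >>> x = np.linspace(0.1, 2 * np.pi, 20)
    >>> markerline, stemlines, baseline = ax.stem(
    ...     x, np.cos(x), use_line_collection=True)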
"""
if not 1 <= len(args) <= 5:
raise TypeError('stem expected between 1 and 5 positional '
'arguments, got {}'.format(len(args)))
y = np.asarray(args[0])
args = args[1:]
# If a second positional argument is present, the first one was x.
if not args:
x = np.arange(len(y))
else:
x = y
y = np.asarray(args[0], dtype=float)
args = args[1:]
self._process_unit_info(xdata=x, ydata=y)
x = self.convert_xunits(x)
y = self.convert_yunits(y)
# defaults for formats
if linefmt is None:
try:
# fallback to positional argument
linefmt = args[0]
except IndexError:
linecolor = 'C0'
linemarker = 'None'
linestyle = '-'
else:
linestyle, linemarker, linecolor = \
_process_plot_format(linefmt)
else:
linestyle, linemarker, linecolor = _process_plot_format(linefmt)
if markerfmt is None:
try:
# fallback to positional argument
markerfmt = args[1]
except IndexError:
markercolor = 'C0'
markermarker = 'o'
markerstyle = 'None'
else:
markerstyle, markermarker, markercolor = \
_process_plot_format(markerfmt)
else:
markerstyle, markermarker, markercolor = \
_process_plot_format(markerfmt)
if basefmt is None:
try:
# fallback to positional argument
basefmt = args[2]
except IndexError:
if rcParams['_internal.classic_mode']:
basecolor = 'C2'
else:
basecolor = 'C3'
basemarker = 'None'
basestyle = '-'
else:
basestyle, basemarker, basecolor = \
_process_plot_format(basefmt)
else:
basestyle, basemarker, basecolor = _process_plot_format(basefmt)
# New behaviour in 3.1 is to use a LineCollection for the stemlines
if use_line_collection:
stemlines = [((xi, bottom), (xi, yi)) for xi, yi in zip(x, y)]
if linestyle is None:
linestyle = rcParams['lines.linestyle']
stemlines = mcoll.LineCollection(stemlines, linestyles=linestyle,
colors=linecolor,
label='_nolegend_')
self.add_collection(stemlines)
# Old behaviour is to plot each of the lines individually
else:
cbook._warn_external(
'In Matplotlib 3.3 individual lines on a stem plot will be '
'added as a LineCollection instead of individual lines. '
'This significantly improves the performance of a stem plot. '
'To remove this warning and switch to the new behaviour, '
'set the "use_line_collection" keyword argument to True.')
stemlines = []
for xi, yi in zip(x, y):
l, = self.plot([xi, xi], [bottom, yi],
color=linecolor, linestyle=linestyle,
marker=linemarker, label="_nolegend_")
stemlines.append(l)
markerline, = self.plot(x, y, color=markercolor, linestyle=markerstyle,
marker=markermarker, label="_nolegend_")
baseline, = self.plot([np.min(x), np.max(x)], [bottom, bottom],
color=basecolor, linestyle=basestyle,
marker=basemarker, label="_nolegend_")
stem_container = StemContainer((markerline, stemlines, baseline),
label=label)
self.add_container(stem_container)
return stem_container
@_preprocess_data(replace_names=["x", "explode", "labels", "colors"])
def pie(self, x, explode=None, labels=None, colors=None,
autopct=None, pctdistance=0.6, shadow=False, labeldistance=1.1,
startangle=None, radius=None, counterclock=True,
wedgeprops=None, textprops=None, center=(0, 0),
frame=False, rotatelabels=False):
"""
Plot a pie chart.
Make a pie chart of array *x*. The fractional area of each wedge is
given by ``x/sum(x)``. If ``sum(x) < 1``, then the values of *x* give
the fractional area directly and the array will not be normalized. The
resulting pie will have an empty wedge of size ``1 - sum(x)``.
The wedges are plotted counterclockwise, by default starting from the
x-axis.
Parameters
----------
x : array-like
The wedge sizes.
explode : array-like, optional, default: None
If not *None*, is a ``len(x)`` array which specifies the fraction
of the radius with which to offset each wedge.
labels : list, optional, default: None
A sequence of strings providing the labels for each wedge
colors : array-like, optional, default: None
A sequence of matplotlib color args through which the pie chart
will cycle. If *None*, will use the colors in the currently
active cycle.
autopct : None (default), string, or function, optional
If not *None*, is a string or function used to label the wedges
with their numeric value. The label will be placed inside the
wedge. If it is a format string, the label will be ``fmt%pct``.
If it is a function, it will be called.
pctdistance : float, optional, default: 0.6
The ratio between the center of each pie slice and the start of
the text generated by *autopct*. Ignored if *autopct* is *None*.
shadow : bool, optional, default: False
Draw a shadow beneath the pie.
labeldistance : float or None, optional, default: 1.1
The radial distance at which the pie labels are drawn.
If set to ``None``, labels are not drawn, but are stored for use in
``legend()``.
startangle : float, optional, default: None
If not *None*, rotates the start of the pie chart by *angle*
degrees counterclockwise from the x-axis.
radius : float, optional, default: None
The radius of the pie, if *radius* is *None* it will be set to 1.
counterclock : bool, optional, default: True
Specify fractions direction, clockwise or counterclockwise.
wedgeprops : dict, optional, default: None
Dict of arguments passed to the wedge objects making the pie.
For example, you can pass in ``wedgeprops = {'linewidth': 3}``
to set the width of the wedge border lines equal to 3.
For more details, look at the doc/arguments of the wedge object.
By default ``clip_on=False``.
textprops : dict, optional, default: None
Dict of arguments to pass to the text objects.
center : list of float, optional, default: (0, 0)
Center position of the chart. Takes value (0, 0) or is a sequence
of 2 scalars.
frame : bool, optional, default: False
Plot axes frame with the chart if true.
rotatelabels : bool, optional, default: False
Rotate each label to the angle of the corresponding slice if true.
Returns
-------
patches : list
A sequence of :class:`matplotlib.patches.Wedge` instances
texts : list
A list of the label :class:`matplotlib.text.Text` instances.
autotexts : list
A list of :class:`~matplotlib.text.Text` instances for the numeric
labels. This will only be returned if the parameter *autopct* is
not *None*.
Notes
-----
The pie chart will probably look best if the figure and axes are
square, or the Axes aspect is equal.
This method sets the aspect ratio of the axis to "equal".
The axes aspect ratio can be controlled with `Axes.set_aspect`.
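Examples
--------
A minimal, illustrative sketch; the wedge sizes and labels below are
arbitrary::

    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots()
    >>> ax.pie([30, 45, 25], labels=['A', 'B', 'C'], autopct='%1.1f%%')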
"""
self.set_aspect('equal')
# The use of float32 is "historical", but can't be changed without
# regenerating the test baselines.
x = np.asarray(x, np.float32)
if x.ndim != 1 and x.squeeze().ndim <= 1:
cbook.warn_deprecated(
"3.1", message="Non-1D inputs to pie() are currently "
"squeeze()d, but this behavior is deprecated since %(since)s "
"and will be removed %(removal)s; pass a 1D array instead.")
x = np.atleast_1d(x.squeeze())
sx = x.sum()
if sx > 1:
x = x / sx
if labels is None:
labels = [''] * len(x)
if explode is None:
explode = [0] * len(x)
if len(x) != len(labels):
raise ValueError("'label' must be of length 'x'")
if len(x) != len(explode):
raise ValueError("'explode' must be of length 'x'")
if colors is None:
get_next_color = self._get_patches_for_fill.get_next_color
else:
color_cycle = itertools.cycle(colors)
def get_next_color():
return next(color_cycle)
if radius is None:
radius = 1
# Starting theta1 is the start fraction of the circle
if startangle is None:
theta1 = 0
else:
theta1 = startangle / 360.0
# set default values in wedge_prop
if wedgeprops is None:
wedgeprops = {}
wedgeprops.setdefault('clip_on', False)
if textprops is None:
textprops = {}
textprops.setdefault('clip_on', False)
texts = []
slices = []
autotexts = []
i = 0
for frac, label, expl in zip(x, labels, explode):
x, y = center
theta2 = (theta1 + frac) if counterclock else (theta1 - frac)
thetam = 2 * np.pi * 0.5 * (theta1 + theta2)
x += expl * math.cos(thetam)
y += expl * math.sin(thetam)
w = mpatches.Wedge((x, y), radius, 360. * min(theta1, theta2),
360. * max(theta1, theta2),
facecolor=get_next_color(),
**wedgeprops)
slices.append(w)
self.add_patch(w)
w.set_label(label)
if shadow:
# make sure to add a shadow after the call to
# add_patch so the figure and transform props will be
# set
shad = mpatches.Shadow(w, -0.02, -0.02)
shad.set_zorder(0.9 * w.get_zorder())
shad.set_label('_nolegend_')
self.add_patch(shad)
if labeldistance is not None:
xt = x + labeldistance * radius * math.cos(thetam)
yt = y + labeldistance * radius * math.sin(thetam)
label_alignment_h = 'left' if xt > 0 else 'right'
label_alignment_v = 'center'
label_rotation = 'horizontal'
if rotatelabels:
label_alignment_v = 'bottom' if yt > 0 else 'top'
label_rotation = (np.rad2deg(thetam)
+ (0 if xt > 0 else 180))
props = dict(horizontalalignment=label_alignment_h,
verticalalignment=label_alignment_v,
rotation=label_rotation,
size=rcParams['xtick.labelsize'])
props.update(textprops)
t = self.text(xt, yt, label, **props)
texts.append(t)
if autopct is not None:
xt = x + pctdistance * radius * math.cos(thetam)
yt = y + pctdistance * radius * math.sin(thetam)
if isinstance(autopct, str):
s = autopct % (100. * frac)
elif callable(autopct):
s = autopct(100. * frac)
else:
raise TypeError(
'autopct must be callable or a format string')
props = dict(horizontalalignment='center',
verticalalignment='center')
props.update(textprops)
t = self.text(xt, yt, s, **props)
autotexts.append(t)
theta1 = theta2
i += 1
if not frame:
self.set_frame_on(False)
self.set_xlim((-1.25 + center[0],
1.25 + center[0]))
self.set_ylim((-1.25 + center[1],
1.25 + center[1]))
self.set_xticks([])
self.set_yticks([])
if autopct is None:
return slices, texts
else:
return slices, texts, autotexts
@_preprocess_data(replace_names=["x", "y", "xerr", "yerr"],
label_namer="y")
@docstring.dedent_interpd
def errorbar(self, x, y, yerr=None, xerr=None,
fmt='', ecolor=None, elinewidth=None, capsize=None,
barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False, errorevery=1, capthick=None,
**kwargs):
"""
Plot y versus x as lines and/or markers with attached errorbars.
*x*, *y* define the data locations, *xerr*, *yerr* define the errorbar
sizes. By default, this draws the data markers/lines as well the
errorbars. Use fmt='none' to draw errorbars without any data markers.
Parameters
----------
x, y : scalar or array-like
The data positions.
xerr, yerr : scalar or array-like, shape(N,) or shape(2,N), optional
The errorbar sizes:
- scalar: Symmetric +/- values for all data points.
- shape(N,): Symmetric +/-values for each data point.
- shape(2,N): Separate - and + values for each bar. First row
contains the lower errors, the second row contains the
upper errors.
- *None*: No errorbar.
Note that all error arrays should have *positive* values.
See :doc:`/gallery/statistics/errorbar_features`
for an example on the usage of ``xerr`` and ``yerr``.
fmt : plot format string, optional, default: ''
The format for the data points / data lines. See `.plot` for
details.
Use 'none' (case insensitive) to plot errorbars without any data
markers.
ecolor : mpl color, optional, default: None
A matplotlib color arg which gives the color of the errorbar lines.
If None, use the color of the line connecting the markers.
elinewidth : scalar, optional, default: None
The linewidth of the errorbar lines. If None, the linewidth of
the current style is used.
capsize : scalar, optional, default: None
The length of the error bar caps in points. If None, it will take
the value from :rc:`errorbar.capsize`.
capthick : scalar, optional, default: None
An alias to the keyword argument *markeredgewidth* (a.k.a. *mew*).
This setting is a more sensible name for the property that
controls the thickness of the error bar cap in points. For
backwards compatibility, if *mew* or *markeredgewidth* are given,
then they will over-ride *capthick*. This may change in future
releases.
barsabove : bool, optional, default: False
If True, will plot the errorbars above the plot
symbols. Default is below.
lolims, uplims, xlolims, xuplims : bool, optional, default: False
These arguments can be used to indicate that a value gives only
upper/lower limits. In that case a caret symbol is used to
indicate this. *lims*-arguments may be of the same type as *xerr*
and *yerr*. To use limits with inverted axes, :meth:`set_xlim`
or :meth:`set_ylim` must be called before :meth:`errorbar`.
errorevery : positive integer, optional, default: 1
Subsamples the errorbars. e.g., if errorevery=5, errorbars for
every 5-th datapoint will be plotted. The data plot itself still
shows all data points.
Returns
-------
container : :class:`~.container.ErrorbarContainer`
The container contains:
- plotline: :class:`~matplotlib.lines.Line2D` instance of
x, y plot markers and/or line.
- caplines: A tuple of :class:`~matplotlib.lines.Line2D` instances
of the error bar caps.
- barlinecols: A tuple of
:class:`~matplotlib.collections.LineCollection` with the
horizontal and vertical error ranges.
Other Parameters
----------------
**kwargs
All other keyword arguments are passed on to the plot
command for the markers. For example, this code makes big red
squares with thick green edges::
x,y,yerr = rand(3,10)
errorbar(x, y, yerr, marker='s', mfc='red',
mec='green', ms=20, mew=4)
where *mfc*, *mec*, *ms* and *mew* are aliases for the longer
property names, *markerfacecolor*, *markeredgecolor*, *markersize*
and *markeredgewidth*.
Valid kwargs for the marker properties are `.Line2D` properties:
%(_Line2D_docstr)s
Notes
-----
.. [Notes section required for data comment. See #10189.]
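Examples
--------
A minimal, illustrative sketch; the data and error sizes below are
arbitrary::

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots()
    >>> x = np.arange(5)
    >>> ax.errorbar(x, x ** 2, yerr=0.5 + 0.2 * x, fmt='o-', capsize=3)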
"""
kwargs = cbook.normalize_kwargs(kwargs, mlines.Line2D)
# anything that comes in as 'None', drop so the default thing
# happens down stream
kwargs = {k: v for k, v in kwargs.items() if v is not None}
kwargs.setdefault('zorder', 2)
if errorevery < 1:
raise ValueError(
'errorevery has to be a strictly positive integer')
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
plot_line = (fmt.lower() != 'none')
label = kwargs.pop("label", None)
if fmt == '':
fmt_style_kwargs = {}
else:
fmt_style_kwargs = {k: v for k, v in
zip(('linestyle', 'marker', 'color'),
_process_plot_format(fmt))
if v is not None}
if fmt == 'none':
# Remove alpha=0 color that _process_plot_format returns
fmt_style_kwargs.pop('color')
if ('color' in kwargs or 'color' in fmt_style_kwargs or
ecolor is not None):
base_style = {}
if 'color' in kwargs:
base_style['color'] = kwargs.pop('color')
else:
base_style = next(self._get_lines.prop_cycler)
base_style['label'] = '_nolegend_'
base_style.update(fmt_style_kwargs)
if 'color' not in base_style:
base_style['color'] = 'C0'
if ecolor is None:
ecolor = base_style['color']
# make sure all the args are iterable; use lists not arrays to
# preserve units
if not np.iterable(x):
x = [x]
if not np.iterable(y):
y = [y]
if xerr is not None:
if not np.iterable(xerr):
xerr = [xerr] * len(x)
if yerr is not None:
if not np.iterable(yerr):
yerr = [yerr] * len(y)
# make the style dict for the 'normal' plot line
plot_line_style = {
**base_style,
**kwargs,
'zorder': (kwargs['zorder'] - .1 if barsabove else
kwargs['zorder'] + .1),
}
# make the style dict for the line collections (the bars)
eb_lines_style = dict(base_style)
eb_lines_style.pop('marker', None)
eb_lines_style.pop('linestyle', None)
eb_lines_style['color'] = ecolor
if elinewidth:
eb_lines_style['linewidth'] = elinewidth
elif 'linewidth' in kwargs:
eb_lines_style['linewidth'] = kwargs['linewidth']
for key in ('transform', 'alpha', 'zorder', 'rasterized'):
if key in kwargs:
eb_lines_style[key] = kwargs[key]
# set up cap style dictionary
eb_cap_style = dict(base_style)
# eject any marker information from format string
eb_cap_style.pop('marker', None)
eb_lines_style.pop('markerfacecolor', None)
eb_lines_style.pop('markeredgewidth', None)
eb_lines_style.pop('markeredgecolor', None)
eb_cap_style.pop('ls', None)
eb_cap_style['linestyle'] = 'none'
if capsize is None:
capsize = rcParams["errorbar.capsize"]
if capsize > 0:
eb_cap_style['markersize'] = 2. * capsize
if capthick is not None:
eb_cap_style['markeredgewidth'] = capthick
# For backwards-compat, allow explicit setting of
# 'markeredgewidth' to over-ride capthick.
for key in ('markeredgewidth', 'transform', 'alpha',
'zorder', 'rasterized'):
if key in kwargs:
eb_cap_style[key] = kwargs[key]
eb_cap_style['color'] = ecolor
data_line = None
if plot_line:
data_line = mlines.Line2D(x, y, **plot_line_style)
self.add_line(data_line)
barcols = []
caplines = []
# arrays fine here, they are booleans and hence not units
lolims = np.broadcast_to(lolims, len(x)).astype(bool)
uplims = np.broadcast_to(uplims, len(x)).astype(bool)
xlolims = np.broadcast_to(xlolims, len(x)).astype(bool)
xuplims = np.broadcast_to(xuplims, len(x)).astype(bool)
everymask = np.arange(len(x)) % errorevery == 0
def xywhere(xs, ys, mask):
"""
Return those elements of *xs* and *ys* for which *mask* is True;
*xs* and *ys* need not be arrays (lists are returned).
"""
assert len(xs) == len(ys)
assert len(xs) == len(mask)
xs = [thisx for thisx, b in zip(xs, mask) if b]
ys = [thisy for thisy, b in zip(ys, mask) if b]
return xs, ys
def extract_err(err, data):
"""
Private function to parse *err* and subtract/add it to *data*.
Both *err* and *data* are already iterables at this point.
"""
try: # Asymmetric error: pair of 1D iterables.
a, b = err
iter(a)
iter(b)
except (TypeError, ValueError):
a = b = err # Symmetric error: 1D iterable.
# This could just be `np.ndim(a) > 1 and np.ndim(b) > 1`, except
# for the (undocumented, but tested) support for (n, 1) arrays.
a_sh = np.shape(a)
b_sh = np.shape(b)
if (len(a_sh) > 2 or (len(a_sh) == 2 and a_sh[1] != 1)
or len(b_sh) > 2 or (len(b_sh) == 2 and b_sh[1] != 1)):
raise ValueError(
"err must be a scalar or a 1D or (2, n) array-like")
if len(a_sh) == 2 or len(b_sh) == 2:
cbook.warn_deprecated(
"3.1", message="Support for passing a (n, 1)-shaped error "
"array to errorbar() is deprecated since Matplotlib "
"%(since)s and will be removed %(removal)s; pass a 1D "
"array instead.")
# Using list comprehensions rather than arrays to preserve units.
for e in [a, b]:
if len(data) != len(e):
raise ValueError(
f"The lengths of the data ({len(data)}) and the "
f"error {len(e)} do not match")
low = [v - e for v, e in zip(data, a)]
high = [v + e for v, e in zip(data, b)]
return low, high
if xerr is not None:
left, right = extract_err(xerr, x)
# select points without upper/lower limits in x and
# draw normal errorbars for these points
noxlims = ~(xlolims | xuplims)
if noxlims.any() or len(noxlims) == 0:
yo, _ = xywhere(y, right, noxlims & everymask)
lo, ro = xywhere(left, right, noxlims & everymask)
barcols.append(self.hlines(yo, lo, ro, **eb_lines_style))
if capsize > 0:
caplines.append(mlines.Line2D(lo, yo, marker='|',
**eb_cap_style))
caplines.append(mlines.Line2D(ro, yo, marker='|',
**eb_cap_style))
if xlolims.any():
yo, _ = xywhere(y, right, xlolims & everymask)
lo, ro = xywhere(x, right, xlolims & everymask)
barcols.append(self.hlines(yo, lo, ro, **eb_lines_style))
rightup, yup = xywhere(right, y, xlolims & everymask)
if self.xaxis_inverted():
marker = mlines.CARETLEFTBASE
else:
marker = mlines.CARETRIGHTBASE
caplines.append(
mlines.Line2D(rightup, yup, ls='None', marker=marker,
**eb_cap_style))
if capsize > 0:
xlo, ylo = xywhere(x, y, xlolims & everymask)
caplines.append(mlines.Line2D(xlo, ylo, marker='|',
**eb_cap_style))
if xuplims.any():
yo, _ = xywhere(y, right, xuplims & everymask)
lo, ro = xywhere(left, x, xuplims & everymask)
barcols.append(self.hlines(yo, lo, ro, **eb_lines_style))
leftlo, ylo = xywhere(left, y, xuplims & everymask)
if self.xaxis_inverted():
marker = mlines.CARETRIGHTBASE
else:
marker = mlines.CARETLEFTBASE
caplines.append(
mlines.Line2D(leftlo, ylo, ls='None', marker=marker,
**eb_cap_style))
if capsize > 0:
xup, yup = xywhere(x, y, xuplims & everymask)
caplines.append(mlines.Line2D(xup, yup, marker='|',
**eb_cap_style))
if yerr is not None:
lower, upper = extract_err(yerr, y)
# select points without upper/lower limits in y and
# draw normal errorbars for these points
noylims = ~(lolims | uplims)
if noylims.any() or len(noylims) == 0:
xo, _ = xywhere(x, lower, noylims & everymask)
lo, uo = xywhere(lower, upper, noylims & everymask)
barcols.append(self.vlines(xo, lo, uo, **eb_lines_style))
if capsize > 0:
caplines.append(mlines.Line2D(xo, lo, marker='_',
**eb_cap_style))
caplines.append(mlines.Line2D(xo, uo, marker='_',
**eb_cap_style))
if lolims.any():
xo, _ = xywhere(x, lower, lolims & everymask)
lo, uo = xywhere(y, upper, lolims & everymask)
barcols.append(self.vlines(xo, lo, uo, **eb_lines_style))
xup, upperup = xywhere(x, upper, lolims & everymask)
if self.yaxis_inverted():
marker = mlines.CARETDOWNBASE
else:
marker = mlines.CARETUPBASE
caplines.append(
mlines.Line2D(xup, upperup, ls='None', marker=marker,
**eb_cap_style))
if capsize > 0:
xlo, ylo = xywhere(x, y, lolims & everymask)
caplines.append(mlines.Line2D(xlo, ylo, marker='_',
**eb_cap_style))
if uplims.any():
xo, _ = xywhere(x, lower, uplims & everymask)
lo, uo = xywhere(lower, y, uplims & everymask)
barcols.append(self.vlines(xo, lo, uo, **eb_lines_style))
xlo, lowerlo = xywhere(x, lower, uplims & everymask)
if self.yaxis_inverted():
marker = mlines.CARETUPBASE
else:
marker = mlines.CARETDOWNBASE
caplines.append(
mlines.Line2D(xlo, lowerlo, ls='None', marker=marker,
**eb_cap_style))
if capsize > 0:
xup, yup = xywhere(x, y, uplims & everymask)
caplines.append(mlines.Line2D(xup, yup, marker='_',
**eb_cap_style))
for l in caplines:
self.add_line(l)
self.autoscale_view()
errorbar_container = ErrorbarContainer((data_line, tuple(caplines),
tuple(barcols)),
has_xerr=(xerr is not None),
has_yerr=(yerr is not None),
label=label)
self.containers.append(errorbar_container)
return errorbar_container # (l0, caplines, barcols)
@cbook._rename_parameter("3.1", "manage_xticks", "manage_ticks")
@_preprocess_data()
def boxplot(self, x, notch=None, sym=None, vert=None, whis=None,
positions=None, widths=None, patch_artist=None,
bootstrap=None, usermedians=None, conf_intervals=None,
meanline=None, showmeans=None, showcaps=None,
showbox=None, showfliers=None, boxprops=None,
labels=None, flierprops=None, medianprops=None,
meanprops=None, capprops=None, whiskerprops=None,
manage_ticks=True, autorange=False, zorder=None):
"""
Make a box and whisker plot.
Make a box and whisker plot for each column of ``x`` or each
vector in sequence ``x``. The box extends from the lower to
upper quartile values of the data, with a line at the median.
The whiskers extend from the box to show the range of the
data. Flier points are those past the end of the whiskers.
Parameters
----------
x : Array or a sequence of vectors.
The input data.
notch : bool, optional (False)
If `True`, will produce a notched box plot. Otherwise, a
rectangular boxplot is produced. The notches represent the
confidence interval (CI) around the median. See the entry
for the ``bootstrap`` parameter for information regarding
how the locations of the notches are computed.
.. note::
In cases where the values of the CI are less than the
lower quartile or greater than the upper quartile, the
notches will extend beyond the box, giving it a
distinctive "flipped" appearance. This is expected
behavior and consistent with other statistical
visualization packages.
sym : str, optional
The default symbol for flier points. Enter an empty string
('') if you don't want to show fliers. If `None`, then the
fliers default to 'b+'. If you want more control use the
flierprops kwarg.
vert : bool, optional (True)
If `True` (default), makes the boxes vertical. If `False`,
everything is drawn horizontally.
whis : float, sequence, or string (default = 1.5)
As a float, determines the reach of the whiskers beyond the
first and third quartiles. In other words, where IQR is the
interquartile range (``Q3 - Q1``), the upper whisker will extend to
the last datum less than ``Q3 + whis*IQR``. Similarly, the lower
whisker will extend to the first datum greater than ``Q1 - whis*IQR``.
Beyond the whiskers, data
are considered outliers and are plotted as individual
points. Set this to an unreasonably high value to force the
whiskers to show the min and max values. Alternatively, set
this to an ascending sequence of percentile (e.g., [5, 95])
to set the whiskers at specific percentiles of the data.
Finally, ``whis`` can be the string ``'range'`` to force the
whiskers to the min and max of the data.
bootstrap : int, optional
Specifies whether to bootstrap the confidence intervals
around the median for notched boxplots. If ``bootstrap`` is
None, no bootstrapping is performed, and notches are
calculated using a Gaussian-based asymptotic approximation
(see McGill, R., Tukey, J.W., and Larsen, W.A., 1978, and
Kendall and Stuart, 1967). Otherwise, bootstrap specifies
the number of times to bootstrap the median to determine its
95% confidence intervals. Values between 1000 and 10000 are
recommended.
usermedians : array-like, optional
An array or sequence whose first dimension (or length) is
compatible with ``x``. This overrides the medians computed
by matplotlib for each element of ``usermedians`` that is not
`None`. When an element of ``usermedians`` is None, the median
will be computed by matplotlib as normal.
conf_intervals : array-like, optional
Array or sequence whose first dimension (or length) is
compatible with ``x`` and whose second dimension is 2. When
an element of ``conf_intervals`` is not None, the
notch locations computed by matplotlib are overridden
(provided ``notch`` is `True`). When an element of
``conf_intervals`` is `None`, the notches are computed by the
method specified by the other kwargs (e.g., ``bootstrap``).
positions : array-like, optional
Sets the positions of the boxes. The ticks and limits are
automatically set to match the positions. Defaults to
`range(1, N+1)` where N is the number of boxes to be drawn.
widths : scalar or array-like
Sets the width of each box either with a scalar or a
sequence. The default is 0.5, or ``0.15*(distance between
extreme positions)``, if that is smaller.
patch_artist : bool, optional (False)
If `False` produces boxes with the Line2D artist. Otherwise,
boxes are drawn with Patch artists.
labels : sequence, optional
Labels for each dataset. Length must be compatible with
dimensions of ``x``.
manage_ticks : bool, optional (True)
If True, the tick locations and labels will be adjusted to match
the boxplot positions.
autorange : bool, optional (False)
When `True` and the data are distributed such that the 25th and
75th percentiles are equal, ``whis`` is set to ``'range'`` such
that the whisker ends are at the minimum and maximum of the data.
meanline : bool, optional (False)
If `True` (and ``showmeans`` is `True`), will try to render
the mean as a line spanning the full width of the box
according to ``meanprops`` (see below). Not recommended if
``shownotches`` is also True. Otherwise, means will be shown
as points.
zorder : scalar, optional (None)
Sets the zorder of the boxplot.
Other Parameters
----------------
showcaps : bool, optional (True)
Show the caps on the ends of whiskers.
showbox : bool, optional (True)
Show the central box.
showfliers : bool, optional (True)
Show the outliers beyond the caps.
showmeans : bool, optional (False)
Show the arithmetic means.
capprops : dict, optional (None)
Specifies the style of the caps.
boxprops : dict, optional (None)
Specifies the style of the box.
whiskerprops : dict, optional (None)
Specifies the style of the whiskers.
flierprops : dict, optional (None)
Specifies the style of the fliers.
medianprops : dict, optional (None)
Specifies the style of the median.
meanprops : dict, optional (None)
Specifies the style of the mean.
Returns
-------
result : dict
A dictionary mapping each component of the boxplot to a list
of the :class:`matplotlib.lines.Line2D` instances
created. That dictionary has the following keys (assuming
vertical boxplots):
- ``boxes``: the main body of the boxplot showing the
quartiles and the median's confidence intervals if
enabled.
- ``medians``: horizontal lines at the median of each box.
- ``whiskers``: the vertical lines extending to the most
extreme, non-outlier data points.
- ``caps``: the horizontal lines at the ends of the
whiskers.
- ``fliers``: points representing data that extend beyond
the whiskers (fliers).
- ``means``: points or lines representing the means.
Notes
-----
.. [Notes section required for data comment. See #10189.]
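Examples
--------
A minimal, illustrative sketch using random data::

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots()
    >>> data = [np.random.randn(100), np.random.randn(100) + 1]
    >>> ax.boxplot(data, labels=['control', 'treatment'], notch=True)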
"""
# Missing arguments default to rcParams.
if whis is None:
whis = rcParams['boxplot.whiskers']
if bootstrap is None:
bootstrap = rcParams['boxplot.bootstrap']
bxpstats = cbook.boxplot_stats(x, whis=whis, bootstrap=bootstrap,
labels=labels, autorange=autorange)
if notch is None:
notch = rcParams['boxplot.notch']
if vert is None:
vert = rcParams['boxplot.vertical']
if patch_artist is None:
patch_artist = rcParams['boxplot.patchartist']
if meanline is None:
meanline = rcParams['boxplot.meanline']
if showmeans is None:
showmeans = rcParams['boxplot.showmeans']
if showcaps is None:
showcaps = rcParams['boxplot.showcaps']
if showbox is None:
showbox = rcParams['boxplot.showbox']
if showfliers is None:
showfliers = rcParams['boxplot.showfliers']
if boxprops is None:
boxprops = {}
if whiskerprops is None:
whiskerprops = {}
if capprops is None:
capprops = {}
if medianprops is None:
medianprops = {}
if meanprops is None:
meanprops = {}
if flierprops is None:
flierprops = {}
if patch_artist:
boxprops['linestyle'] = 'solid' # Not consistent with bxp.
if 'color' in boxprops:
boxprops['edgecolor'] = boxprops.pop('color')
# if non-default sym value, put it into the flier dictionary
# the logic for providing the default symbol ('b+') now lives
# in bxp in the initial value of final_flierprops
# handle all of the `sym` related logic here so we only have to pass
# on the flierprops dict.
if sym is not None:
# no-flier case, which should really be done with
# 'showfliers=False' but nonetheless deal with it to keep backwards
# compatibility
if sym == '':
# blow away existing dict and make one for invisible markers
flierprops = dict(linestyle='none', marker='', color='none')
# turn the fliers off just to be safe
showfliers = False
# now process the symbol string
else:
# process the symbol string
# discarded linestyle
_, marker, color = _process_plot_format(sym)
# if we have a marker, use it
if marker is not None:
flierprops['marker'] = marker
# if we have a color, use it
if color is not None:
# assume that if color is passed in, the user wants a
# filled symbol; if the user wants more control, use
# flierprops
flierprops['color'] = color
flierprops['markerfacecolor'] = color
flierprops['markeredgecolor'] = color
# replace medians if necessary:
if usermedians is not None:
if (len(np.ravel(usermedians)) != len(bxpstats) or
np.shape(usermedians)[0] != len(bxpstats)):
raise ValueError('usermedians length not compatible with x')
else:
# reassign medians as necessary
for stats, med in zip(bxpstats, usermedians):
if med is not None:
stats['med'] = med
if conf_intervals is not None:
if np.shape(conf_intervals)[0] != len(bxpstats):
err_mess = 'conf_intervals length not compatible with x'
raise ValueError(err_mess)
else:
for stats, ci in zip(bxpstats, conf_intervals):
if ci is not None:
if len(ci) != 2:
raise ValueError('each confidence interval must '
'have two values')
else:
if ci[0] is not None:
stats['cilo'] = ci[0]
if ci[1] is not None:
stats['cihi'] = ci[1]
artists = self.bxp(bxpstats, positions=positions, widths=widths,
vert=vert, patch_artist=patch_artist,
shownotches=notch, showmeans=showmeans,
showcaps=showcaps, showbox=showbox,
boxprops=boxprops, flierprops=flierprops,
medianprops=medianprops, meanprops=meanprops,
meanline=meanline, showfliers=showfliers,
capprops=capprops, whiskerprops=whiskerprops,
manage_ticks=manage_ticks, zorder=zorder)
return artists
@cbook._rename_parameter("3.1", "manage_xticks", "manage_ticks")
def bxp(self, bxpstats, positions=None, widths=None, vert=True,
patch_artist=False, shownotches=False, showmeans=False,
showcaps=True, showbox=True, showfliers=True,
boxprops=None, whiskerprops=None, flierprops=None,
medianprops=None, capprops=None, meanprops=None,
meanline=False, manage_ticks=True, zorder=None):
"""
Drawing function for box and whisker plots.
Make a box and whisker plot for each column of *x* or each
vector in sequence *x*. The box extends from the lower to
upper quartile values of the data, with a line at the median.
The whiskers extend from the box to show the range of the
data. Flier points are those past the end of the whiskers.
Parameters
----------
bxpstats : list of dicts
A list of dictionaries containing stats for each boxplot.
Required keys are:
- ``med``: The median (scalar float).
- ``q1``: The first quartile (25th percentile) (scalar
float).
- ``q3``: The third quartile (75th percentile) (scalar
float).
- ``whislo``: Lower bound of the lower whisker (scalar
float).
- ``whishi``: Upper bound of the upper whisker (scalar
float).
Optional keys are:
- ``mean``: The mean (scalar float). Needed if
``showmeans=True``.
- ``fliers``: Data beyond the whiskers (sequence of floats).
Needed if ``showfliers=True``.
- ``cilo`` & ``cihi``: Lower and upper confidence intervals
about the median. Needed if ``shownotches=True``.
- ``label``: Name of the dataset (string). If available,
this will be used as a tick label for the boxplot.
positions : array-like, default = [1, 2, ..., n]
Sets the positions of the boxes. The ticks and limits
are automatically set to match the positions.
widths : array-like, default = None
Either a scalar or a vector and sets the width of each
box. The default is ``0.15*(distance between extreme
positions)``, clipped to no less than 0.15 and no more than
0.5.
vert : bool, default = True
If `True` (default), makes the boxes vertical. If `False`,
makes horizontal boxes.
patch_artist : bool, default = False
If `False` produces boxes with the
`~matplotlib.lines.Line2D` artist. If `True` produces boxes
with the `~matplotlib.patches.Patch` artist.
shownotches : bool, default = False
If `False` (default), produces a rectangular box plot.
If `True`, will produce a notched box plot
showmeans : bool, default = False
If `True`, will toggle on the rendering of the means
showcaps : bool, default = True
If `True`, will toggle on the rendering of the caps
showbox : bool, default = True
If `True`, will toggle on the rendering of the box
showfliers : bool, default = True
If `True`, will toggle on the rendering of the fliers
boxprops : dict or None (default)
If provided, will set the plotting style of the boxes
whiskerprops : dict or None (default)
If provided, will set the plotting style of the whiskers
capprops : dict or None (default)
If provided, will set the plotting style of the caps
flierprops : dict or None (default)
If provided will set the plotting style of the fliers
medianprops : dict or None (default)
If provided, will set the plotting style of the medians
meanprops : dict or None (default)
If provided, will set the plotting style of the means
meanline : bool, default = False
If `True` (and *showmeans* is `True`), will try to render the mean
as a line spanning the full width of the box according to
*meanprops*. Not recommended if *shownotches* is also True.
Otherwise, means will be shown as points.
manage_ticks : bool, default = True
If True, the tick locations and labels will be adjusted to match the
boxplot positions.
zorder : scalar, default = None
The zorder of the resulting boxplot.
Returns
-------
result : dict
A dictionary mapping each component of the boxplot to a list
of the :class:`matplotlib.lines.Line2D` instances
created. That dictionary has the following keys (assuming
vertical boxplots):
- ``boxes``: the main body of the boxplot showing the
quartiles and the median's confidence intervals if
enabled.
- ``medians``: horizontal lines at the median of each box.
- ``whiskers``: the vertical lines extending to the most
extreme, non-outlier data points.
- ``caps``: the horizontal lines at the ends of the
whiskers.
- ``fliers``: points representing data that extend beyond
the whiskers (fliers).
- ``means``: points or lines representing the means.
Examples
--------
.. plot:: gallery/statistics/bxp.py
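A minimal hand-built sketch, assuming ``ax`` is an existing
`~matplotlib.axes.Axes`; the statistic values are arbitrary:
>>> stats = [dict(med=2.5, q1=1.0, q3=4.0, whislo=0.0, whishi=5.0,
...               fliers=[], label='A')]
>>> artists = ax.bxp(stats)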
"""
# lists of artists to be output
whiskers = []
caps = []
boxes = []
medians = []
means = []
fliers = []
# empty list of xticklabels
datalabels = []
# Use default zorder if none specified
if zorder is None:
zorder = mlines.Line2D.zorder
zdelta = 0.1
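# Build the property dict for one boxplot element: start from the
# matching ``boxplot.<subkey>.*`` rcParams, add the zorder (plus an
# optional offset), then let an explicitly passed dict override them.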
def with_rcdefaults(subkey, explicit, zdelta=0):
d = {k.split('.')[-1]: v for k, v in rcParams.items()
if k.startswith(f'boxplot.{subkey}')}
d['zorder'] = zorder + zdelta
if explicit is not None:
d.update(explicit)
return d
# box properties
if patch_artist:
final_boxprops = dict(
linestyle=rcParams['boxplot.boxprops.linestyle'],
linewidth=rcParams['boxplot.boxprops.linewidth'],
edgecolor=rcParams['boxplot.boxprops.color'],
facecolor=('white' if rcParams['_internal.classic_mode'] else
rcParams['patch.facecolor']),
zorder=zorder,
)
if boxprops is not None:
final_boxprops.update(boxprops)
else:
final_boxprops = with_rcdefaults('boxprops', boxprops)
final_whiskerprops = with_rcdefaults('whiskerprops', whiskerprops)
final_capprops = with_rcdefaults('capprops', capprops)
final_flierprops = with_rcdefaults('flierprops', flierprops)
final_medianprops = with_rcdefaults('medianprops', medianprops, zdelta)
final_meanprops = with_rcdefaults('meanprops', meanprops, zdelta)
removed_prop = 'marker' if meanline else 'linestyle'
# Only remove the property if it's not set explicitly as a parameter.
if meanprops is None or removed_prop not in meanprops:
final_meanprops[removed_prop] = ''
def to_vc(xs, ys):
# convert arguments to verts and codes, append (0, 0) (ignored).
verts = np.append(np.column_stack([xs, ys]), [(0, 0)], 0)
codes = ([mpath.Path.MOVETO]
+ [mpath.Path.LINETO] * (len(verts) - 2)
+ [mpath.Path.CLOSEPOLY])
return verts, codes
def patch_list(xs, ys, **kwargs):
verts, codes = to_vc(xs, ys)
path = mpath.Path(verts, codes)
patch = mpatches.PathPatch(path, **kwargs)
self.add_artist(patch)
return [patch]
# vertical or horizontal plot?
if vert:
def doplot(*args, **kwargs):
return self.plot(*args, **kwargs)
def dopatch(xs, ys, **kwargs):
return patch_list(xs, ys, **kwargs)
else:
def doplot(*args, **kwargs):
shuffled = []
for i in range(0, len(args), 2):
shuffled.extend([args[i + 1], args[i]])
return self.plot(*shuffled, **kwargs)
def dopatch(xs, ys, **kwargs):
xs, ys = ys, xs # flip X, Y
return patch_list(xs, ys, **kwargs)
# input validation
N = len(bxpstats)
datashape_message = ("List of boxplot statistics and `{0}` "
"values must have same the length")
# check position
if positions is None:
positions = list(range(1, N + 1))
elif len(positions) != N:
raise ValueError(datashape_message.format("positions"))
positions = np.array(positions)
if len(positions) > 0 and not isinstance(positions[0], Number):
raise TypeError("positions should be an iterable of numbers")
# width
if widths is None:
widths = [np.clip(0.15 * np.ptp(positions), 0.15, 0.5)] * N
elif np.isscalar(widths):
widths = [widths] * N
elif len(widths) != N:
raise ValueError(datashape_message.format("widths"))
for pos, width, stats in zip(positions, widths, bxpstats):
# try to find a new label
datalabels.append(stats.get('label', pos))
# whisker coords
whisker_x = np.ones(2) * pos
whiskerlo_y = np.array([stats['q1'], stats['whislo']])
whiskerhi_y = np.array([stats['q3'], stats['whishi']])
# cap coords
cap_left = pos - width * 0.25
cap_right = pos + width * 0.25
cap_x = np.array([cap_left, cap_right])
cap_lo = np.ones(2) * stats['whislo']
cap_hi = np.ones(2) * stats['whishi']
# box and median coords
box_left = pos - width * 0.5
box_right = pos + width * 0.5
med_y = [stats['med'], stats['med']]
# notched boxes
if shownotches:
box_x = [box_left, box_right, box_right, cap_right, box_right,
box_right, box_left, box_left, cap_left, box_left,
box_left]
box_y = [stats['q1'], stats['q1'], stats['cilo'],
stats['med'], stats['cihi'], stats['q3'],
stats['q3'], stats['cihi'], stats['med'],
stats['cilo'], stats['q1']]
med_x = cap_x
# plain boxes
else:
box_x = [box_left, box_right, box_right, box_left, box_left]
box_y = [stats['q1'], stats['q1'], stats['q3'], stats['q3'],
stats['q1']]
med_x = [box_left, box_right]
# maybe draw the box:
if showbox:
if patch_artist:
boxes.extend(dopatch(box_x, box_y, **final_boxprops))
else:
boxes.extend(doplot(box_x, box_y, **final_boxprops))
# draw the whiskers
whiskers.extend(doplot(
whisker_x, whiskerlo_y, **final_whiskerprops
))
whiskers.extend(doplot(
whisker_x, whiskerhi_y, **final_whiskerprops
))
# maybe draw the caps:
if showcaps:
caps.extend(doplot(cap_x, cap_lo, **final_capprops))
caps.extend(doplot(cap_x, cap_hi, **final_capprops))
# draw the medians
medians.extend(doplot(med_x, med_y, **final_medianprops))
# maybe draw the means
if showmeans:
if meanline:
means.extend(doplot(
[box_left, box_right], [stats['mean'], stats['mean']],
**final_meanprops
))
else:
means.extend(doplot(
[pos], [stats['mean']], **final_meanprops
))
# maybe draw the fliers
if showfliers:
# fliers coords
flier_x = np.full(len(stats['fliers']), pos, dtype=np.float64)
flier_y = stats['fliers']
fliers.extend(doplot(
flier_x, flier_y, **final_flierprops
))
if manage_ticks:
axis_name = "x" if vert else "y"
interval = getattr(self.dataLim, f"interval{axis_name}")
axis = getattr(self, f"{axis_name}axis")
positions = axis.convert_units(positions)
# The 0.5 additional padding ensures reasonable-looking boxes
# even when drawing a single box. We set the sticky edge to
# prevent margin expansion, in order to match old behavior (back
# when separate calls to boxplot() would completely reset the axis
# limits regardless of what was drawn before). The sticky edges
# are attached to the median lines, as they are always present.
interval[:] = (min(interval[0], min(positions) - .5),
max(interval[1], max(positions) + .5))
for median, position in zip(medians, positions):
getattr(median.sticky_edges, axis_name).extend(
[position - .5, position + .5])
# Modified from Axis.set_ticks and Axis.set_ticklabels.
locator = axis.get_major_locator()
if not isinstance(axis.get_major_locator(),
mticker.FixedLocator):
locator = mticker.FixedLocator([])
axis.set_major_locator(locator)
locator.locs = np.array([*locator.locs, *positions])
formatter = axis.get_major_formatter()
if not isinstance(axis.get_major_formatter(),
mticker.FixedFormatter):
formatter = mticker.FixedFormatter([])
axis.set_major_formatter(formatter)
formatter.seq = [*formatter.seq, *datalabels]
self.autoscale_view(
scalex=self._autoscaleXon, scaley=self._autoscaleYon)
return dict(whiskers=whiskers, caps=caps, boxes=boxes,
medians=medians, fliers=fliers, means=means)
@staticmethod
def _parse_scatter_color_args(c, edgecolors, kwargs, xshape, yshape,
get_next_color_func):
"""
Helper function to process color related arguments of `.Axes.scatter`.
Argument precedence for facecolors:
- c (if not None)
- kwargs['facecolors']
- kwargs['facecolor']
- kwargs['color'] (==kwcolor)
- 'b' if in classic mode else the result of ``get_next_color_func()``
Argument precedence for edgecolors:
- edgecolors (is an explicit kw argument in scatter())
- kwargs['edgecolor']
- kwargs['color'] (==kwcolor)
- 'face' if not in classic mode else None
Parameters
----------
c : color or sequence or sequence of color or None
See argument description of `.Axes.scatter`.
edgecolors : color or sequence of color or {'face', 'none'} or None
See argument description of `.Axes.scatter`.
kwargs : dict
Additional kwargs. If these keys exist, we pop and process them:
'facecolors', 'facecolor', 'edgecolor', 'color'
Note: The dict is modified by this function.
xshape, yshape : tuple of int
The shape of the x and y arrays passed to `.Axes.scatter`.
get_next_color_func : callable
A callable that returns a color. This color is used as facecolor
if no other color is provided.
Note, that this is a function rather than a fixed color value to
support conditional evaluation of the next color. As of the
current implementation obtaining the next color from the
property cycle advances the cycle. This must only happen if we
actually use the color, which will only be decided within this
method.
Returns
-------
c
The input *c* if it was not *None*, else some color specification
derived from the other inputs or defaults.
colors : array(N, 4) or None
The facecolors as RGBA values or *None* if a colormap is used.
edgecolors
The edgecolor specification.
"""
xsize = functools.reduce(operator.mul, xshape, 1)
ysize = functools.reduce(operator.mul, yshape, 1)
facecolors = kwargs.pop('facecolors', None)
facecolors = kwargs.pop('facecolor', facecolors)
edgecolors = kwargs.pop('edgecolor', edgecolors)
kwcolor = kwargs.pop('color', None)
if kwcolor is not None and c is not None:
raise ValueError("Supply a 'c' argument or a 'color'"
" kwarg but not both; they differ but"
" their functionalities overlap.")
if kwcolor is not None:
try:
mcolors.to_rgba_array(kwcolor)
except ValueError:
raise ValueError("'color' kwarg must be an mpl color"
" spec or sequence of color specs.\n"
"For a sequence of values to be color-mapped,"
" use the 'c' argument instead.")
if edgecolors is None:
edgecolors = kwcolor
if facecolors is None:
facecolors = kwcolor
if edgecolors is None and not rcParams['_internal.classic_mode']:
edgecolors = rcParams['scatter.edgecolors']
c_was_none = c is None
if c is None:
c = (facecolors if facecolors is not None
else "b" if rcParams['_internal.classic_mode']
else get_next_color_func())
# After this block, c_array will be None unless
# c is an array for mapping. The potential ambiguity
# with a sequence of 3 or 4 numbers is resolved in
# favor of mapping, not rgb or rgba.
# Convenience vars to track shape mismatch *and* conversion failures.
valid_shape = True # will be put to the test!
n_elem = -1 # used only for (some) exceptions
if (c_was_none or
kwcolor is not None or
isinstance(c, str) or
(isinstance(c, collections.abc.Iterable) and
len(c) > 0 and
isinstance(cbook.safe_first_element(c), str))):
c_array = None
else:
try: # First, does 'c' look suitable for value-mapping?
c_array = np.asanyarray(c, dtype=float)
n_elem = c_array.shape[0]
if c_array.shape in [xshape, yshape]:
c = np.ma.ravel(c_array)
else:
if c_array.shape in ((3,), (4,)):
_log.warning(
"'c' argument looks like a single numeric RGB or "
"RGBA sequence, which should be avoided as value-"
"mapping will have precedence in case its length "
"matches with 'x' & 'y'. Please use a 2-D array "
"with a single row if you really want to specify "
"the same RGB or RGBA value for all points.")
# Wrong size; it must not be intended for mapping.
valid_shape = False
c_array = None
except ValueError:
# Failed to make a floating-point array; c must be color specs.
c_array = None
if c_array is None:
try: # Then is 'c' acceptable as PathCollection facecolors?
colors = mcolors.to_rgba_array(c)
n_elem = colors.shape[0]
if colors.shape[0] not in (0, 1, xsize, ysize):
# NB: remember that a single color is also acceptable.
# Besides *colors* will be an empty array if c == 'none'.
valid_shape = False
raise ValueError
except ValueError:
if not valid_shape: # but at least one conversion succeeded.
raise ValueError(
"'c' argument has {nc} elements, which is not "
"acceptable for use with 'x' with size {xs}, "
"'y' with size {ys}."
.format(nc=n_elem, xs=xsize, ys=ysize)
)
else:
# Both the mapping *and* the RGBA conversion failed: pretty
# severe failure => one may appreciate a verbose feedback.
raise ValueError(
"'c' argument must be a mpl color, a sequence of mpl "
"colors or a sequence of numbers, not {}."
.format(c) # note: could be long depending on c
)
else:
colors = None # use cmap, norm after collection is created
return c, colors, edgecolors
@_preprocess_data(replace_names=["x", "y", "s", "linewidths",
"edgecolors", "c", "facecolor",
"facecolors", "color"],
label_namer="y")
def scatter(self, x, y, s=None, c=None, marker=None, cmap=None, norm=None,
vmin=None, vmax=None, alpha=None, linewidths=None,
verts=None, edgecolors=None, *, plotnonfinite=False,
**kwargs):
"""
A scatter plot of *y* vs *x* with varying marker size and/or color.
Parameters
----------
x, y : array_like, shape (n, )
The data positions.
s : scalar or array_like, shape (n, ), optional
The marker size in points**2.
Default is ``rcParams['lines.markersize'] ** 2``.
c : color, sequence, or sequence of color, optional
The marker color. Possible values:
- A single color format string.
- A sequence of color specifications of length n.
- A sequence of n numbers to be mapped to colors using *cmap* and
*norm*.
- A 2-D array in which the rows are RGB or RGBA.
Note that *c* should not be a single numeric RGB or RGBA sequence
because that is indistinguishable from an array of values to be
colormapped. If you want to specify the same RGB or RGBA value for
all points, use a 2-D array with a single row. Otherwise, value-
mapping will have precedence in case its size matches with *x*
and *y*.
Defaults to ``None``. In that case the marker color is determined
by the value of ``color``, ``facecolor`` or ``facecolors``. In case
those are not specified or ``None``, the marker color is determined
by the next color of the ``Axes``' current "shape and fill" color
cycle. This cycle defaults to :rc:`axes.prop_cycle`.
marker : `~matplotlib.markers.MarkerStyle`, optional
The marker style. *marker* can be either an instance of the class
or the text shorthand for a particular marker.
Defaults to ``None``, in which case it takes the value of
:rc:`scatter.marker` = 'o'.
See `~matplotlib.markers` for more information about marker styles.
cmap : `~matplotlib.colors.Colormap`, optional, default: None
A `.Colormap` instance or registered colormap name. *cmap* is only
used if *c* is an array of floats. If ``None``, defaults to rc
``image.cmap``.
norm : `~matplotlib.colors.Normalize`, optional, default: None
A `.Normalize` instance is used to scale luminance data to 0, 1.
*norm* is only used if *c* is an array of floats. If *None*, use
the default `.colors.Normalize`.
vmin, vmax : scalar, optional, default: None
*vmin* and *vmax* are used in conjunction with *norm* to normalize
luminance data. If None, the respective min and max of the color
array is used. *vmin* and *vmax* are ignored if you pass a *norm*
instance.
alpha : scalar, optional, default: None
The alpha blending value, between 0 (transparent) and 1 (opaque).
linewidths : scalar or array_like, optional, default: None
The linewidth of the marker edges. Note: The default *edgecolors*
is 'face'. You may want to change this as well.
If *None*, defaults to rcParams ``lines.linewidth``.
edgecolors : {'face', 'none', *None*} or color or sequence of color, \
optional.
The edge color of the marker. Possible values:
- 'face': The edge color will always be the same as the face color.
- 'none': No patch boundary will be drawn.
- A Matplotlib color or sequence of color.
Defaults to ``None``, in which case it takes the value of
:rc:`scatter.edgecolors` = 'face'.
For non-filled markers, the *edgecolors* kwarg is ignored and
forced to 'face' internally.
plotnonfinite : boolean, optional, default: False
Set to plot points with nonfinite *c*, in conjunction with
`~matplotlib.colors.Colormap.set_bad`.
Returns
-------
paths : `~matplotlib.collections.PathCollection`
Other Parameters
----------------
**kwargs : `~matplotlib.collections.Collection` properties
See Also
--------
plot : To plot scatter plots when markers are identical in size and
color.
Notes
-----
* The `.plot` function will be faster for scatterplots where markers
don't vary in size or color.
* Any or all of *x*, *y*, *s*, and *c* may be masked arrays, in which
case all masks will be combined and only unmasked points will be
plotted.
* Fundamentally, scatter works with 1-D arrays; *x*, *y*, *s*, and *c*
may be input as 2-D arrays, but within scatter they will be
flattened. The exception is *c*, which will be flattened only if its
size matches the size of *x* and *y*.
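Examples
--------
A minimal sketch, assuming ``np`` is NumPy and ``ax`` is an existing
`~matplotlib.axes.Axes`; the data values are arbitrary:
>>> x, y = np.random.rand(2, 50)
>>> sizes = 100 * np.random.rand(50)   # marker areas in points**2
>>> values = np.random.rand(50)        # mapped through *cmap*
>>> pc = ax.scatter(x, y, s=sizes, c=values, cmap='viridis',
...                 alpha=0.7, edgecolors='none')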
"""
# Process **kwargs to handle aliases, conflicts with explicit kwargs:
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x = self.convert_xunits(x)
y = self.convert_yunits(y)
# np.ma.ravel yields an ndarray, not a masked array,
# unless its argument is a masked array.
xshape, yshape = np.shape(x), np.shape(y)
x = np.ma.ravel(x)
y = np.ma.ravel(y)
if x.size != y.size:
raise ValueError("x and y must be the same size")
if s is None:
s = (20 if rcParams['_internal.classic_mode'] else
rcParams['lines.markersize'] ** 2.0)
s = np.ma.ravel(s) # This doesn't have to match x, y in size.
c, colors, edgecolors = \
self._parse_scatter_color_args(
c, edgecolors, kwargs, xshape, yshape,
get_next_color_func=self._get_patches_for_fill.get_next_color)
if plotnonfinite and colors is None:
c = np.ma.masked_invalid(c)
x, y, s, edgecolors, linewidths = \
cbook._combine_masks(x, y, s, edgecolors, linewidths)
else:
x, y, s, c, colors, edgecolors, linewidths = \
cbook._combine_masks(
x, y, s, c, colors, edgecolors, linewidths)
scales = s # Renamed for readability below.
# to be API compatible
if verts is not None:
cbook.warn_deprecated("3.0", name="'verts'", obj_type="kwarg",
alternative="'marker'")
if marker is None:
marker = verts
# load default marker from rcParams
if marker is None:
marker = rcParams['scatter.marker']
if isinstance(marker, mmarkers.MarkerStyle):
marker_obj = marker
else:
marker_obj = mmarkers.MarkerStyle(marker)
path = marker_obj.get_path().transformed(
marker_obj.get_transform())
if not marker_obj.is_filled():
edgecolors = 'face'
linewidths = rcParams['lines.linewidth']
offsets = np.ma.column_stack([x, y])
collection = mcoll.PathCollection(
(path,), scales,
facecolors=colors,
edgecolors=edgecolors,
linewidths=linewidths,
offsets=offsets,
transOffset=kwargs.pop('transform', self.transData),
alpha=alpha
)
collection.set_transform(mtransforms.IdentityTransform())
collection.update(kwargs)
if colors is None:
if norm is not None and not isinstance(norm, mcolors.Normalize):
raise ValueError(
"'norm' must be an instance of 'mcolors.Normalize'")
collection.set_array(c)
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
# Classic mode only:
# ensure there are margins to allow for the
# finite size of the symbols. In v2.x, margins
# are present by default, so we disable this
# scatter-specific override.
if rcParams['_internal.classic_mode']:
if self._xmargin < 0.05 and x.size > 0:
self.set_xmargin(0.05)
if self._ymargin < 0.05 and x.size > 0:
self.set_ymargin(0.05)
self.add_collection(collection)
self.autoscale_view()
return collection
@_preprocess_data(replace_names=["x", "y"], label_namer="y")
@docstring.dedent_interpd
def hexbin(self, x, y, C=None, gridsize=100, bins=None,
xscale='linear', yscale='linear', extent=None,
cmap=None, norm=None, vmin=None, vmax=None,
alpha=None, linewidths=None, edgecolors='face',
reduce_C_function=np.mean, mincnt=None, marginals=False,
**kwargs):
"""
Make a hexagonal binning plot.
Make a hexagonal binning plot of *x* versus *y*, where *x*,
*y* are 1-D sequences of the same length, *N*. If *C* is *None*
(the default), this is a histogram of the number of occurrences
of the observations at (x[i],y[i]).
If *C* is specified, it specifies values at the coordinate
(x[i], y[i]). These values are accumulated for each hexagonal
bin and then reduced according to *reduce_C_function*, which
defaults to `numpy.mean`. (If *C* is specified, it must also
be a 1-D sequence of the same length as *x* and *y*.)
Parameters
----------
x, y : array or masked array
C : array or masked array, optional, default is *None*
gridsize : int or (int, int), optional, default is 100
The number of hexagons in the *x*-direction, default is
100. The corresponding number of hexagons in the
*y*-direction is chosen such that the hexagons are
approximately regular. Alternatively, gridsize can be a
tuple with two elements specifying the number of hexagons
in the *x*-direction and the *y*-direction.
bins : 'log' or int or sequence, optional, default is *None*
If *None*, no binning is applied; the color of each hexagon
directly corresponds to its count value.
If 'log', use a logarithmic scale for the color
map. Internally, :math:`log_{10}(i+1)` is used to
determine the hexagon color.
If an integer, divide the counts in the specified number
of bins, and color the hexagons accordingly.
If a sequence of values, the values of the lower bound of
the bins to be used.
xscale : {'linear', 'log'}, optional, default is 'linear'
Use a linear or log10 scale on the horizontal axis.
yscale : {'linear', 'log'}, optional, default is 'linear'
Use a linear or log10 scale on the vertical axis.
mincnt : int > 0, optional, default is *None*
If not *None*, only display cells with more than *mincnt*
number of points in the cell
marginals : bool, optional, default is *False*
if marginals is *True*, plot the marginal density as
colormapped rectangles along the bottom of the x-axis and
left of the y-axis
extent : scalar, optional, default is *None*
The limits of the bins. The default assigns the limits
based on *gridsize*, *x*, *y*, *xscale* and *yscale*.
If *xscale* or *yscale* is set to 'log', the limits are
expected to be the exponent for a power of 10. E.g. for
x-limits of 1 and 50 in 'linear' scale and y-limits
of 10 and 1000 in 'log' scale, enter (1, 50, 1, 3).
Order of scalars is (left, right, bottom, top).
Other Parameters
----------------
cmap : object, optional, default is *None*
a :class:`matplotlib.colors.Colormap` instance. If *None*,
defaults to rc ``image.cmap``.
norm : object, optional, default is *None*
:class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0,1.
vmin, vmax : scalar, optional, default is *None*
*vmin* and *vmax* are used in conjunction with *norm* to
normalize luminance data. If *None*, the min and max of the
color array *C* are used. Note if you pass a norm instance
your settings for *vmin* and *vmax* will be ignored.
alpha : scalar between 0 and 1, optional, default is *None*
the alpha value for the patches
linewidths : scalar, optional, default is *None*
If *None*, defaults to 1.0.
edgecolors : {'face', 'none', *None*} or color, optional
If 'face' (the default), draws the edges in the same color as the
fill color.
If 'none', no edge is drawn; this can sometimes lead to unsightly
unpainted pixels between the hexagons.
If *None*, draws outlines in the default color.
If a matplotlib color arg, draws outlines in the specified color.
Returns
-------
polycollection
A `.PolyCollection` instance; use `.PolyCollection.get_array` on
this to get the counts in each hexagon.
If *marginals* is *True*, horizontal
bar and vertical bar (both PolyCollections) will be attached
to the return collection as attributes *hbar* and *vbar*.
Notes
-----
The standard descriptions of all the
:class:`~matplotlib.collections.Collection` parameters:
%(Collection)s
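Examples
--------
A minimal sketch, assuming ``np`` is NumPy and ``ax`` is an existing
`~matplotlib.axes.Axes`; the data values are arbitrary:
>>> x = np.random.standard_normal(1000)
>>> y = np.random.standard_normal(1000)
>>> pc = ax.hexbin(x, y, gridsize=25, cmap='Blues', mincnt=1)
>>> counts = pc.get_array()  # value accumulated in each hexagon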
"""
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x, y, C = cbook.delete_masked_points(x, y, C)
# Set the size of the hexagon grid
if np.iterable(gridsize):
nx, ny = gridsize
else:
nx = gridsize
ny = int(nx / math.sqrt(3))
# Count the number of data in each hexagon
x = np.array(x, float)
y = np.array(y, float)
if xscale == 'log':
if np.any(x <= 0.0):
raise ValueError("x contains non-positive values, so can not"
" be log-scaled")
x = np.log10(x)
if yscale == 'log':
if np.any(y <= 0.0):
raise ValueError("y contains non-positive values, so can not"
" be log-scaled")
y = np.log10(y)
if extent is not None:
xmin, xmax, ymin, ymax = extent
else:
xmin, xmax = (np.min(x), np.max(x)) if len(x) else (0, 1)
ymin, ymax = (np.min(y), np.max(y)) if len(y) else (0, 1)
# to avoid issues with singular data, expand the min/max pairs
xmin, xmax = mtransforms.nonsingular(xmin, xmax, expander=0.1)
ymin, ymax = mtransforms.nonsingular(ymin, ymax, expander=0.1)
# In the x-direction, the hexagons exactly cover the region from
# xmin to xmax. Need some padding to avoid roundoff errors.
padding = 1.e-9 * (xmax - xmin)
xmin -= padding
xmax += padding
sx = (xmax - xmin) / nx
sy = (ymax - ymin) / ny
if marginals:
xorig = x.copy()
yorig = y.copy()
x = (x - xmin) / sx
y = (y - ymin) / sy
ix1 = np.round(x).astype(int)
iy1 = np.round(y).astype(int)
ix2 = np.floor(x).astype(int)
iy2 = np.floor(y).astype(int)
nx1 = nx + 1
ny1 = ny + 1
nx2 = nx
ny2 = ny
n = nx1 * ny1 + nx2 * ny2
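# Assign each point to the nearer of the two interleaved hexagon
# lattices: d1/d2 are scaled squared distances to the closest center
# of the unshifted and the half-cell-shifted lattice; bdist is True
# where lattice 1 is the nearer one.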
d1 = (x - ix1) ** 2 + 3.0 * (y - iy1) ** 2
d2 = (x - ix2 - 0.5) ** 2 + 3.0 * (y - iy2 - 0.5) ** 2
bdist = (d1 < d2)
if C is None:
lattice1 = np.zeros((nx1, ny1))
lattice2 = np.zeros((nx2, ny2))
c1 = (0 <= ix1) & (ix1 < nx1) & (0 <= iy1) & (iy1 < ny1) & bdist
c2 = (0 <= ix2) & (ix2 < nx2) & (0 <= iy2) & (iy2 < ny2) & ~bdist
np.add.at(lattice1, (ix1[c1], iy1[c1]), 1)
np.add.at(lattice2, (ix2[c2], iy2[c2]), 1)
if mincnt is not None:
lattice1[lattice1 < mincnt] = np.nan
lattice2[lattice2 < mincnt] = np.nan
accum = np.concatenate([lattice1.ravel(), lattice2.ravel()])
good_idxs = ~np.isnan(accum)
else:
if mincnt is None:
mincnt = 0
# create accumulation arrays
lattice1 = np.empty((nx1, ny1), dtype=object)
for i in range(nx1):
for j in range(ny1):
lattice1[i, j] = []
lattice2 = np.empty((nx2, ny2), dtype=object)
for i in range(nx2):
for j in range(ny2):
lattice2[i, j] = []
for i in range(len(x)):
if bdist[i]:
if 0 <= ix1[i] < nx1 and 0 <= iy1[i] < ny1:
lattice1[ix1[i], iy1[i]].append(C[i])
else:
if 0 <= ix2[i] < nx2 and 0 <= iy2[i] < ny2:
lattice2[ix2[i], iy2[i]].append(C[i])
for i in range(nx1):
for j in range(ny1):
vals = lattice1[i, j]
if len(vals) > mincnt:
lattice1[i, j] = reduce_C_function(vals)
else:
lattice1[i, j] = np.nan
for i in range(nx2):
for j in range(ny2):
vals = lattice2[i, j]
if len(vals) > mincnt:
lattice2[i, j] = reduce_C_function(vals)
else:
lattice2[i, j] = np.nan
accum = np.hstack((lattice1.astype(float).ravel(),
lattice2.astype(float).ravel()))
good_idxs = ~np.isnan(accum)
offsets = np.zeros((n, 2), float)
offsets[:nx1 * ny1, 0] = np.repeat(np.arange(nx1), ny1)
offsets[:nx1 * ny1, 1] = np.tile(np.arange(ny1), nx1)
offsets[nx1 * ny1:, 0] = np.repeat(np.arange(nx2) + 0.5, ny2)
offsets[nx1 * ny1:, 1] = np.tile(np.arange(ny2), nx2) + 0.5
offsets[:, 0] *= sx
offsets[:, 1] *= sy
offsets[:, 0] += xmin
offsets[:, 1] += ymin
# remove accumulation bins with no data
offsets = offsets[good_idxs, :]
accum = accum[good_idxs]
polygon = [sx, sy / 3] * np.array(
[[.5, -.5], [.5, .5], [0., 1.], [-.5, .5], [-.5, -.5], [0., -1.]])
if linewidths is None:
linewidths = [1.0]
if xscale == 'log' or yscale == 'log':
polygons = np.expand_dims(polygon, 0) + np.expand_dims(offsets, 1)
if xscale == 'log':
polygons[:, :, 0] = 10.0 ** polygons[:, :, 0]
xmin = 10.0 ** xmin
xmax = 10.0 ** xmax
self.set_xscale(xscale)
if yscale == 'log':
polygons[:, :, 1] = 10.0 ** polygons[:, :, 1]
ymin = 10.0 ** ymin
ymax = 10.0 ** ymax
self.set_yscale(yscale)
collection = mcoll.PolyCollection(
polygons,
edgecolors=edgecolors,
linewidths=linewidths,
)
else:
collection = mcoll.PolyCollection(
[polygon],
edgecolors=edgecolors,
linewidths=linewidths,
offsets=offsets,
transOffset=mtransforms.IdentityTransform(),
offset_position="data"
)
# Check for valid norm
if norm is not None and not isinstance(norm, mcolors.Normalize):
msg = "'norm' must be an instance of 'mcolors.Normalize'"
raise ValueError(msg)
# Set normalizer if bins is 'log'
if bins == 'log':
if norm is not None:
cbook._warn_external("Only one of 'bins' and 'norm' "
"arguments can be supplied, ignoring "
"bins={}".format(bins))
else:
norm = mcolors.LogNorm()
bins = None
if isinstance(norm, mcolors.LogNorm):
if (accum == 0).any():
# make sure we have no zeros
accum += 1
# autoscale the norm with current accum values if it hasn't
# been set
if norm is not None:
if norm.vmin is None and norm.vmax is None:
norm.autoscale(accum)
if bins is not None:
if not np.iterable(bins):
minimum, maximum = min(accum), max(accum)
bins -= 1 # one less edge than bins
bins = minimum + (maximum - minimum) * np.arange(bins) / bins
bins = np.sort(bins)
accum = bins.searchsorted(accum)
collection.set_array(accum)
collection.set_cmap(cmap)
collection.set_norm(norm)
collection.set_alpha(alpha)
collection.update(kwargs)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
corners = ((xmin, ymin), (xmax, ymax))
self.update_datalim(corners)
collection.sticky_edges.x[:] = [xmin, xmax]
collection.sticky_edges.y[:] = [ymin, ymax]
self.autoscale_view(tight=True)
# add the collection last
self.add_collection(collection, autolim=False)
if not marginals:
return collection
if C is None:
C = np.ones(len(x))
def coarse_bin(x, y, coarse):
ind = coarse.searchsorted(x).clip(0, len(coarse) - 1)
mus = np.zeros(len(coarse))
for i in range(len(coarse)):
yi = y[ind == i]
if len(yi) > 0:
mu = reduce_C_function(yi)
else:
mu = np.nan
mus[i] = mu
return mus
coarse = np.linspace(xmin, xmax, gridsize)
xcoarse = coarse_bin(xorig, C, coarse)
valid = ~np.isnan(xcoarse)
verts, values = [], []
for i, val in enumerate(xcoarse):
thismin = coarse[i]
if i < len(coarse) - 1:
thismax = coarse[i + 1]
else:
thismax = thismin + np.diff(coarse)[-1]
if not valid[i]:
continue
verts.append([(thismin, 0),
(thismin, 0.05),
(thismax, 0.05),
(thismax, 0)])
values.append(val)
values = np.array(values)
trans = self.get_xaxis_transform(which='grid')
hbar = mcoll.PolyCollection(verts, transform=trans, edgecolors='face')
hbar.set_array(values)
hbar.set_cmap(cmap)
hbar.set_norm(norm)
hbar.set_alpha(alpha)
hbar.update(kwargs)
self.add_collection(hbar, autolim=False)
coarse = np.linspace(ymin, ymax, gridsize)
ycoarse = coarse_bin(yorig, C, coarse)
valid = ~np.isnan(ycoarse)
verts, values = [], []
for i, val in enumerate(ycoarse):
thismin = coarse[i]
if i < len(coarse) - 1:
thismax = coarse[i + 1]
else:
thismax = thismin + np.diff(coarse)[-1]
if not valid[i]:
continue
verts.append([(0, thismin), (0.0, thismax),
(0.05, thismax), (0.05, thismin)])
values.append(val)
values = np.array(values)
trans = self.get_yaxis_transform(which='grid')
vbar = mcoll.PolyCollection(verts, transform=trans, edgecolors='face')
vbar.set_array(values)
vbar.set_cmap(cmap)
vbar.set_norm(norm)
vbar.set_alpha(alpha)
vbar.update(kwargs)
self.add_collection(vbar, autolim=False)
collection.hbar = hbar
collection.vbar = vbar
def on_changed(collection):
hbar.set_cmap(collection.get_cmap())
hbar.set_clim(collection.get_clim())
vbar.set_cmap(collection.get_cmap())
vbar.set_clim(collection.get_clim())
collection.callbacksSM.connect('changed', on_changed)
return collection
@docstring.dedent_interpd
def arrow(self, x, y, dx, dy, **kwargs):
"""
Add an arrow to the axes.
This draws an arrow from ``(x, y)`` to ``(x+dx, y+dy)``.
Parameters
----------
x, y : float
The x and y coordinates of the arrow base.
dx, dy : float
The length of the arrow along x and y direction.
Returns
-------
arrow : `.FancyArrow`
The created `.FancyArrow` object.
Other Parameters
----------------
**kwargs
Optional kwargs (inherited from `.FancyArrow` patch) control the
arrow construction and properties:
%(FancyArrow)s
Notes
-----
The resulting arrow is affected by the axes aspect ratio and limits.
This may produce an arrow whose head is not square with its stem. To
create an arrow whose head is square with its stem,
use :meth:`annotate` for example:
>>> ax.annotate("", xy=(0.5, 0.5), xytext=(0, 0),
... arrowprops=dict(arrowstyle="->"))
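A minimal `.Axes.arrow` call for comparison (coordinates and head
sizes are arbitrary):
>>> arr = ax.arrow(0, 0, 0.5, 0.5, head_width=0.05, head_length=0.1,
...                fc='k', ec='k')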
"""
# Strip away units for the underlying patch since units
# do not make sense to most patch-like code
x = self.convert_xunits(x)
y = self.convert_yunits(y)
dx = self.convert_xunits(dx)
dy = self.convert_yunits(dy)
a = mpatches.FancyArrow(x, y, dx, dy, **kwargs)
self.add_artist(a)
return a
def quiverkey(self, Q, X, Y, U, label, **kw):
qk = mquiver.QuiverKey(Q, X, Y, U, label, **kw)
self.add_artist(qk)
return qk
quiverkey.__doc__ = mquiver.QuiverKey.quiverkey_doc
# Handle units for x and y, if they've been passed
def _quiver_units(self, args, kw):
if len(args) > 3:
x, y = args[0:2]
self._process_unit_info(xdata=x, ydata=y, kwargs=kw)
x = self.convert_xunits(x)
y = self.convert_yunits(y)
return (x, y) + args[2:]
return args
# args can be some combination of X, Y, U, V, C and all should be replaced
@_preprocess_data()
def quiver(self, *args, **kw):
# Make sure units are handled for x and y values
args = self._quiver_units(args, kw)
q = mquiver.Quiver(self, *args, **kw)
self.add_collection(q, autolim=True)
self.autoscale_view()
return q
quiver.__doc__ = mquiver.Quiver.quiver_doc
# args can be some combination of X, Y, U, V, C and all should be replaced
@_preprocess_data()
@docstring.dedent_interpd
def barbs(self, *args, **kw):
"""
%(barbs_doc)s
"""
# Make sure units are handled for x and y values
args = self._quiver_units(args, kw)
b = mquiver.Barbs(self, *args, **kw)
self.add_collection(b, autolim=True)
self.autoscale_view()
return b
# Uses a custom implementation of data-kwarg handling in
# _process_plot_var_args.
def fill(self, *args, data=None, **kwargs):
"""
Plot filled polygons.
Parameters
----------
*args : sequence of x, y, [color]
Each polygon is defined by the lists of *x* and *y* positions of
its nodes, optionally followed by a *color* specifier. See
:mod:`matplotlib.colors` for supported color specifiers. The
standard color cycle is used for polygons without a color
specifier.
You can plot multiple polygons by providing multiple *x*, *y*,
*[color]* groups.
For example, each of the following is legal::
ax.fill(x, y) # a polygon with default color
ax.fill(x, y, "b") # a blue polygon
ax.fill(x, y, x2, y2) # two polygons
ax.fill(x, y, "b", x2, y2, "r") # a blue and a red polygon
data : indexable object, optional
An object with labelled data. If given, provide the label names to
plot in *x* and *y*, e.g.::
ax.fill("time", "signal",
data={"time": [0, 1, 2], "signal": [0, 1, 0]})
Returns
-------
a list of :class:`~matplotlib.patches.Polygon`
Other Parameters
----------------
**kwargs : :class:`~matplotlib.patches.Polygon` properties
Notes
-----
Use :meth:`fill_between` if you would like to fill the region between
two curves.
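Examples
--------
A minimal sketch, assuming ``np`` is NumPy and ``ax`` is an existing
`~matplotlib.axes.Axes`:
>>> t = np.linspace(0, 2 * np.pi, 100)
>>> patches = ax.fill(np.cos(t), np.sin(t), "g", alpha=0.3)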
"""
# For compatibility(!), get aliases from Line2D rather than Patch.
kwargs = cbook.normalize_kwargs(kwargs, mlines.Line2D)
patches = []
for poly in self._get_patches_for_fill(*args, data=data, **kwargs):
self.add_patch(poly)
patches.append(poly)
self.autoscale_view()
return patches
@_preprocess_data(replace_names=["x", "y1", "y2", "where"])
@docstring.dedent_interpd
def fill_between(self, x, y1, y2=0, where=None, interpolate=False,
step=None, **kwargs):
"""
Fill the area between two horizontal curves.
The curves are defined by the points (*x*, *y1*) and (*x*, *y2*). This
creates one or multiple polygons describing the filled area.
You may exclude some horizontal sections from filling using *where*.
By default, the edges connect the given points directly. Use *step* if
the filling should be a step function, i.e. constant in between *x*.
Parameters
----------
x : array (length N)
The x coordinates of the nodes defining the curves.
y1 : array (length N) or scalar
The y coordinates of the nodes defining the first curve.
y2 : array (length N) or scalar, optional, default: 0
The y coordinates of the nodes defining the second curve.
where : array of bool (length N), optional, default: None
Define *where* to exclude some horizontal regions from being
filled. The filled regions are defined by the coordinates
``x[where]``. More precisely, fill between ``x[i]`` and ``x[i+1]``
if ``where[i] and where[i+1]``. Note that this definition implies
that an isolated *True* value between two *False* values in
*where* will not result in filling. Both sides of the *True*
position remain unfilled due to the adjacent *False* values.
interpolate : bool, optional
This option is only relevant if *where* is used and the two curves
are crossing each other.
Semantically, *where* is often used for *y1* > *y2* or similar.
By default, the nodes of the polygon defining the filled region
will only be placed at the positions in the *x* array. Such a
polygon cannot describe the above semantics close to the
intersection. The x-sections containing the intersection are
simply clipped.
Setting *interpolate* to *True* will calculate the actual
intersection point and extend the filled region up to this point.
step : {'pre', 'post', 'mid'}, optional
Define *step* if the filling should be a step function,
i.e. constant in between *x*. The value determines where the
step will occur:
- 'pre': The y value is continued constantly to the left from
every *x* position, i.e. the interval ``(x[i-1], x[i]]`` has the
value ``y[i]``.
- 'post': The y value is continued constantly to the right from
every *x* position, i.e. the interval ``[x[i], x[i+1])`` has the
value ``y[i]``.
- 'mid': Steps occur half-way between the *x* positions.
Other Parameters
----------------
**kwargs
All other keyword arguments are passed on to `.PolyCollection`.
They control the `.Polygon` properties:
%(PolyCollection)s
Returns
-------
`.PolyCollection`
A `.PolyCollection` containing the plotted polygons.
See Also
--------
fill_betweenx : Fill between two sets of x-values.
Notes
-----
.. [notes section required to get data note injection right]
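Examples
--------
A minimal sketch, assuming ``np`` is NumPy and ``ax`` is an existing
`~matplotlib.axes.Axes`:
>>> x = np.linspace(0, 2 * np.pi, 200)
>>> y1, y2 = np.sin(x), np.cos(x)
>>> coll = ax.fill_between(x, y1, y2, where=(y1 > y2),
...                        interpolate=True, alpha=0.4)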
"""
if not rcParams['_internal.classic_mode']:
kwargs = cbook.normalize_kwargs(kwargs, mcoll.Collection)
if not any(c in kwargs for c in ('color', 'facecolor')):
kwargs['facecolor'] = \
self._get_patches_for_fill.get_next_color()
# Handle united data, such as dates
self._process_unit_info(xdata=x, ydata=y1, kwargs=kwargs)
self._process_unit_info(ydata=y2)
# Convert the arrays so we can work with them
x = ma.masked_invalid(self.convert_xunits(x))
y1 = ma.masked_invalid(self.convert_yunits(y1))
y2 = ma.masked_invalid(self.convert_yunits(y2))
for name, array in [('x', x), ('y1', y1), ('y2', y2)]:
if array.ndim > 1:
raise ValueError('Input passed into argument "%r" ' % name +
'is not 1-dimensional.')
if where is None:
where = True
where = where & ~functools.reduce(np.logical_or,
map(np.ma.getmask, [x, y1, y2]))
x, y1, y2 = np.broadcast_arrays(np.atleast_1d(x), y1, y2)
polys = []
for ind0, ind1 in cbook.contiguous_regions(where):
xslice = x[ind0:ind1]
y1slice = y1[ind0:ind1]
y2slice = y2[ind0:ind1]
if step is not None:
step_func = cbook.STEP_LOOKUP_MAP["steps-" + step]
xslice, y1slice, y2slice = step_func(xslice, y1slice, y2slice)
if not len(xslice):
continue
N = len(xslice)
X = np.zeros((2 * N + 2, 2), float)
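# The polygon has 2*N + 2 vertices: a start point, the y1 curve
# left-to-right, an end point, then the y2 curve right-to-left so
# the outline closes on itself.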
if interpolate:
def get_interp_point(ind):
im1 = max(ind - 1, 0)
x_values = x[im1:ind + 1]
diff_values = y1[im1:ind + 1] - y2[im1:ind + 1]
y1_values = y1[im1:ind + 1]
if len(diff_values) == 2:
if np.ma.is_masked(diff_values[1]):
return x[im1], y1[im1]
elif np.ma.is_masked(diff_values[0]):
return x[ind], y1[ind]
diff_order = diff_values.argsort()
diff_root_x = np.interp(
0, diff_values[diff_order], x_values[diff_order])
x_order = x_values.argsort()
diff_root_y = np.interp(diff_root_x, x_values[x_order],
y1_values[x_order])
return diff_root_x, diff_root_y
start = get_interp_point(ind0)
end = get_interp_point(ind1)
else:
# the purpose of the next two lines is for when y2 is a
# scalar like 0 and we want the fill to go all the way
# down to 0 even if none of the y1 sample points do
start = xslice[0], y2slice[0]
end = xslice[-1], y2slice[-1]
X[0] = start
X[N + 1] = end
X[1:N + 1, 0] = xslice
X[1:N + 1, 1] = y1slice
X[N + 2:, 0] = xslice[::-1]
X[N + 2:, 1] = y2slice[::-1]
polys.append(X)
collection = mcoll.PolyCollection(polys, **kwargs)
# now update the datalim and autoscale
XY1 = np.array([x[where], y1[where]]).T
XY2 = np.array([x[where], y2[where]]).T
self.dataLim.update_from_data_xy(XY1, self.ignore_existing_data_limits,
updatex=True, updatey=True)
self.ignore_existing_data_limits = False
self.dataLim.update_from_data_xy(XY2, self.ignore_existing_data_limits,
updatex=False, updatey=True)
self.add_collection(collection, autolim=False)
self.autoscale_view()
return collection
@_preprocess_data(replace_names=["y", "x1", "x2", "where"])
@docstring.dedent_interpd
def fill_betweenx(self, y, x1, x2=0, where=None,
step=None, interpolate=False, **kwargs):
"""
Fill the area between two vertical curves.
The curves are defined by the points (*x1*, *y*) and (*x2*, *y*). This
creates one or multiple polygons describing the filled area.
You may exclude some vertical sections from filling using *where*.
By default, the edges connect the given points directly. Use *step* if
the filling should be a step function, i.e. constant in between *y*.
Parameters
----------
y : array (length N)
The y coordinates of the nodes defining the curves.
x1 : array (length N) or scalar
The x coordinates of the nodes defining the first curve.
x2 : array (length N) or scalar, optional, default: 0
The x coordinates of the nodes defining the second curve.
where : array of bool (length N), optional, default: None
Define *where* to exclude some vertical regions from being
filled. The filled regions are defined by the coordinates
``y[where]``. More precisely, fill between ``y[i]`` and ``y[i+1]``
if ``where[i] and where[i+1]``. Note that this definition implies
that an isolated *True* value between two *False* values in
*where* will not result in filling. Both sides of the *True*
position remain unfilled due to the adjacent *False* values.
interpolate : bool, optional
This option is only relevant if *where* is used and the two curves
are crossing each other.
Semantically, *where* is often used for *x1* > *x2* or similar.
By default, the nodes of the polygon defining the filled region
will only be placed at the positions in the *y* array. Such a
polygon cannot describe the above semantics close to the
intersection. The y-sections containing the intersection are
simply clipped.
Setting *interpolate* to *True* will calculate the actual
intersection point and extend the filled region up to this point.
step : {'pre', 'post', 'mid'}, optional
Define *step* if the filling should be a step function,
i.e. constant in between *y*. The value determines where the
step will occur:
- 'pre': The x value is continued constantly below every *y*
position, i.e. the interval ``(y[i-1], y[i]]`` has the value
``x[i]``.
- 'post': The x value is continued constantly above every *y*
position, i.e. the interval ``[y[i], y[i+1])`` has the value
``x[i]``.
- 'mid': Steps occur half-way between the *y* positions.
Other Parameters
----------------
**kwargs
All other keyword arguments are passed on to `.PolyCollection`.
They control the `.Polygon` properties:
%(PolyCollection)s
Returns
-------
`.PolyCollection`
A `.PolyCollection` containing the plotted polygons.
See Also
--------
fill_between : Fill between two sets of y-values.
Notes
-----
.. [notes section required to get data note injection right]
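Examples
--------
A minimal sketch, assuming ``np`` is NumPy and ``ax`` is an existing
`~matplotlib.axes.Axes`:
>>> y = np.linspace(0, 10, 200)
>>> x1 = np.sin(y)
>>> coll = ax.fill_betweenx(y, x1, 0, where=(x1 > 0), alpha=0.4)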
"""
if not rcParams['_internal.classic_mode']:
kwargs = cbook.normalize_kwargs(kwargs, mcoll.Collection)
if not any(c in kwargs for c in ('color', 'facecolor')):
kwargs['facecolor'] = \
self._get_patches_for_fill.get_next_color()
# Handle united data, such as dates
self._process_unit_info(ydata=y, xdata=x1, kwargs=kwargs)
self._process_unit_info(xdata=x2)
# Convert the arrays so we can work with them
y = ma.masked_invalid(self.convert_yunits(y))
x1 = ma.masked_invalid(self.convert_xunits(x1))
x2 = ma.masked_invalid(self.convert_xunits(x2))
for name, array in [('y', y), ('x1', x1), ('x2', x2)]:
if array.ndim > 1:
raise ValueError('Input passed into argument "%r" ' % name +
'is not 1-dimensional.')
if where is None:
where = True
where = where & ~functools.reduce(np.logical_or,
map(np.ma.getmask, [y, x1, x2]))
y, x1, x2 = np.broadcast_arrays(np.atleast_1d(y), x1, x2)
polys = []
for ind0, ind1 in cbook.contiguous_regions(where):
yslice = y[ind0:ind1]
x1slice = x1[ind0:ind1]
x2slice = x2[ind0:ind1]
if step is not None:
step_func = cbook.STEP_LOOKUP_MAP["steps-" + step]
yslice, x1slice, x2slice = step_func(yslice, x1slice, x2slice)
if not len(yslice):
continue
N = len(yslice)
Y = np.zeros((2 * N + 2, 2), float)
if interpolate:
def get_interp_point(ind):
im1 = max(ind - 1, 0)
y_values = y[im1:ind + 1]
diff_values = x1[im1:ind + 1] - x2[im1:ind + 1]
x1_values = x1[im1:ind + 1]
if len(diff_values) == 2:
if np.ma.is_masked(diff_values[1]):
return x1[im1], y[im1]
elif np.ma.is_masked(diff_values[0]):
return x1[ind], y[ind]
diff_order = diff_values.argsort()
diff_root_y = np.interp(
0, diff_values[diff_order], y_values[diff_order])
y_order = y_values.argsort()
diff_root_x = np.interp(diff_root_y, y_values[y_order],
x1_values[y_order])
return diff_root_x, diff_root_y
start = get_interp_point(ind0)
end = get_interp_point(ind1)
else:
# the purpose of the next two lines is for when x2 is a
# scalar like 0 and we want the fill to go all the way
# down to 0 even if none of the x1 sample points do
start = x2slice[0], yslice[0]
end = x2slice[-1], yslice[-1]
Y[0] = start
Y[N + 1] = end
Y[1:N + 1, 0] = x1slice
Y[1:N + 1, 1] = yslice
Y[N + 2:, 0] = x2slice[::-1]
Y[N + 2:, 1] = yslice[::-1]
polys.append(Y)
collection = mcoll.PolyCollection(polys, **kwargs)
# now update the datalim and autoscale
X1Y = np.array([x1[where], y[where]]).T
X2Y = np.array([x2[where], y[where]]).T
self.dataLim.update_from_data_xy(X1Y, self.ignore_existing_data_limits,
updatex=True, updatey=True)
self.ignore_existing_data_limits = False
self.dataLim.update_from_data_xy(X2Y, self.ignore_existing_data_limits,
updatex=True, updatey=False)
self.add_collection(collection, autolim=False)
self.autoscale_view()
return collection
#### plotting z(x,y): imshow, pcolor and relatives, contour
@_preprocess_data()
@cbook._delete_parameter("3.1", "shape")
@cbook._delete_parameter("3.1", "imlim")
def imshow(self, X, cmap=None, norm=None, aspect=None,
interpolation=None, alpha=None, vmin=None, vmax=None,
origin=None, extent=None, shape=None, filternorm=1,
filterrad=4.0, imlim=None, resample=None, url=None, **kwargs):
"""
Display an image, i.e. data on a 2D regular raster.
Parameters
----------
X : array-like or PIL image
The image data. Supported array shapes are:
- (M, N): an image with scalar data. The data is visualized
using a colormap.
- (M, N, 3): an image with RGB values (0-1 float or 0-255 int).
- (M, N, 4): an image with RGBA values (0-1 float or 0-255 int),
i.e. including transparency.
The first two dimensions (M, N) define the rows and columns of
the image.
Out-of-range RGB(A) values are clipped.
cmap : str or `~matplotlib.colors.Colormap`, optional
The Colormap instance or registered colormap name used to map
scalar data to colors. This parameter is ignored for RGB(A) data.
Defaults to :rc:`image.cmap`.
norm : `~matplotlib.colors.Normalize`, optional
The `Normalize` instance used to scale scalar data to the [0, 1]
range before mapping to colors using *cmap*. By default, a linear
scaling mapping the lowest value to 0 and the highest to 1 is used.
This parameter is ignored for RGB(A) data.
aspect : {'equal', 'auto'} or float, optional
Controls the aspect ratio of the axes. The aspect is of particular
relevance for images since it may distort the image, i.e. pixel
will not be square.
This parameter is a shortcut for explicitly calling
`.Axes.set_aspect`. See there for further details.
- 'equal': Ensures an aspect ratio of 1. Pixels will be square
(unless pixel sizes are explicitly made non-square in data
coordinates using *extent*).
- 'auto': The axes is kept fixed and the aspect is adjusted so
that the data fit in the axes. In general, this will result in
non-square pixels.
If not given, use :rc:`image.aspect` (default: 'equal').
interpolation : str, optional
The interpolation method used. If *None*
:rc:`image.interpolation` is used, which defaults to 'nearest'.
Supported values are 'none', 'nearest', 'bilinear', 'bicubic',
'spline16', 'spline36', 'hanning', 'hamming', 'hermite', 'kaiser',
'quadric', 'catrom', 'gaussian', 'bessel', 'mitchell', 'sinc',
'lanczos'.
If *interpolation* is 'none', then no interpolation is performed
on the Agg, ps, pdf and svg backends. Other backends will fall back
to 'nearest'. Note that most SVG renderers perform interpolation at
rendering and that the default interpolation method they implement
may differ.
See
:doc:`/gallery/images_contours_and_fields/interpolation_methods`
for an overview of the supported interpolation methods.
Some interpolation methods require an additional radius parameter,
which can be set by *filterrad*. Additionally, the antigrain image
resize filter is controlled by the parameter *filternorm*.
alpha : scalar, optional
The alpha blending value, between 0 (transparent) and 1 (opaque).
This parameter is ignored for RGBA input data.
vmin, vmax : scalar, optional
When using scalar data and no explicit *norm*, *vmin* and *vmax*
define the data range that the colormap covers. By default,
the colormap covers the complete value range of the supplied
data. *vmin*, *vmax* are ignored if the *norm* parameter is used.
origin : {'upper', 'lower'}, optional
Place the [0,0] index of the array in the upper left or lower left
corner of the axes. The convention 'upper' is typically used for
matrices and images.
If not given, :rc:`image.origin` is used, defaulting to 'upper'.
Note that the vertical axis points upward for 'lower'
but downward for 'upper'.
extent : scalars (left, right, bottom, top), optional
The bounding box in data coordinates that the image will fill.
The image is stretched individually along x and y to fill the box.
The default extent is determined by the following conditions.
Pixels have unit size in data coordinates. Their centers are on
integer coordinates, and their center coordinates range from 0 to
columns-1 horizontally and from 0 to rows-1 vertically.
Note that the direction of the vertical axis and thus the default
values for top and bottom depend on *origin*:
- For ``origin == 'upper'`` the default is
``(-0.5, numcols-0.5, numrows-0.5, -0.5)``.
- For ``origin == 'lower'`` the default is
``(-0.5, numcols-0.5, -0.5, numrows-0.5)``.
See the example :doc:`/tutorials/intermediate/imshow_extent` for a
more detailed description.
filternorm : bool, optional, default: True
A parameter for the antigrain image resize filter (see the
antigrain documentation). If *filternorm* is set, the filter
normalizes integer values and corrects the rounding errors. It
doesn't do anything with the source floating point values, it
corrects only integers according to the rule of 1.0 which means
that any sum of pixel weights must be equal to 1.0. So, the
filter function must produce a graph of the proper shape.
filterrad : float > 0, optional, default: 4.0
The filter radius for filters that have a radius parameter, i.e.
when interpolation is one of: 'sinc', 'lanczos' or 'blackman'.
resample : bool, optional
When *True*, use a full resampling method. When *False*, only
resample when the output image is larger than the input image.
url : str, optional
Set the url of the created `.AxesImage`. See `.Artist.set_url`.
Returns
-------
image : `~matplotlib.image.AxesImage`
Other Parameters
----------------
**kwargs : `~matplotlib.artist.Artist` properties
These parameters are passed on to the constructor of the
`.AxesImage` artist.
See also
--------
matshow : Plot a matrix or an array as an image.
Notes
-----
Unless *extent* is used, pixel centers will be located at integer
coordinates. In other words: the origin will coincide with the center
of pixel (0, 0).
There are two common representations for RGB images with an alpha
channel:
- Straight (unassociated) alpha: R, G, and B channels represent the
color of the pixel, disregarding its opacity.
- Premultiplied (associated) alpha: R, G, and B channels represent
the color of the pixel, adjusted for its opacity by multiplication.
`~matplotlib.pyplot.imshow` expects RGB images adopting the straight
(unassociated) alpha representation.
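Examples
--------
A minimal, illustrative sketch (assumes ``matplotlib.pyplot`` imported as
``plt`` and ``numpy`` as ``np``; the data is made up)::

    import matplotlib.pyplot as plt
    import numpy as np

    fig, ax = plt.subplots()
    data = np.arange(100).reshape(10, 10)      # (M, N) scalar data, hypothetical
    im = ax.imshow(data, cmap='viridis', origin='lower',
                   extent=(0, 1, 0, 1))        # map pixels onto [0, 1] x [0, 1]
    fig.colorbar(im, ax=ax)
    plt.show()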
"""
if norm is not None and not isinstance(norm, mcolors.Normalize):
raise ValueError(
"'norm' must be an instance of 'mcolors.Normalize'")
if aspect is None:
aspect = rcParams['image.aspect']
self.set_aspect(aspect)
im = mimage.AxesImage(self, cmap, norm, interpolation, origin, extent,
filternorm=filternorm, filterrad=filterrad,
resample=resample, **kwargs)
im.set_data(X)
im.set_alpha(alpha)
if im.get_clip_path() is None:
# image does not already have clipping set, clip to axes patch
im.set_clip_path(self.patch)
if vmin is not None or vmax is not None:
im.set_clim(vmin, vmax)
else:
im.autoscale_None()
im.set_url(url)
# update ax.dataLim, and, if autoscaling, set viewLim
# to tightly fit the image, regardless of dataLim.
im.set_extent(im.get_extent())
self.add_image(im)
return im
@staticmethod
def _pcolorargs(funcname, *args, allmatch=False):
# If allmatch is True, then the incoming X, Y, C must have matching
# dimensions, taking into account that X and Y can be 1-D rather than
# 2-D. This perfect match is required for Gouraud shading. For flat
# shading, X and Y specify boundaries, so we need one more boundary
# than color in each direction. For convenience, and consistent with
# Matlab, we discard the last row and/or column of C if necessary to
# meet this condition. This is done if allmatch is False.
if len(args) == 1:
C = np.asanyarray(args[0])
numRows, numCols = C.shape
if allmatch:
X, Y = np.meshgrid(np.arange(numCols), np.arange(numRows))
else:
X, Y = np.meshgrid(np.arange(numCols + 1),
np.arange(numRows + 1))
C = cbook.safe_masked_invalid(C)
return X, Y, C
if len(args) == 3:
# Check x and y for bad data...
C = np.asanyarray(args[2])
X, Y = [cbook.safe_masked_invalid(a) for a in args[:2]]
if funcname == 'pcolormesh':
if np.ma.is_masked(X) or np.ma.is_masked(Y):
raise ValueError(
'x and y arguments to pcolormesh cannot have '
'non-finite values or be of type '
'numpy.ma.core.MaskedArray with masked values')
# safe_masked_invalid() returns an ndarray for dtypes other
# than floating point.
if isinstance(X, np.ma.core.MaskedArray):
X = X.data # strip mask as downstream doesn't like it...
if isinstance(Y, np.ma.core.MaskedArray):
Y = Y.data
numRows, numCols = C.shape
else:
raise TypeError(
'Illegal arguments to %s; see help(%s)' % (funcname, funcname))
Nx = X.shape[-1]
Ny = Y.shape[0]
if X.ndim != 2 or X.shape[0] == 1:
x = X.reshape(1, Nx)
X = x.repeat(Ny, axis=0)
if Y.ndim != 2 or Y.shape[1] == 1:
y = Y.reshape(Ny, 1)
Y = y.repeat(Nx, axis=1)
if X.shape != Y.shape:
raise TypeError(
'Incompatible X, Y inputs to %s; see help(%s)' % (
funcname, funcname))
if allmatch:
if (Nx, Ny) != (numCols, numRows):
raise TypeError('Dimensions of C %s are incompatible with'
' X (%d) and/or Y (%d); see help(%s)' % (
C.shape, Nx, Ny, funcname))
else:
if not (numCols in (Nx, Nx - 1) and numRows in (Ny, Ny - 1)):
raise TypeError('Dimensions of C %s are incompatible with'
' X (%d) and/or Y (%d); see help(%s)' % (
C.shape, Nx, Ny, funcname))
C = C[:Ny - 1, :Nx - 1]
C = cbook.safe_masked_invalid(C)
return X, Y, C
@_preprocess_data()
@docstring.dedent_interpd
def pcolor(self, *args, alpha=None, norm=None, cmap=None, vmin=None,
vmax=None, **kwargs):
r"""
Create a pseudocolor plot with a non-regular rectangular grid.
Call signature::
pcolor([X, Y,] C, **kwargs)
*X* and *Y* can be used to specify the corners of the quadrilaterals.
.. hint::
``pcolor()`` can be very slow for large arrays. In most
cases you should use the similar but much faster
`~.Axes.pcolormesh` instead. See there for a discussion of the
differences.
Parameters
----------
C : array_like
A scalar 2-D array. The values will be color-mapped.
X, Y : array_like, optional
The coordinates of the quadrilateral corners. The quadrilateral
for ``C[i,j]`` has corners at::
(X[i+1, j], Y[i+1, j])          (X[i+1, j+1], Y[i+1, j+1])
                      +--------+
                      | C[i,j] |
                      +--------+
    (X[i, j], Y[i, j])          (X[i, j+1], Y[i, j+1]),
Note that the column index corresponds to the
x-coordinate, and the row index corresponds to y. For
details, see the :ref:`Notes <axes-pcolor-grid-orientation>`
section below.
The dimensions of *X* and *Y* should be one greater than those of
*C*. Alternatively, *X*, *Y* and *C* may have equal dimensions, in
which case the last row and column of *C* will be ignored.
If *X* and/or *Y* are 1-D arrays or column vectors they will be
expanded as needed into the appropriate 2-D arrays, making a
rectangular grid.
cmap : str or `~matplotlib.colors.Colormap`, optional
A Colormap instance or registered colormap name. The colormap
maps the *C* values to colors. Defaults to :rc:`image.cmap`.
norm : `~matplotlib.colors.Normalize`, optional
The Normalize instance scales the data values to the canonical
colormap range [0, 1] for mapping to colors. By default, the data
range is mapped to the colorbar range using linear scaling.
vmin, vmax : scalar, optional, default: None
The colorbar range. If *None*, suitable min/max values are
automatically chosen by the `~.Normalize` instance (defaults to
the respective min/max values of *C* in case of the default linear
scaling).
edgecolors : {'none', None, 'face', color, color sequence}, optional
The color of the edges. Defaults to 'none'. Possible values:
- 'none' or '': No edge.
- *None*: :rc:`patch.edgecolor` will be used. Note that currently
:rc:`patch.force_edgecolor` has to be True for this to work.
- 'face': Use the adjacent face color.
- An mpl color or sequence of colors will set the edge color.
The singular form *edgecolor* works as an alias.
alpha : scalar, optional, default: None
The alpha blending value of the face color, between 0 (transparent)
and 1 (opaque). Note: The edgecolor is currently not affected by
this.
snap : bool, optional, default: False
Whether to snap the mesh to pixel boundaries.
Returns
-------
collection : `matplotlib.collections.Collection`
Other Parameters
----------------
antialiaseds : bool, optional, default: False
The default *antialiaseds* is False if the default
*edgecolors*\ ="none" is used. This eliminates artificial lines
at patch boundaries, and works regardless of the value of alpha.
If *edgecolors* is not "none", then the default *antialiaseds*
is taken from :rc:`patch.antialiased`, which defaults to True.
Stroking the edges may be preferred if *alpha* is 1, but will
cause artifacts otherwise.
**kwargs
Additionally, the following arguments are allowed. They are passed
along to the `~matplotlib.collections.PolyCollection` constructor:
%(PolyCollection)s
See Also
--------
pcolormesh : for an explanation of the differences between
pcolor and pcolormesh.
imshow : If *X* and *Y* are each equidistant, `~.Axes.imshow` can be a
faster alternative.
Notes
-----
**Masked arrays**
*X*, *Y* and *C* may be masked arrays. If either ``C[i, j]``, or one
of the vertices surrounding ``C[i,j]`` (*X* or *Y* at
``[i, j], [i+1, j], [i, j+1], [i+1, j+1]``) is masked, nothing is
plotted.
.. _axes-pcolor-grid-orientation:
**Grid orientation**
The grid orientation follows the standard matrix convention: An array
*C* with shape (nrows, ncolumns) is plotted with the column number as
*X* and the row number as *Y*.
**Handling of pcolor() end-cases**
``pcolor()`` displays all columns of *C* if *X* and *Y* are not
specified, or if *X* and *Y* have one more column than *C*.
If *X* and *Y* have the same number of columns as *C* then the last
column of *C* is dropped. Similarly for the rows.
Note: This behavior is different from MATLAB's ``pcolor()``, which
always discards the last row and column of *C*.
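Examples
--------
A minimal, illustrative sketch (the coordinates and values are made up;
note that *X* and *Y* give cell boundaries and are one larger than *C*)::

    import matplotlib.pyplot as plt
    import numpy as np

    x = np.arange(6)                 # 6 boundaries -> 5 columns
    y = np.arange(4)                 # 4 boundaries -> 3 rows
    C = np.random.rand(3, 5)         # hypothetical cell values
    fig, ax = plt.subplots()
    coll = ax.pcolor(x, y, C, edgecolors='k', linewidths=0.5)
    fig.colorbar(coll, ax=ax)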
"""
X, Y, C = self._pcolorargs('pcolor', *args, allmatch=False)
Ny, Nx = X.shape
# unit conversion allows e.g. datetime objects as axis values
self._process_unit_info(xdata=X, ydata=Y, kwargs=kwargs)
X = self.convert_xunits(X)
Y = self.convert_yunits(Y)
# convert to MA, if necessary.
C = ma.asarray(C)
X = ma.asarray(X)
Y = ma.asarray(Y)
mask = ma.getmaskarray(X) + ma.getmaskarray(Y)
xymask = (mask[0:-1, 0:-1] + mask[1:, 1:] +
mask[0:-1, 1:] + mask[1:, 0:-1])
# don't plot if C or any of the surrounding vertices are masked.
mask = ma.getmaskarray(C) + xymask
unmask = ~mask
X1 = ma.filled(X[:-1, :-1])[unmask]
Y1 = ma.filled(Y[:-1, :-1])[unmask]
X2 = ma.filled(X[1:, :-1])[unmask]
Y2 = ma.filled(Y[1:, :-1])[unmask]
X3 = ma.filled(X[1:, 1:])[unmask]
Y3 = ma.filled(Y[1:, 1:])[unmask]
X4 = ma.filled(X[:-1, 1:])[unmask]
Y4 = ma.filled(Y[:-1, 1:])[unmask]
npoly = len(X1)
xy = np.stack([X1, Y1, X2, Y2, X3, Y3, X4, Y4, X1, Y1], axis=-1)
verts = xy.reshape((npoly, 5, 2))
C = ma.filled(C[:Ny - 1, :Nx - 1])[unmask]
linewidths = (0.25,)
if 'linewidth' in kwargs:
kwargs['linewidths'] = kwargs.pop('linewidth')
kwargs.setdefault('linewidths', linewidths)
if 'edgecolor' in kwargs:
kwargs['edgecolors'] = kwargs.pop('edgecolor')
ec = kwargs.setdefault('edgecolors', 'none')
# aa setting will default via collections to patch.antialiased
# unless the boundary is not stroked, in which case the
# default will be False; with unstroked boundaries, aa
# makes artifacts that are often disturbing.
if 'antialiased' in kwargs:
kwargs['antialiaseds'] = kwargs.pop('antialiased')
if 'antialiaseds' not in kwargs and cbook._str_lower_equal(ec, "none"):
kwargs['antialiaseds'] = False
kwargs.setdefault('snap', False)
collection = mcoll.PolyCollection(verts, **kwargs)
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None and not isinstance(norm, mcolors.Normalize):
raise ValueError(
"'norm' must be an instance of 'mcolors.Normalize'")
collection.set_cmap(cmap)
collection.set_norm(norm)
collection.set_clim(vmin, vmax)
collection.autoscale_None()
self.grid(False)
x = X.compressed()
y = Y.compressed()
# Transform from native to data coordinates?
t = collection._transform
if (not isinstance(t, mtransforms.Transform) and
hasattr(t, '_as_mpl_transform')):
t = t._as_mpl_transform(self.axes)
if t and any(t.contains_branch_seperately(self.transData)):
trans_to_data = t - self.transData
pts = np.vstack([x, y]).T.astype(float)
transformed_pts = trans_to_data.transform(pts)
x = transformed_pts[..., 0]
y = transformed_pts[..., 1]
self.add_collection(collection, autolim=False)
minx = np.min(x)
maxx = np.max(x)
miny = np.min(y)
maxy = np.max(y)
collection.sticky_edges.x[:] = [minx, maxx]
collection.sticky_edges.y[:] = [miny, maxy]
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return collection
@_preprocess_data()
@docstring.dedent_interpd
def pcolormesh(self, *args, alpha=None, norm=None, cmap=None, vmin=None,
vmax=None, shading='flat', antialiased=False, **kwargs):
"""
Create a pseudocolor plot with a non-regular rectangular grid.
Call signature::
pcolormesh([X, Y,] C, **kwargs)
*X* and *Y* can be used to specify the corners of the quadrilaterals.
.. note::
`~Axes.pcolormesh` is similar to `~Axes.pcolor`. It's much faster
and preferred in most cases. For a detailed discussion on the
differences see :ref:`Differences between pcolor() and pcolormesh()
<differences-pcolor-pcolormesh>`.
Parameters
----------
C : array_like
A scalar 2-D array. The values will be color-mapped.
X, Y : array_like, optional
The coordinates of the quadrilateral corners. The quadrilateral
for ``C[i,j]`` has corners at::
(X[i+1, j], Y[i+1, j])          (X[i+1, j+1], Y[i+1, j+1])
                      +--------+
                      | C[i,j] |
                      +--------+
    (X[i, j], Y[i, j])          (X[i, j+1], Y[i, j+1]),
Note that the column index corresponds to the
x-coordinate, and the row index corresponds to y. For
details, see the :ref:`Notes <axes-pcolormesh-grid-orientation>`
section below.
The dimensions of *X* and *Y* should be one greater than those of
*C*. Alternatively, *X*, *Y* and *C* may have equal dimensions, in
which case the last row and column of *C* will be ignored.
If *X* and/or *Y* are 1-D arrays or column vectors they will be
expanded as needed into the appropriate 2-D arrays, making a
rectangular grid.
cmap : str or `~matplotlib.colors.Colormap`, optional
A Colormap instance or registered colormap name. The colormap
maps the *C* values to colors. Defaults to :rc:`image.cmap`.
norm : `~matplotlib.colors.Normalize`, optional
The Normalize instance scales the data values to the canonical
colormap range [0, 1] for mapping to colors. By default, the data
range is mapped to the colorbar range using linear scaling.
vmin, vmax : scalar, optional, default: None
The colorbar range. If *None*, suitable min/max values are
automatically chosen by the `~.Normalize` instance (defaults to
the respective min/max values of *C* in case of the default linear
scaling).
edgecolors : {'none', None, 'face', color, color sequence}, optional
The color of the edges. Defaults to 'none'. Possible values:
- 'none' or '': No edge.
- *None*: :rc:`patch.edgecolor` will be used. Note that currently
:rc:`patch.force_edgecolor` has to be True for this to work.
- 'face': Use the adjacent face color.
- An mpl color or sequence of colors will set the edge color.
The singular form *edgecolor* works as an alias.
alpha : scalar, optional, default: None
The alpha blending value, between 0 (transparent) and 1 (opaque).
shading : {'flat', 'gouraud'}, optional
The fill style. Possible values:
- 'flat': A solid color is used for each quad. The color of the
quad (i, j), (i+1, j), (i, j+1), (i+1, j+1) is given by
``C[i,j]``.
- 'gouraud': Each quad will be Gouraud shaded: The color of the
corners (i', j') is given by ``C[i',j']``. The color values of
the area in between are interpolated from the corner values.
When Gouraud shading is used, *edgecolors* is ignored.
snap : bool, optional, default: False
Whether to snap the mesh to pixel boundaries.
Returns
-------
mesh : `matplotlib.collections.QuadMesh`
Other Parameters
----------------
**kwargs
Additionally, the following arguments are allowed. They are passed
along to the `~matplotlib.collections.QuadMesh` constructor:
%(QuadMesh)s
See Also
--------
pcolor : An alternative implementation with slightly different
features. For a detailed discussion on the differences see
:ref:`Differences between pcolor() and pcolormesh()
<differences-pcolor-pcolormesh>`.
imshow : If *X* and *Y* are each equidistant, `~.Axes.imshow` can be a
faster alternative.
Notes
-----
**Masked arrays**
*C* may be a masked array. If ``C[i, j]`` is masked, the corresponding
quadrilateral will be transparent. Masking of *X* and *Y* is not
supported. Use `~.Axes.pcolor` if you need this functionality.
.. _axes-pcolormesh-grid-orientation:
**Grid orientation**
The grid orientation follows the standard matrix convention: An array
*C* with shape (nrows, ncolumns) is plotted with the column number as
*X* and the row number as *Y*.
.. _differences-pcolor-pcolormesh:
**Differences between pcolor() and pcolormesh()**
Both methods are used to create a pseudocolor plot of a 2-D array
using quadrilaterals.
The main difference lies in the created object and internal data
handling:
While `~.Axes.pcolor` returns a `.PolyCollection`, `~.Axes.pcolormesh`
returns a `.QuadMesh`. The latter is more specialized for the given
purpose and thus is faster. It should almost always be preferred.
There is also a slight difference in the handling of masked arrays.
Both `~.Axes.pcolor` and `~.Axes.pcolormesh` support masked arrays
for *C*. However, only `~.Axes.pcolor` supports masked arrays for *X*
and *Y*. The reason lies in the internal handling of the masked values.
`~.Axes.pcolor` leaves out the respective polygons from the
PolyCollection. `~.Axes.pcolormesh` sets the facecolor of the masked
elements to transparent. You can see the difference when using
edgecolors. While all edges are drawn irrespective of masking in a
QuadMesh, the edge between two adjacent masked quadrilaterals in
`~.Axes.pcolor` is not drawn as the corresponding polygons do not
exist in the PolyCollection.
Another difference is the support of Gouraud shading in
`~.Axes.pcolormesh`, which is not available with `~.Axes.pcolor`.
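Examples
--------
A minimal, illustrative sketch using flat shading (the grid and values
are made up; as above, the boundary arrays are one larger than *C*)::

    import matplotlib.pyplot as plt
    import numpy as np

    x = np.linspace(0, 1, 6)
    y = np.linspace(0, 1, 4)
    C = np.random.rand(3, 5)              # hypothetical cell values
    fig, ax = plt.subplots()
    mesh = ax.pcolormesh(x, y, C, cmap='plasma')
    fig.colorbar(mesh, ax=ax)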
"""
shading = shading.lower()
kwargs.setdefault('edgecolors', 'None')
allmatch = (shading == 'gouraud')
X, Y, C = self._pcolorargs('pcolormesh', *args, allmatch=allmatch)
Ny, Nx = X.shape
X = X.ravel()
Y = Y.ravel()
# unit conversion allows e.g. datetime objects as axis values
self._process_unit_info(xdata=X, ydata=Y, kwargs=kwargs)
X = self.convert_xunits(X)
Y = self.convert_yunits(Y)
# convert to one dimensional arrays
C = C.ravel()
coords = np.column_stack((X, Y)).astype(float, copy=False)
collection = mcoll.QuadMesh(Nx - 1, Ny - 1, coords,
antialiased=antialiased, shading=shading,
**kwargs)
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None and not isinstance(norm, mcolors.Normalize):
raise ValueError(
"'norm' must be an instance of 'mcolors.Normalize'")
collection.set_cmap(cmap)
collection.set_norm(norm)
collection.set_clim(vmin, vmax)
collection.autoscale_None()
self.grid(False)
# Transform from native to data coordinates?
t = collection._transform
if (not isinstance(t, mtransforms.Transform) and
hasattr(t, '_as_mpl_transform')):
t = t._as_mpl_transform(self.axes)
if t and any(t.contains_branch_seperately(self.transData)):
trans_to_data = t - self.transData
coords = trans_to_data.transform(coords)
self.add_collection(collection, autolim=False)
minx, miny = np.min(coords, axis=0)
maxx, maxy = np.max(coords, axis=0)
collection.sticky_edges.x[:] = [minx, maxx]
collection.sticky_edges.y[:] = [miny, maxy]
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return collection
@_preprocess_data()
@docstring.dedent_interpd
def pcolorfast(self, *args, alpha=None, norm=None, cmap=None, vmin=None,
vmax=None, **kwargs):
"""
Create a pseudocolor plot with a non-regular rectangular grid.
Call signature::
ax.pcolorfast([X, Y], C, /, **kwargs)
This method is similar to `~.Axes.pcolor` and `~.Axes.pcolormesh`.
It's designed to provide the fastest pcolor-type plotting with the
Agg backend. To achieve this, it uses different algorithms internally
depending on the complexity of the input grid (regular rectangular,
non-regular rectangular or arbitrary quadrilateral).
.. warning::
This method is experimental. Compared to `~.Axes.pcolor` or
`~.Axes.pcolormesh` it has some limitations:
- It supports only flat shading (no outlines).
- It lacks support for log scaling of the axes.
- It does not have a pyplot wrapper.
Parameters
----------
C : array-like(M, N)
A 2D array or masked array. The values will be color-mapped.
This argument can only be passed positionally.
C can in some cases be 3D with the last dimension as rgb(a).
This is available when C qualifies for the image or pcolorimage type;
a ValueError is raised if C is 3D and the quadmesh type would be
required.
X, Y : tuple or array-like, default: ``(0, N)``, ``(0, M)``
*X* and *Y* are used to specify the coordinates of the
quadrilaterals. There are different ways to do this:
- Use tuples ``X=(xmin, xmax)`` and ``Y=(ymin, ymax)`` to define
a *uniform rectangular grid*.
The tuples define the outer edges of the grid. All individual
quadrilaterals will be of the same size. This is the fastest
version.
- Use 1D arrays *X*, *Y* to specify a *non-uniform rectangular
grid*.
In this case *X* and *Y* have to be monotonic 1D arrays of length
*N+1* and *M+1*, specifying the x and y boundaries of the cells.
The speed is intermediate. Note: The grid is checked, and if
found to be uniform the fast version is used.
- Use 2D arrays *X*, *Y* if you need an *arbitrary quadrilateral
grid* (i.e. if the quadrilaterals are not rectangular).
In this case *X* and *Y* are 2D arrays with shape (M, N),
specifying the x and y coordinates of the corners of the colored
quadrilaterals. See `~.Axes.pcolormesh` for details.
This is the most general, but the slowest to render. It may
produce faster and more compact output using ps, pdf, and
svg backends, however.
These arguments can only be passed positionally.
cmap : str or `~matplotlib.colors.Colormap`, optional
A Colormap instance or registered colormap name. The colormap
maps the *C* values to colors. Defaults to :rc:`image.cmap`.
norm : `~matplotlib.colors.Normalize`, optional
The Normalize instance scales the data values to the canonical
colormap range [0, 1] for mapping to colors. By default, the data
range is mapped to the colorbar range using linear scaling.
vmin, vmax : scalar, optional, default: None
The colorbar range. If *None*, suitable min/max values are
automatically chosen by the `~.Normalize` instance (defaults to
the respective min/max values of *C* in case of the default linear
scaling).
alpha : scalar, optional, default: None
The alpha blending value, between 0 (transparent) and 1 (opaque).
snap : bool, optional, default: False
Whether to snap the mesh to pixel boundaries.
Returns
-------
image : `.AxesImage` or `.PcolorImage` or `.QuadMesh`
The return type depends on the type of grid:
- `.AxesImage` for a regular rectangular grid.
- `.PcolorImage` for a non-regular rectangular grid.
- `.QuadMesh` for a non-rectangular grid.
Notes
-----
.. [notes section required to get data note injection right]
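Examples
--------
A minimal, illustrative sketch of the fastest (uniform grid) form, where
*X* and *Y* are given as ``(min, max)`` tuples (the data is made up)::

    import matplotlib.pyplot as plt
    import numpy as np

    C = np.random.rand(4, 6)                # hypothetical (M, N) values
    fig, ax = plt.subplots()
    img = ax.pcolorfast((0, 6), (0, 4), C)  # returns an AxesImage here
    fig.colorbar(img, ax=ax)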
"""
if norm is not None and not isinstance(norm, mcolors.Normalize):
raise ValueError(
"'norm' must be an instance of 'mcolors.Normalize'")
C = args[-1]
nr, nc = np.shape(C)[:2]
if len(args) == 1:
style = "image"
x = [0, nc]
y = [0, nr]
elif len(args) == 3:
x, y = args[:2]
x = np.asarray(x)
y = np.asarray(y)
if x.ndim == 1 and y.ndim == 1:
if x.size == 2 and y.size == 2:
style = "image"
else:
dx = np.diff(x)
dy = np.diff(y)
if (np.ptp(dx) < 0.01 * np.abs(dx.mean()) and
np.ptp(dy) < 0.01 * np.abs(dy.mean())):
style = "image"
else:
style = "pcolorimage"
elif x.ndim == 2 and y.ndim == 2:
if C.ndim > 2:
raise ValueError(
'pcolorfast needs to use quadmesh, '
'which is not supported when x and y are 2D and C 3D')
style = "quadmesh"
else:
raise TypeError("arguments do not match valid signatures")
else:
raise TypeError("need 1 argument or 3 arguments")
if style == "quadmesh":
# data point in each cell is value at lower left corner
coords = np.stack([x, y], axis=-1)
collection = mcoll.QuadMesh(
nc, nr, coords,
array=np.ma.ravel(C), alpha=alpha, cmap=cmap, norm=norm,
antialiased=False, edgecolors="none")
self.add_collection(collection, autolim=False)
xl, xr, yb, yt = x.min(), x.max(), y.min(), y.max()
ret = collection
else: # It's one of the two image styles.
extent = xl, xr, yb, yt = x[0], x[-1], y[0], y[-1]
if style == "image":
im = mimage.AxesImage(
self, cmap, norm,
data=C, alpha=alpha, extent=extent,
interpolation='nearest', origin='lower',
**kwargs)
elif style == "pcolorimage":
im = mimage.PcolorImage(
self, x, y, C,
cmap=cmap, norm=norm, alpha=alpha, extent=extent,
**kwargs)
self.add_image(im)
ret = im
if vmin is not None or vmax is not None:
ret.set_clim(vmin, vmax)
else:
ret.autoscale_None()
ret.sticky_edges.x[:] = [xl, xr]
ret.sticky_edges.y[:] = [yb, yt]
self.update_datalim(np.array([[xl, yb], [xr, yt]]))
self.autoscale_view(tight=True)
return ret
@_preprocess_data()
def contour(self, *args, **kwargs):
kwargs['filled'] = False
contours = mcontour.QuadContourSet(self, *args, **kwargs)
self.autoscale_view()
return contours
contour.__doc__ = mcontour.QuadContourSet._contour_doc
@_preprocess_data()
def contourf(self, *args, **kwargs):
kwargs['filled'] = True
contours = mcontour.QuadContourSet(self, *args, **kwargs)
self.autoscale_view()
return contours
contourf.__doc__ = mcontour.QuadContourSet._contour_doc
def clabel(self, CS, *args, **kwargs):
return CS.clabel(*args, **kwargs)
clabel.__doc__ = mcontour.ContourSet.clabel.__doc__
#### Data analysis
@_preprocess_data(replace_names=["x", 'weights'], label_namer="x")
def hist(self, x, bins=None, range=None, density=None, weights=None,
cumulative=False, bottom=None, histtype='bar', align='mid',
orientation='vertical', rwidth=None, log=False,
color=None, label=None, stacked=False, normed=None,
**kwargs):
"""
Plot a histogram.
Compute and draw the histogram of *x*. The return value is a tuple
(*n*, *bins*, *patches*) or ([*n0*, *n1*, ...], *bins*, [*patches0*,
*patches1*,...]) if the input contains multiple data. See the
documentation of the *weights* parameter to draw a histogram of
already-binned data.
Multiple data can be provided via *x* as a list of datasets
of potentially different length ([*x0*, *x1*, ...]), or as
a 2-D ndarray in which each column is a dataset. Note that
the ndarray form is transposed relative to the list form.
Masked arrays are not supported at present.
Parameters
----------
x : (n,) array or sequence of (n,) arrays
Input values, this takes either a single array or a sequence of
arrays which are not required to be of the same length.
bins : int or sequence or str, optional
If an integer is given, ``bins + 1`` bin edges are calculated and
returned, consistent with `numpy.histogram`.
If `bins` is a sequence, gives bin edges, including left edge of
first bin and right edge of last bin. In this case, `bins` is
returned unmodified.
All but the last (righthand-most) bin is half-open. In other
words, if `bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which
*includes* 4.
Unequally spaced bins are supported if *bins* is a sequence.
With Numpy 1.11 or newer, you can alternatively provide a string
describing a binning strategy, such as 'auto', 'sturges', 'fd',
'doane', 'scott', 'rice' or 'sqrt', see
`numpy.histogram`.
The default is taken from :rc:`hist.bins`.
range : tuple or None, optional
The lower and upper range of the bins. Lower and upper outliers
are ignored. If not provided, *range* is ``(x.min(), x.max())``.
Range has no effect if *bins* is a sequence.
If *bins* is a sequence or *range* is specified, autoscaling
is based on the specified bin range instead of the
range of x.
Default is ``None``
density : bool, optional
If ``True``, the first element of the return tuple will
be the counts normalized to form a probability density, i.e.,
the area (or integral) under the histogram will sum to 1.
This is achieved by dividing the count by the number of
observations times the bin width and not dividing by the total
number of observations. If *stacked* is also ``True``, the sum of
the histograms is normalized to 1.
Default is ``None`` for both *normed* and *density*. If either is
set, then that value will be used. If neither are set, then the
args will be treated as ``False``.
If both *density* and *normed* are set an error is raised.
weights : (n, ) array_like or None, optional
An array of weights, of the same shape as *x*. Each value in *x*
only contributes its associated weight towards the bin count
(instead of 1). If *normed* or *density* is ``True``,
the weights are normalized, so that the integral of the density
over the range remains 1.
Default is ``None``.
This parameter can be used to draw a histogram of data that has
already been binned, e.g. using `np.histogram` (by treating each
bin as a single point with a weight equal to its count) ::
counts, bins = np.histogram(data)
plt.hist(bins[:-1], bins, weights=counts)
(or you may alternatively use `~.bar()`).
cumulative : bool, optional
If ``True``, then a histogram is computed where each bin gives the
counts in that bin plus all bins for smaller values. The last bin
gives the total number of datapoints. If *normed* or *density*
is also ``True`` then the histogram is normalized such that the
last bin equals 1. If *cumulative* evaluates to less than 0
(e.g., -1), the direction of accumulation is reversed.
In this case, if *normed* and/or *density* is also ``True``, then
the histogram is normalized such that the first bin equals 1.
Default is ``False``
bottom : array_like, scalar, or None
Location of the bottom baseline of each bin. If a scalar,
the base line for each bin is shifted by the same amount.
If an array, each bin is shifted independently and the length
of bottom must match the number of bins. If None, defaults to 0.
Default is ``None``
histtype : {'bar', 'barstacked', 'step', 'stepfilled'}, optional
The type of histogram to draw.
- 'bar' is a traditional bar-type histogram. If multiple data
are given the bars are arranged side by side.
- 'barstacked' is a bar-type histogram where multiple
data are stacked on top of each other.
- 'step' generates a lineplot that is by default
unfilled.
- 'stepfilled' generates a lineplot that is by default
filled.
Default is 'bar'
align : {'left', 'mid', 'right'}, optional
Controls how the histogram is plotted.
- 'left': bars are centered on the left bin edges.
- 'mid': bars are centered between the bin edges.
- 'right': bars are centered on the right bin edges.
Default is 'mid'
orientation : {'horizontal', 'vertical'}, optional
If 'horizontal', `~matplotlib.pyplot.barh` will be used for
bar-type histograms and the *bottom* kwarg will be the left edges.
rwidth : scalar or None, optional
The relative width of the bars as a fraction of the bin width. If
``None``, automatically compute the width.
Ignored if *histtype* is 'step' or 'stepfilled'.
Default is ``None``
log : bool, optional
If ``True``, the histogram axis will be set to a log scale. If
*log* is ``True`` and *x* is a 1D array, empty bins will be
filtered out and only the non-empty ``(n, bins, patches)``
will be returned.
Default is ``False``
color : color or array_like of colors or None, optional
Color spec or sequence of color specs, one per dataset. Default
(``None``) uses the standard line color sequence.
Default is ``None``
label : str or None, optional
String, or sequence of strings to match multiple datasets. Bar
charts yield multiple patches per dataset, but only the first gets
the label, so that the legend command will work as expected.
default is ``None``
stacked : bool, optional
If ``True``, multiple data are stacked on top of each other. If
``False``, multiple data are arranged side by side if histtype is
'bar', or on top of each other if histtype is 'step'.
Default is ``False``
normed : bool, optional
Deprecated; use the density keyword argument instead.
Returns
-------
n : array or list of arrays
The values of the histogram bins. See *density* and *weights* for a
description of the possible semantics. If input *x* is an array,
then this is an array of length *nbins*. If input is a sequence of
arrays ``[data1, data2,..]``, then this is a list of arrays with
the values of the histograms for each of the arrays in the same
order. The dtype of the array *n* (or of its element arrays) will
always be float even if no weighting or normalization is used.
bins : array
The edges of the bins. Length nbins + 1 (nbins left edges and right
edge of last bin). Always a single array even when multiple data
sets are passed in.
patches : list or list of lists
Silent list of individual patches used to create the histogram
or list of such list if multiple input datasets.
Other Parameters
----------------
**kwargs : `~matplotlib.patches.Patch` properties
See also
--------
hist2d : 2D histograms
Notes
-----
.. [Notes section required for data comment. See #10189.]
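Examples
--------
A minimal, illustrative sketch (the sample data is made up)::

    import matplotlib.pyplot as plt
    import numpy as np

    data = np.random.normal(size=1000)      # hypothetical sample
    fig, ax = plt.subplots()
    n, bins, patches = ax.hist(data, bins=30, density=True, color='C0')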
"""
# Avoid shadowing the builtin.
bin_range = range
from builtins import range
if np.isscalar(x):
x = [x]
if bins is None:
bins = rcParams['hist.bins']
# Validate string inputs here to avoid cluttering subsequent code.
cbook._check_in_list(['bar', 'barstacked', 'step', 'stepfilled'],
histtype=histtype)
cbook._check_in_list(['left', 'mid', 'right'], align=align)
cbook._check_in_list(['horizontal', 'vertical'],
orientation=orientation)
if histtype == 'barstacked' and not stacked:
stacked = True
if density is not None and normed is not None:
raise ValueError("kwargs 'density' and 'normed' cannot be used "
                 "simultaneously. "
                 "Please only use 'density', since 'normed' "
                 "is deprecated.")
if normed is not None:
cbook.warn_deprecated("2.1", name="'normed'", obj_type="kwarg",
alternative="'density'", removal="3.1")
# basic input validation
input_empty = np.size(x) == 0
# Massage 'x' for processing.
x = cbook._reshape_2D(x, 'x')
nx = len(x) # number of datasets
# Process unit information
# Unit conversion is done individually on each dataset
self._process_unit_info(xdata=x[0], kwargs=kwargs)
x = [self.convert_xunits(xi) for xi in x]
if bin_range is not None:
bin_range = self.convert_xunits(bin_range)
# We need to do to 'weights' what was done to 'x'
if weights is not None:
w = cbook._reshape_2D(weights, 'weights')
else:
w = [None] * nx
if len(w) != nx:
raise ValueError('weights should have the same shape as x')
for xi, wi in zip(x, w):
if wi is not None and len(wi) != len(xi):
raise ValueError(
'weights should have the same shape as x')
if color is None:
color = [self._get_lines.get_next_color() for i in range(nx)]
else:
color = mcolors.to_rgba_array(color)
if len(color) != nx:
error_message = (
"color kwarg must have one color per data set. %d data "
"sets and %d colors were provided" % (nx, len(color)))
raise ValueError(error_message)
hist_kwargs = dict()
# If the bin_range is not given, compute it while ignoring nans;
# numpy does not do this for us when guessing the range (but will
# happily ignore nans when computing the histogram).
if bin_range is None:
xmin = np.inf
xmax = -np.inf
for xi in x:
if len(xi):
# python's min/max ignore nan,
# np.nanmin returns nan only for all-nan input
xmin = min(xmin, np.nanmin(xi))
xmax = max(xmax, np.nanmax(xi))
# make sure we have seen at least one non-nan and finite
# value before we reset the bin range
if not np.isnan([xmin, xmax]).any() and not (xmin > xmax):
bin_range = (xmin, xmax)
# If bins are not specified either explicitly or via range,
# we need to figure out the range required for all datasets,
# and supply that to np.histogram.
if not input_empty and len(x) > 1:
if weights is not None:
_w = np.concatenate(w)
else:
_w = None
bins = histogram_bin_edges(np.concatenate(x),
bins, bin_range, _w)
else:
hist_kwargs['range'] = bin_range
density = bool(density) or bool(normed)
if density and not stacked:
hist_kwargs = dict(density=density)
# List to store all the top coordinates of the histograms
tops = []
mlast = None
# Loop through datasets
for i in range(nx):
# this will automatically overwrite bins,
# so that each histogram uses the same bins
m, bins = np.histogram(x[i], bins, weights=w[i], **hist_kwargs)
m = m.astype(float) # causes problems later if it's an int
if mlast is None:
mlast = np.zeros(len(bins)-1, m.dtype)
if stacked:
m += mlast
mlast[:] = m
tops.append(m)
# If a stacked density plot, normalize so the area of all the stacked
# histograms together is 1
if stacked and density:
db = np.diff(bins)
for m in tops:
m[:] = (m / db) / tops[-1].sum()
if cumulative:
slc = slice(None)
if isinstance(cumulative, Number) and cumulative < 0:
slc = slice(None, None, -1)
if density:
tops = [(m * np.diff(bins))[slc].cumsum()[slc] for m in tops]
else:
tops = [m[slc].cumsum()[slc] for m in tops]
patches = []
# Save autoscale state for later restoration; turn autoscaling
# off so we can do it all a single time at the end, instead
# of having it done by bar or fill and then having to be redone.
_saved_autoscalex = self.get_autoscalex_on()
_saved_autoscaley = self.get_autoscaley_on()
self.set_autoscalex_on(False)
self.set_autoscaley_on(False)
if histtype.startswith('bar'):
totwidth = np.diff(bins)
if rwidth is not None:
dr = np.clip(rwidth, 0, 1)
elif (len(tops) > 1 and
((not stacked) or rcParams['_internal.classic_mode'])):
dr = 0.8
else:
dr = 1.0
if histtype == 'bar' and not stacked:
width = dr * totwidth / nx
dw = width
boffset = -0.5 * dr * totwidth * (1 - 1 / nx)
elif histtype == 'barstacked' or stacked:
width = dr * totwidth
boffset, dw = 0.0, 0.0
if align == 'mid':
boffset += 0.5 * totwidth
elif align == 'right':
boffset += totwidth
if orientation == 'horizontal':
_barfunc = self.barh
bottom_kwarg = 'left'
else: # orientation == 'vertical'
_barfunc = self.bar
bottom_kwarg = 'bottom'
for m, c in zip(tops, color):
if bottom is None:
bottom = np.zeros(len(m))
if stacked:
height = m - bottom
else:
height = m
patch = _barfunc(bins[:-1]+boffset, height, width,
align='center', log=log,
color=c, **{bottom_kwarg: bottom})
patches.append(patch)
if stacked:
bottom[:] = m
boffset += dw
elif histtype.startswith('step'):
# these define the perimeter of the polygon
x = np.zeros(4 * len(bins) - 3)
y = np.zeros(4 * len(bins) - 3)
x[0:2*len(bins)-1:2], x[1:2*len(bins)-1:2] = bins, bins[:-1]
x[2*len(bins)-1:] = x[1:2*len(bins)-1][::-1]
if bottom is None:
bottom = np.zeros(len(bins) - 1)
y[1:2*len(bins)-1:2], y[2:2*len(bins):2] = bottom, bottom
y[2*len(bins)-1:] = y[1:2*len(bins)-1][::-1]
if log:
if orientation == 'horizontal':
self.set_xscale('log', nonposx='clip')
logbase = self.xaxis._scale.base
else: # orientation == 'vertical'
self.set_yscale('log', nonposy='clip')
logbase = self.yaxis._scale.base
# Setting a minimum of 0 results in problems for log plots
if np.min(bottom) > 0:
minimum = np.min(bottom)
elif density or weights is not None:
# For data that is normed to form a probability density,
# set to minimum data value / logbase
# (gives 1 full tick-label unit for the lowest filled bin)
ndata = np.array(tops)
minimum = (np.min(ndata[ndata > 0])) / logbase
else:
# For non-normed (density = False) data,
# set the min to 1 / log base,
# again so that there is 1 full tick-label unit
# for the lowest bin
minimum = 1.0 / logbase
y[0], y[-1] = minimum, minimum
else:
minimum = 0
if align == 'left':
x -= 0.5*(bins[1]-bins[0])
elif align == 'right':
x += 0.5*(bins[1]-bins[0])
# If fill kwarg is set, it will be passed to the patch collection,
# overriding this
fill = (histtype == 'stepfilled')
xvals, yvals = [], []
for m in tops:
if stacked:
# starting point for drawing polygon
y[0] = y[1]
# top of the previous polygon becomes the bottom
y[2*len(bins)-1:] = y[1:2*len(bins)-1][::-1]
# set the top of this polygon
y[1:2*len(bins)-1:2], y[2:2*len(bins):2] = (m + bottom,
m + bottom)
if log:
y[y < minimum] = minimum
if orientation == 'horizontal':
xvals.append(y.copy())
yvals.append(x.copy())
else:
xvals.append(x.copy())
yvals.append(y.copy())
# stepfill is closed, step is not
split = -1 if fill else 2 * len(bins)
# add patches in reverse order so that when stacking,
# items lower in the stack are plotted on top of
# items higher in the stack
for x, y, c in reversed(list(zip(xvals, yvals, color))):
patches.append(self.fill(
x[:split], y[:split],
closed=True if fill else None,
facecolor=c,
edgecolor=None if fill else c,
fill=fill if fill else None))
for patch_list in patches:
for patch in patch_list:
if orientation == 'vertical':
patch.sticky_edges.y.append(minimum)
elif orientation == 'horizontal':
patch.sticky_edges.x.append(minimum)
# we return patches, so put it back in the expected order
patches.reverse()
self.set_autoscalex_on(_saved_autoscalex)
self.set_autoscaley_on(_saved_autoscaley)
self.autoscale_view()
if label is None:
labels = [None]
elif isinstance(label, str):
labels = [label]
elif not np.iterable(label):
labels = [str(label)]
else:
labels = [str(lab) for lab in label]
for patch, lbl in itertools.zip_longest(patches, labels):
if patch:
p = patch[0]
p.update(kwargs)
if lbl is not None:
p.set_label(lbl)
for p in patch[1:]:
p.update(kwargs)
p.set_label('_nolegend_')
if nx == 1:
return tops[0], bins, cbook.silent_list('Patch', patches[0])
else:
return tops, bins, cbook.silent_list('Lists of Patches', patches)
@_preprocess_data(replace_names=["x", "y", "weights"])
@cbook._rename_parameter("3.1", "normed", "density")
def hist2d(self, x, y, bins=10, range=None, density=False, weights=None,
cmin=None, cmax=None, **kwargs):
"""
Make a 2D histogram plot.
Parameters
----------
x, y : array_like, shape (n, )
Input values
bins : None or int or [int, int] or array_like or [array, array]
The bin specification:
- If int, the number of bins for the two dimensions
(nx=ny=bins).
- If ``[int, int]``, the number of bins in each dimension
(nx, ny = bins).
- If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
- If ``[array, array]``, the bin edges in each dimension
(x_edges, y_edges = bins).
The default value is 10.
range : array_like shape(2, 2), optional, default: None
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the bins parameters): ``[[xmin,
xmax], [ymin, ymax]]``. All values outside of this range will be
considered outliers and not tallied in the histogram.
density : bool, optional, default: False
Normalize histogram. *normed* is a deprecated synonym for this
parameter.
weights : array_like, shape (n, ), optional, default: None
An array of values w_i weighing each sample (x_i, y_i).
cmin : scalar, optional, default: None
All bins that have a count less than *cmin* will not be displayed, and
these count values in the returned count histogram will also be set
to nan.
cmax : scalar, optional, default: None
All bins that have a count more than *cmax* will not be displayed (set
to NaN before plotting), and these count values in the returned count
histogram will also be set to nan.
Returns
-------
h : 2D array
The bi-dimensional histogram of samples x and y. Values in x are
histogrammed along the first dimension and values in y are
histogrammed along the second dimension.
xedges : 1D array
The bin edges along the x axis.
yedges : 1D array
The bin edges along the y axis.
image : `~.matplotlib.collections.QuadMesh`
Other Parameters
----------------
cmap : Colormap or str, optional
A `.colors.Colormap` instance. If not set, use rc settings.
norm : Normalize, optional
A `.colors.Normalize` instance is used to
scale luminance data to ``[0, 1]``. If not set, defaults to
`.colors.Normalize()`.
vmin/vmax : None or scalar, optional
Arguments passed to the `~.colors.Normalize` instance.
alpha : ``0 <= scalar <= 1`` or ``None``, optional
The alpha blending value.
See also
--------
hist : 1D histogram plotting
Notes
-----
- Currently ``hist2d`` calculates its own axis limits, and any limits
previously set are ignored.
- Rendering the histogram with a logarithmic color scale is
accomplished by passing a `.colors.LogNorm` instance to the *norm*
keyword argument. Likewise, power-law normalization (similar
in effect to gamma correction) can be accomplished with
`.colors.PowerNorm`.
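Examples
--------
A minimal, illustrative sketch (the correlated samples are made up)::

    import matplotlib.pyplot as plt
    import numpy as np

    x = np.random.standard_normal(10000)
    y = 0.5 * x + np.random.standard_normal(10000)   # hypothetical data
    fig, ax = plt.subplots()
    h, xedges, yedges, im = ax.hist2d(x, y, bins=50)
    fig.colorbar(im, ax=ax)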
"""
h, xedges, yedges = np.histogram2d(x, y, bins=bins, range=range,
normed=density, weights=weights)
if cmin is not None:
h[h < cmin] = None
if cmax is not None:
h[h > cmax] = None
pc = self.pcolormesh(xedges, yedges, h.T, **kwargs)
self.set_xlim(xedges[0], xedges[-1])
self.set_ylim(yedges[0], yedges[-1])
return h, xedges, yedges, pc
@_preprocess_data(replace_names=["x"])
@docstring.dedent_interpd
def psd(self, x, NFFT=None, Fs=None, Fc=None, detrend=None,
window=None, noverlap=None, pad_to=None,
sides=None, scale_by_freq=None, return_line=None, **kwargs):
r"""
Plot the power spectral density.
The power spectral density :math:`P_{xx}` by Welch's average
periodogram method. The vector *x* is divided into *NFFT* length
segments. Each segment is detrended by function *detrend* and
windowed by function *window*. *noverlap* gives the length of
the overlap between segments. The :math:`|\mathrm{fft}(i)|^2`
of each segment :math:`i` are averaged to compute :math:`P_{xx}`,
with a scaling to correct for power loss due to windowing.
If len(*x*) < *NFFT*, it will be zero padded to *NFFT*.
Parameters
----------
x : 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(PSD)s
noverlap : int
The number of points of overlap between segments.
The default value is 0 (no overlap).
Fc : int
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
return_line : bool
Whether to include the line object plotted in the returned values.
Default is False.
Returns
-------
Pxx : 1-D array
The values for the power spectrum `P_{xx}` before scaling
(real valued).
freqs : 1-D array
The frequencies corresponding to the elements in *Pxx*.
line : a :class:`~matplotlib.lines.Line2D` instance
The line created by this function.
Only returned if *return_line* is True.
Other Parameters
----------------
**kwargs
Keyword arguments control the :class:`~matplotlib.lines.Line2D`
properties:
%(_Line2D_docstr)s
See Also
--------
:func:`specgram`
:func:`specgram` differs in the default overlap; in not returning
the mean of the segment periodograms; in returning the times of the
segments; and in plotting a colormap instead of a line.
:func:`magnitude_spectrum`
:func:`magnitude_spectrum` plots the magnitude spectrum.
:func:`csd`
:func:`csd` plots the spectral density between two signals.
Notes
-----
For plotting, the power is plotted as
:math:`10\log_{10}(P_{xx})` for decibels, though *Pxx* itself
is returned.
References
----------
Bendat & Piersol -- Random Data: Analysis and Measurement Procedures,
John Wiley & Sons (1986)
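Examples
--------
A minimal, illustrative sketch (the sampling rate and signal are made up)::

    import matplotlib.pyplot as plt
    import numpy as np

    fs = 1000                                     # hypothetical sampling rate
    t = np.arange(0, 1, 1 / fs)
    x = np.sin(2 * np.pi * 100 * t) + 0.5 * np.random.randn(t.size)
    fig, ax = plt.subplots()
    Pxx, freqs = ax.psd(x, NFFT=256, Fs=fs)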
"""
if Fc is None:
Fc = 0
pxx, freqs = mlab.psd(x=x, NFFT=NFFT, Fs=Fs, detrend=detrend,
window=window, noverlap=noverlap, pad_to=pad_to,
sides=sides, scale_by_freq=scale_by_freq)
freqs += Fc
if scale_by_freq in (None, True):
psd_units = 'dB/Hz'
else:
psd_units = 'dB'
line = self.plot(freqs, 10 * np.log10(pxx), **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Power Spectral Density (%s)' % psd_units)
self.grid(True)
vmin, vmax = self.viewLim.intervaly
intv = vmax - vmin
logi = int(np.log10(intv))
if logi == 0:
logi = .1
step = 10 * logi
ticks = np.arange(math.floor(vmin), math.ceil(vmax) + 1, step)
self.set_yticks(ticks)
if return_line is None or not return_line:
return pxx, freqs
else:
return pxx, freqs, line
@_preprocess_data(replace_names=["x", "y"], label_namer="y")
@docstring.dedent_interpd
def csd(self, x, y, NFFT=None, Fs=None, Fc=None, detrend=None,
window=None, noverlap=None, pad_to=None,
sides=None, scale_by_freq=None, return_line=None, **kwargs):
"""
Plot the cross-spectral density.
The cross spectral density :math:`P_{xy}` by Welch's average
periodogram method. The vectors *x* and *y* are divided into
*NFFT* length segments. Each segment is detrended by function
*detrend* and windowed by function *window*. *noverlap* gives
the length of the overlap between segments. The product of
the direct FFTs of *x* and *y* are averaged over each segment
to compute :math:`P_{xy}`, with a scaling to correct for power
loss due to windowing.
If len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero
padded to *NFFT*.
Parameters
----------
x, y : 1-D arrays or sequences
Arrays or sequences containing the data.
%(Spectral)s
%(PSD)s
noverlap : int
The number of points of overlap between segments.
The default value is 0 (no overlap).
Fc : int
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
return_line : bool
Whether to include the line object plotted in the returned values.
Default is False.
Returns
-------
Pxy : 1-D array
The values for the cross spectrum `P_{xy}` before scaling
(complex valued).
freqs : 1-D array
The frequencies corresponding to the elements in *Pxy*.
line : a :class:`~matplotlib.lines.Line2D` instance
The line created by this function.
Only returned if *return_line* is True.
Other Parameters
----------------
**kwargs
Keyword arguments control the :class:`~matplotlib.lines.Line2D`
properties:
%(_Line2D_docstr)s
See Also
--------
:func:`psd`
:func:`psd` is equivalent to setting ``y = x``.
Notes
-----
For plotting, the power is plotted as
:math:`10\\log_{10}(P_{xy})` for decibels, though `P_{xy}` itself
is returned.
References
----------
Bendat & Piersol -- Random Data: Analysis and Measurement Procedures,
John Wiley & Sons (1986)
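Examples
--------
A minimal, illustrative sketch of two noisy signals sharing a common
component (the values are made up)::

    import matplotlib.pyplot as plt
    import numpy as np

    fs = 500                                   # hypothetical sampling rate
    t = np.arange(0, 2, 1 / fs)
    s = np.sin(2 * np.pi * 50 * t)
    x = s + 0.2 * np.random.randn(t.size)
    y = s + 0.2 * np.random.randn(t.size)
    fig, ax = plt.subplots()
    Pxy, freqs = ax.csd(x, y, NFFT=256, Fs=fs)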
"""
if Fc is None:
Fc = 0
pxy, freqs = mlab.csd(x=x, y=y, NFFT=NFFT, Fs=Fs, detrend=detrend,
window=window, noverlap=noverlap, pad_to=pad_to,
sides=sides, scale_by_freq=scale_by_freq)
# pxy is complex
freqs += Fc
line = self.plot(freqs, 10 * np.log10(np.abs(pxy)), **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Cross Spectrum Magnitude (dB)')
self.grid(True)
vmin, vmax = self.viewLim.intervaly
intv = vmax - vmin
logi = int(np.log10(intv))
if logi == 0:
    # mirror the guard in psd above: avoid a zero tick step when the
    # span is less than one decade
    logi = .1
step = 10 * logi
ticks = np.arange(math.floor(vmin), math.ceil(vmax) + 1, step)
self.set_yticks(ticks)
if return_line is None or not return_line:
return pxy, freqs
else:
return pxy, freqs, line
@_preprocess_data(replace_names=["x"])
@docstring.dedent_interpd
def magnitude_spectrum(self, x, Fs=None, Fc=None, window=None,
pad_to=None, sides=None, scale=None,
**kwargs):
"""
Plot the magnitude spectrum.
Compute the magnitude spectrum of *x*. Data is padded to a
length of *pad_to* and the windowing function *window* is applied to
the signal.
Parameters
----------
x : 1-D array or sequence
Array or sequence containing the data.
%(Spectral)s
%(Single_Spectrum)s
scale : {'default', 'linear', 'dB'}
The scaling of the values in the *spec*. 'linear' is no scaling.
'dB' returns the values in dB scale, i.e., the dB amplitude
(20 * log10). 'default' is 'linear'.
Fc : int
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
Returns
-------
spectrum : 1-D array
The values for the magnitude spectrum before scaling (real valued).
freqs : 1-D array
The frequencies corresponding to the elements in *spectrum*.
line : a :class:`~matplotlib.lines.Line2D` instance
The line created by this function.
Other Parameters
----------------
**kwargs
Keyword arguments control the :class:`~matplotlib.lines.Line2D`
properties:
%(_Line2D_docstr)s
See Also
--------
:func:`psd`
:func:`psd` plots the power spectral density.
:func:`angle_spectrum`
:func:`angle_spectrum` plots the angles of the corresponding
frequencies.
:func:`phase_spectrum`
:func:`phase_spectrum` plots the phase (unwrapped angle) of the
corresponding frequencies.
:func:`specgram`
:func:`specgram` can plot the magnitude spectrum of segments within
the signal in a colormap.
Notes
-----
.. [Notes section required for data comment. See #10189.]
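Examples
--------
A minimal, illustrative sketch (the tone and sampling rate are made up)::

    import matplotlib.pyplot as plt
    import numpy as np

    fs = 1000
    t = np.arange(0, 1, 1 / fs)
    x = np.sin(2 * np.pi * 120 * t) + 0.1 * np.random.randn(t.size)
    fig, ax = plt.subplots()
    spectrum, freqs, line = ax.magnitude_spectrum(x, Fs=fs)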
"""
if Fc is None:
Fc = 0
if scale is None or scale == 'default':
scale = 'linear'
spec, freqs = mlab.magnitude_spectrum(x=x, Fs=Fs, window=window,
pad_to=pad_to, sides=sides)
freqs += Fc
if scale == 'linear':
Z = spec
yunits = 'energy'
elif scale == 'dB':
Z = 20. * np.log10(spec)
yunits = 'dB'
else:
raise ValueError('Unknown scale %s', scale)
lines = self.plot(freqs, Z, **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Magnitude (%s)' % yunits)
return spec, freqs, lines[0]
@_preprocess_data(replace_names=["x"])
@docstring.dedent_interpd
def angle_spectrum(self, x, Fs=None, Fc=None, window=None,
pad_to=None, sides=None, **kwargs):
"""
Plot the angle spectrum.
Compute the angle spectrum (wrapped phase spectrum) of *x*.
Data is padded to a length of *pad_to* and the windowing function
*window* is applied to the signal.
Parameters
----------
x : 1-D array or sequence
Array or sequence containing the data.
%(Spectral)s
%(Single_Spectrum)s
Fc : int
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
Returns
-------
spectrum : 1-D array
The values for the angle spectrum in radians (real valued).
freqs : 1-D array
The frequencies corresponding to the elements in *spectrum*.
line : a :class:`~matplotlib.lines.Line2D` instance
The line created by this function.
Other Parameters
----------------
**kwargs
Keyword arguments control the :class:`~matplotlib.lines.Line2D`
properties:
%(_Line2D_docstr)s
See Also
--------
:func:`magnitude_spectrum`
:func:`magnitude_spectrum` plots the magnitudes of the corresponding
frequencies.
:func:`phase_spectrum`
:func:`phase_spectrum` plots the unwrapped version of this
function.
:func:`specgram`
:func:`specgram` can plot the angle spectrum of segments within the
signal in a colormap.
Notes
-----
.. [Notes section required for data comment. See #10189.]
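Examples
--------
A minimal, illustrative sketch (the signal is made up)::

    import matplotlib.pyplot as plt
    import numpy as np

    fs = 1000
    t = np.arange(0, 1, 1 / fs)
    x = np.sin(2 * np.pi * 120 * t) + 0.1 * np.random.randn(t.size)
    fig, ax = plt.subplots()
    spectrum, freqs, line = ax.angle_spectrum(x, Fs=fs)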
"""
if Fc is None:
Fc = 0
spec, freqs = mlab.angle_spectrum(x=x, Fs=Fs, window=window,
pad_to=pad_to, sides=sides)
freqs += Fc
lines = self.plot(freqs, spec, **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Angle (radians)')
return spec, freqs, lines[0]
@_preprocess_data(replace_names=["x"])
@docstring.dedent_interpd
def phase_spectrum(self, x, Fs=None, Fc=None, window=None,
pad_to=None, sides=None, **kwargs):
"""
Plot the phase spectrum.
Compute the phase spectrum (unwrapped angle spectrum) of *x*.
Data is padded to a length of *pad_to* and the windowing function
*window* is applied to the signal.
Parameters
----------
x : 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(Single_Spectrum)s
Fc : int
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
Returns
-------
spectrum : 1-D array
The values for the phase spectrum in radians (real valued).
freqs : 1-D array
The frequencies corresponding to the elements in *spectrum*.
line : a :class:`~matplotlib.lines.Line2D` instance
The line created by this function.
Other Parameters
----------------
**kwargs
Keyword arguments control the :class:`~matplotlib.lines.Line2D`
properties:
%(_Line2D_docstr)s
See Also
--------
:func:`magnitude_spectrum`
:func:`magnitude_spectrum` plots the magnitudes of the
corresponding frequencies.
:func:`angle_spectrum`
:func:`angle_spectrum` plots the wrapped version of this function.
:func:`specgram`
:func:`specgram` can plot the phase spectrum of segments within the
signal in a colormap.
Notes
-----
.. [Notes section required for data comment. See #10189.]
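Examples
--------
A minimal, illustrative sketch (the signal is made up)::

    import matplotlib.pyplot as plt
    import numpy as np

    fs = 1000
    t = np.arange(0, 1, 1 / fs)
    x = np.sin(2 * np.pi * 120 * t) + 0.1 * np.random.randn(t.size)
    fig, ax = plt.subplots()
    spectrum, freqs, line = ax.phase_spectrum(x, Fs=fs)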
"""
if Fc is None:
Fc = 0
spec, freqs = mlab.phase_spectrum(x=x, Fs=Fs, window=window,
pad_to=pad_to, sides=sides)
freqs += Fc
lines = self.plot(freqs, spec, **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Phase (radians)')
return spec, freqs, lines[0]
@_preprocess_data(replace_names=["x", "y"])
@docstring.dedent_interpd
def cohere(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
Plot the coherence between *x* and *y*.
Plot the coherence between *x* and *y*. Coherence is the
normalized cross spectral density:
.. math::
C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}
Parameters
----------
%(Spectral)s
%(PSD)s
noverlap : int
The number of points of overlap between blocks. The
default value is 0 (no overlap).
Fc : int
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
Returns
-------
Cxy : 1-D array
The coherence vector.
freqs : 1-D array
The frequencies for the elements in *Cxy*.
Other Parameters
----------------
**kwargs
Keyword arguments control the :class:`~matplotlib.lines.Line2D`
properties:
%(_Line2D_docstr)s
References
----------
Bendat & Piersol -- Random Data: Analysis and Measurement Procedures,
John Wiley & Sons (1986)
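        Examples
        --------
        A minimal, illustrative call; the two noisy, partially correlated
        signals below are only stand-ins for real data::

            import numpy as np
            import matplotlib.pyplot as plt

            fs = 500
            t = np.arange(0, 2, 1 / fs)
            common = np.sin(2 * np.pi * 40 * t)
            x = common + 0.5 * np.random.randn(len(t))
            y = common + 0.5 * np.random.randn(len(t))
            fig, ax = plt.subplots()
            Cxy, freqs = ax.cohere(x, y, NFFT=256, Fs=fs)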
"""
cxy, freqs = mlab.cohere(x=x, y=y, NFFT=NFFT, Fs=Fs, detrend=detrend,
window=window, noverlap=noverlap,
scale_by_freq=scale_by_freq)
freqs += Fc
self.plot(freqs, cxy, **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Coherence')
self.grid(True)
return cxy, freqs
@_preprocess_data(replace_names=["x"])
@docstring.dedent_interpd
def specgram(self, x, NFFT=None, Fs=None, Fc=None, detrend=None,
window=None, noverlap=None,
cmap=None, xextent=None, pad_to=None, sides=None,
scale_by_freq=None, mode=None, scale=None,
vmin=None, vmax=None, **kwargs):
"""
Plot a spectrogram.
Compute and plot a spectrogram of data in *x*. Data are split into
*NFFT* length segments and the spectrum of each section is
computed. The windowing function *window* is applied to each
segment, and the amount of overlap of each segment is
specified with *noverlap*. The spectrogram is plotted as a colormap
(using imshow).
Parameters
----------
x : 1-D array or sequence
Array or sequence containing the data.
%(Spectral)s
%(PSD)s
mode : {'default', 'psd', 'magnitude', 'angle', 'phase'}
What sort of spectrum to use. Default is 'psd', which takes the
power spectral density. 'magnitude' returns the magnitude
spectrum. 'angle' returns the phase spectrum without unwrapping.
'phase' returns the phase spectrum with unwrapping.
noverlap : int
The number of points of overlap between blocks. The
default value is 128.
scale : {'default', 'linear', 'dB'}
The scaling of the values in the *spec*. 'linear' is no scaling.
'dB' returns the values in dB scale. When *mode* is 'psd',
this is dB power (10 * log10). Otherwise this is dB amplitude
(20 * log10). 'default' is 'dB' if *mode* is 'psd' or
'magnitude' and 'linear' otherwise. This must be 'linear'
if *mode* is 'angle' or 'phase'.
Fc : int
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
cmap
A :class:`matplotlib.colors.Colormap` instance; if *None*, use
default determined by rc
xextent : *None* or (xmin, xmax)
The image extent along the x-axis. The default sets *xmin* to the
left border of the first bin (*spectrum* column) and *xmax* to the
right border of the last bin. Note that for *noverlap>0* the width
of the bins is smaller than those of the segments.
**kwargs
Additional kwargs are passed on to imshow which makes the
specgram image.
Returns
-------
spectrum : 2-D array
Columns are the periodograms of successive segments.
freqs : 1-D array
The frequencies corresponding to the rows in *spectrum*.
t : 1-D array
The times corresponding to midpoints of segments (i.e., the columns
in *spectrum*).
im : instance of class :class:`~matplotlib.image.AxesImage`
The image created by imshow containing the spectrogram
See Also
--------
:func:`psd`
:func:`psd` differs in the default overlap; in returning the mean
of the segment periodograms; in not returning times; and in
generating a line plot instead of colormap.
:func:`magnitude_spectrum`
A single spectrum, similar to having a single segment when *mode*
is 'magnitude'. Plots a line instead of a colormap.
:func:`angle_spectrum`
A single spectrum, similar to having a single segment when *mode*
is 'angle'. Plots a line instead of a colormap.
:func:`phase_spectrum`
A single spectrum, similar to having a single segment when *mode*
is 'phase'. Plots a line instead of a colormap.
Notes
-----
        The parameters *detrend* and *scale_by_freq* only apply when *mode*
is set to 'psd'.
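        Examples
        --------
        A minimal, illustrative call; the synthetic chirp below is only a
        stand-in for real data::

            import numpy as np
            import matplotlib.pyplot as plt

            fs = 1000
            t = np.arange(0, 3, 1 / fs)
            sig = np.sin(2 * np.pi * 100 * t * (1 + t / 6))   # rising chirp
            fig, ax = plt.subplots()
            spectrum, freqs, times, im = ax.specgram(sig, NFFT=256, Fs=fs,
                                                     noverlap=128)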
"""
if NFFT is None:
NFFT = 256 # same default as in mlab.specgram()
if Fc is None:
Fc = 0 # same default as in mlab._spectral_helper()
if noverlap is None:
noverlap = 128 # same default as in mlab.specgram()
if mode == 'complex':
raise ValueError('Cannot plot a complex specgram')
if scale is None or scale == 'default':
if mode in ['angle', 'phase']:
scale = 'linear'
else:
scale = 'dB'
elif mode in ['angle', 'phase'] and scale == 'dB':
raise ValueError('Cannot use dB scale with angle or phase mode')
spec, freqs, t = mlab.specgram(x=x, NFFT=NFFT, Fs=Fs,
detrend=detrend, window=window,
noverlap=noverlap, pad_to=pad_to,
sides=sides,
scale_by_freq=scale_by_freq,
mode=mode)
if scale == 'linear':
Z = spec
elif scale == 'dB':
if mode is None or mode == 'default' or mode == 'psd':
Z = 10. * np.log10(spec)
else:
Z = 20. * np.log10(spec)
else:
            raise ValueError('Unknown scale %s' % scale)
Z = np.flipud(Z)
if xextent is None:
# padding is needed for first and last segment:
pad_xextent = (NFFT-noverlap) / Fs / 2
xextent = np.min(t) - pad_xextent, np.max(t) + pad_xextent
xmin, xmax = xextent
freqs += Fc
extent = xmin, xmax, freqs[0], freqs[-1]
im = self.imshow(Z, cmap, extent=extent, vmin=vmin, vmax=vmax,
**kwargs)
self.axis('auto')
return spec, freqs, t, im
@docstring.dedent_interpd
def spy(self, Z, precision=0, marker=None, markersize=None,
aspect='equal', origin="upper", **kwargs):
"""
Plot the sparsity pattern of a 2D array.
This visualizes the non-zero values of the array.
Two plotting styles are available: image and marker. Both
are available for full arrays, but only the marker style
works for `scipy.sparse.spmatrix` instances.
**Image style**
If *marker* and *markersize* are *None*, `~.Axes.imshow` is used. Any
extra remaining kwargs are passed to this method.
**Marker style**
        If *Z* is a `scipy.sparse.spmatrix` or *marker* or *markersize* are
        not *None*, a `~matplotlib.lines.Line2D` object will be returned with
the value of marker determining the marker type, and any
remaining kwargs passed to `~.Axes.plot`.
Parameters
----------
Z : array-like (M, N)
The array to be plotted.
precision : float or 'present', optional, default: 0
If *precision* is 0, any non-zero value will be plotted. Otherwise,
values of :math:`|Z| > precision` will be plotted.
For :class:`scipy.sparse.spmatrix` instances, you can also
pass 'present'. In this case any value present in the array
will be plotted, even if it is identically zero.
origin : {'upper', 'lower'}, optional
Place the [0,0] index of the array in the upper left or lower left
corner of the axes. The convention 'upper' is typically used for
matrices and images.
If not given, :rc:`image.origin` is used, defaulting to 'upper'.
aspect : {'equal', 'auto', None} or float, optional
Controls the aspect ratio of the axes. The aspect is of particular
relevance for images since it may distort the image, i.e. pixel
will not be square.
This parameter is a shortcut for explicitly calling
`.Axes.set_aspect`. See there for further details.
- 'equal': Ensures an aspect ratio of 1. Pixels will be square.
- 'auto': The axes is kept fixed and the aspect is adjusted so
that the data fit in the axes. In general, this will result in
non-square pixels.
- *None*: Use :rc:`image.aspect` (default: 'equal').
Default: 'equal'
Returns
-------
ret : `~matplotlib.image.AxesImage` or `.Line2D`
The return type depends on the plotting style (see above).
Other Parameters
----------------
**kwargs
The supported additional parameters depend on the plotting style.
For the image style, you can pass the following additional
parameters of `~.Axes.imshow`:
- *cmap*
- *alpha*
- *url*
- any `.Artist` properties (passed on to the `.AxesImage`)
For the marker style, you can pass any `.Line2D` property except
for *linestyle*:
%(_Line2D_docstr)s
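        Examples
        --------
        Two illustrative calls, one per plotting style; the mostly-zero array
        below is only a stand-in for real data::

            import numpy as np
            import matplotlib.pyplot as plt

            rng = np.random.RandomState(0)
            Z = rng.rand(20, 20)
            Z[Z < 0.7] = 0                     # zero out most entries
            fig, (ax1, ax2) = plt.subplots(1, 2)
            im = ax1.spy(Z)                    # image style
            line = ax2.spy(Z, markersize=4)    # marker style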
"""
if marker is None and markersize is None and hasattr(Z, 'tocoo'):
marker = 's'
if marker is None and markersize is None:
Z = np.asarray(Z)
mask = np.abs(Z) > precision
if 'cmap' not in kwargs:
kwargs['cmap'] = mcolors.ListedColormap(['w', 'k'],
name='binary')
if 'interpolation' in kwargs:
raise TypeError(
"spy() got an unexpected keyword argument 'interpolation'")
ret = self.imshow(mask, interpolation='nearest', aspect=aspect,
origin=origin, **kwargs)
else:
if hasattr(Z, 'tocoo'):
c = Z.tocoo()
if precision == 'present':
y = c.row
x = c.col
else:
nonzero = np.abs(c.data) > precision
y = c.row[nonzero]
x = c.col[nonzero]
else:
Z = np.asarray(Z)
nonzero = np.abs(Z) > precision
y, x = np.nonzero(nonzero)
if marker is None:
marker = 's'
if markersize is None:
markersize = 10
if 'linestyle' in kwargs:
raise TypeError(
"spy() got an unexpected keyword argument 'linestyle'")
marks = mlines.Line2D(x, y, linestyle='None',
marker=marker, markersize=markersize, **kwargs)
self.add_line(marks)
nr, nc = Z.shape
self.set_xlim(-0.5, nc - 0.5)
self.set_ylim(nr - 0.5, -0.5)
self.set_aspect(aspect)
ret = marks
self.title.set_y(1.05)
self.xaxis.tick_top()
self.xaxis.set_ticks_position('both')
self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
return ret
def matshow(self, Z, **kwargs):
"""
Plot the values of a 2D matrix or array as color-coded image.
The matrix will be shown the way it would be printed, with the first
row at the top. Row and column numbering is zero-based.
Parameters
----------
Z : array-like(M, N)
The matrix to be displayed.
Returns
-------
image : `~matplotlib.image.AxesImage`
Other Parameters
----------------
**kwargs : `~matplotlib.axes.Axes.imshow` arguments
See Also
--------
imshow : More general function to plot data on a 2D regular raster.
Notes
-----
This is just a convenience function wrapping `.imshow` to set useful
        defaults for displaying a matrix. In particular:
- Set ``origin='upper'``.
- Set ``interpolation='nearest'``.
- Set ``aspect='equal'``.
- Ticks are placed to the left and above.
- Ticks are formatted to show integer indices.
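        Examples
        --------
        A minimal, illustrative call; the matrix below is only a stand-in::

            import numpy as np
            import matplotlib.pyplot as plt

            fig, ax = plt.subplots()
            im = ax.matshow(np.diag(np.arange(5)))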
"""
Z = np.asanyarray(Z)
kw = {'origin': 'upper',
'interpolation': 'nearest',
'aspect': 'equal', # (already the imshow default)
**kwargs}
im = self.imshow(Z, **kw)
self.title.set_y(1.05)
self.xaxis.tick_top()
self.xaxis.set_ticks_position('both')
self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
return im
@_preprocess_data(replace_names=["dataset"])
def violinplot(self, dataset, positions=None, vert=True, widths=0.5,
showmeans=False, showextrema=True, showmedians=False,
points=100, bw_method=None):
"""
Make a violin plot.
Make a violin plot for each column of *dataset* or each vector in
sequence *dataset*. Each filled area extends to represent the
entire data range, with optional lines at the mean, the median,
the minimum, and the maximum.
Parameters
----------
dataset : Array or a sequence of vectors.
The input data.
positions : array-like, default = [1, 2, ..., n]
Sets the positions of the violins. The ticks and limits are
automatically set to match the positions.
vert : bool, default = True.
If true, creates a vertical violin plot.
Otherwise, creates a horizontal violin plot.
widths : array-like, default = 0.5
Either a scalar or a vector that sets the maximal width of
each violin. The default is 0.5, which uses about half of the
available horizontal space.
showmeans : bool, default = False
If `True`, will toggle rendering of the means.
showextrema : bool, default = True
If `True`, will toggle rendering of the extrema.
showmedians : bool, default = False
If `True`, will toggle rendering of the medians.
points : scalar, default = 100
Defines the number of points to evaluate each of the
gaussian kernel density estimations at.
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a
callable, it should take a `GaussianKDE` instance as its only
parameter and return a scalar. If None (default), 'scott' is used.
Returns
-------
result : dict
A dictionary mapping each component of the violinplot to a
list of the corresponding collection instances created. The
dictionary has the following keys:
- ``bodies``: A list of the `~.collections.PolyCollection`
instances containing the filled area of each violin.
- ``cmeans``: A `~.collections.LineCollection` instance that marks
the mean values of each of the violin's distribution.
- ``cmins``: A `~.collections.LineCollection` instance that marks
the bottom of each violin's distribution.
- ``cmaxes``: A `~.collections.LineCollection` instance that marks
the top of each violin's distribution.
- ``cbars``: A `~.collections.LineCollection` instance that marks
the centers of each violin's distribution.
- ``cmedians``: A `~.collections.LineCollection` instance that
marks the median values of each of the violin's distribution.
Notes
-----
.. [Notes section required for data comment. See #10189.]
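        Examples
        --------
        A minimal, illustrative call; the random samples below are only
        stand-ins for real data::

            import numpy as np
            import matplotlib.pyplot as plt

            rng = np.random.RandomState(0)
            data = [rng.normal(0, std, size=100) for std in (1, 2, 3)]
            fig, ax = plt.subplots()
            parts = ax.violinplot(data, showmedians=True)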
"""
def _kde_method(X, coords):
# fallback gracefully if the vector contains only one value
if np.all(X[0] == X):
return (X[0] == coords).astype(float)
kde = mlab.GaussianKDE(X, bw_method)
return kde.evaluate(coords)
vpstats = cbook.violin_stats(dataset, _kde_method, points=points)
return self.violin(vpstats, positions=positions, vert=vert,
widths=widths, showmeans=showmeans,
showextrema=showextrema, showmedians=showmedians)
def violin(self, vpstats, positions=None, vert=True, widths=0.5,
showmeans=False, showextrema=True, showmedians=False):
"""Drawing function for violin plots.
Draw a violin plot for each column of `vpstats`. Each filled area
extends to represent the entire data range, with optional lines at the
mean, the median, the minimum, and the maximum.
Parameters
----------
vpstats : list of dicts
A list of dictionaries containing stats for each violin plot.
Required keys are:
- ``coords``: A list of scalars containing the coordinates that
the violin's kernel density estimate were evaluated at.
- ``vals``: A list of scalars containing the values of the
kernel density estimate at each of the coordinates given
in *coords*.
- ``mean``: The mean value for this violin's dataset.
- ``median``: The median value for this violin's dataset.
- ``min``: The minimum value for this violin's dataset.
- ``max``: The maximum value for this violin's dataset.
positions : array-like, default = [1, 2, ..., n]
Sets the positions of the violins. The ticks and limits are
automatically set to match the positions.
vert : bool, default = True.
If true, plots the violins vertically.
Otherwise, plots the violins horizontally.
widths : array-like, default = 0.5
Either a scalar or a vector that sets the maximal width of
each violin. The default is 0.5, which uses about half of the
available horizontal space.
showmeans : bool, default = False
If true, will toggle rendering of the means.
showextrema : bool, default = True
If true, will toggle rendering of the extrema.
showmedians : bool, default = False
If true, will toggle rendering of the medians.
Returns
-------
result : dict
A dictionary mapping each component of the violinplot to a
list of the corresponding collection instances created. The
dictionary has the following keys:
- ``bodies``: A list of the `~.collections.PolyCollection`
instances containing the filled area of each violin.
- ``cmeans``: A `~.collections.LineCollection` instance that marks
the mean values of each of the violin's distribution.
- ``cmins``: A `~.collections.LineCollection` instance that marks
the bottom of each violin's distribution.
- ``cmaxes``: A `~.collections.LineCollection` instance that marks
the top of each violin's distribution.
- ``cbars``: A `~.collections.LineCollection` instance that marks
the centers of each violin's distribution.
- ``cmedians``: A `~.collections.LineCollection` instance that
marks the median values of each of the violin's distribution.
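        Examples
        --------
        ``vpstats`` is normally produced by `~.cbook.violin_stats` (as done by
        `~.Axes.violinplot`); the hand-built dictionary below is only an
        illustrative stand-in::

            import numpy as np
            import matplotlib.pyplot as plt

            coords = np.linspace(-3, 3, 100)
            vals = np.exp(-coords ** 2 / 2)        # a bell-shaped profile
            vpstats = [{'coords': coords, 'vals': vals, 'mean': 0.0,
                        'median': 0.0, 'min': -3.0, 'max': 3.0}]
            fig, ax = plt.subplots()
            parts = ax.violin(vpstats, positions=[1])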
"""
# Statistical quantities to be plotted on the violins
means = []
mins = []
maxes = []
medians = []
# Collections to be returned
artists = {}
N = len(vpstats)
datashape_message = ("List of violinplot statistics and `{0}` "
"values must have the same length")
# Validate positions
if positions is None:
positions = range(1, N + 1)
elif len(positions) != N:
raise ValueError(datashape_message.format("positions"))
# Validate widths
if np.isscalar(widths):
widths = [widths] * N
elif len(widths) != N:
raise ValueError(datashape_message.format("widths"))
# Calculate ranges for statistics lines
pmins = -0.25 * np.array(widths) + positions
pmaxes = 0.25 * np.array(widths) + positions
# Check whether we are rendering vertically or horizontally
if vert:
fill = self.fill_betweenx
perp_lines = self.hlines
par_lines = self.vlines
else:
fill = self.fill_between
perp_lines = self.vlines
par_lines = self.hlines
if rcParams['_internal.classic_mode']:
fillcolor = 'y'
edgecolor = 'r'
else:
fillcolor = edgecolor = self._get_lines.get_next_color()
# Render violins
bodies = []
for stats, pos, width in zip(vpstats, positions, widths):
# The 0.5 factor reflects the fact that we plot from v-p to
# v+p
vals = np.array(stats['vals'])
vals = 0.5 * width * vals / vals.max()
bodies += [fill(stats['coords'],
-vals + pos,
vals + pos,
facecolor=fillcolor,
alpha=0.3)]
means.append(stats['mean'])
mins.append(stats['min'])
maxes.append(stats['max'])
medians.append(stats['median'])
artists['bodies'] = bodies
# Render means
if showmeans:
artists['cmeans'] = perp_lines(means, pmins, pmaxes,
colors=edgecolor)
# Render extrema
if showextrema:
artists['cmaxes'] = perp_lines(maxes, pmins, pmaxes,
colors=edgecolor)
artists['cmins'] = perp_lines(mins, pmins, pmaxes,
colors=edgecolor)
artists['cbars'] = par_lines(positions, mins, maxes,
colors=edgecolor)
# Render medians
if showmedians:
artists['cmedians'] = perp_lines(medians,
pmins,
pmaxes,
colors=edgecolor)
return artists
# Methods that are entirely implemented in other modules.
table = mtable.table
    # args can be either Y or y1, y2, ... and all should be replaced
stackplot = _preprocess_data()(mstack.stackplot)
streamplot = _preprocess_data(
replace_names=["x", "y", "u", "v", "start_points"])(mstream.streamplot)
tricontour = mtri.tricontour
tricontourf = mtri.tricontourf
tripcolor = mtri.tripcolor
triplot = mtri.triplot
|
f360a80d6008d6fd447df02ffd81c0ffa418c748b295fef0b8e7d1ed4397d609
|
import collections
import numpy as np
import numbers
import warnings
import matplotlib.docstring as docstring
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
import matplotlib.scale as mscale
import matplotlib.cbook as cbook
from matplotlib.axes._base import _AxesBase
from matplotlib.ticker import (
AutoLocator,
AutoMinorLocator,
FixedLocator,
FuncFormatter,
LogFormatterSciNotation,
LogLocator,
NullLocator,
NullFormatter,
ScalarFormatter
)
from matplotlib.scale import Log10Transform
def _make_secondary_locator(rect, parent):
"""
Helper function to locate the secondary axes.
A locator gets used in `Axes.set_aspect` to override the default
locations... It is a function that takes an axes object and
a renderer and tells `set_aspect` where it is to be placed.
    This locator makes the transform be in axes-relative coordinates
    because that is how we specify the "location" of the secondary axes.
    Here *rect* is a rectangle [l, b, w, h] that specifies the location
    of the secondary axes in the axes-relative coordinates of the
    *parent* axes.
"""
_rect = mtransforms.Bbox.from_bounds(*rect)
def secondary_locator(ax, renderer):
# delay evaluating transform until draw time because the
        # parent transform may have changed (i.e. if window resized)
bb = mtransforms.TransformedBbox(_rect, parent.transAxes)
tr = parent.figure.transFigure.inverted()
bb = mtransforms.TransformedBbox(bb, tr)
return bb
return secondary_locator
class SecondaryAxis(_AxesBase):
"""
General class to hold a Secondary_X/Yaxis.
"""
def __init__(self, parent, orientation,
location, functions, **kwargs):
"""
See `.secondary_xaxis` and `.secondary_yaxis` for the doc string.
While there is no need for this to be private, it should really be
called by those higher level functions.
"""
self._functions = functions
self._parent = parent
self._orientation = orientation
self._ticks_set = False
if self._orientation == 'x':
super().__init__(self._parent.figure, [0, 1., 1, 0.0001], **kwargs)
self._axis = self.xaxis
self._locstrings = ['top', 'bottom']
self._otherstrings = ['left', 'right']
elif self._orientation == 'y':
super().__init__(self._parent.figure, [0, 1., 0.0001, 1], **kwargs)
self._axis = self.yaxis
self._locstrings = ['right', 'left']
self._otherstrings = ['top', 'bottom']
# this gets positioned w/o constrained_layout so exclude:
self._layoutbox = None
self._poslayoutbox = None
self.set_location(location)
self.set_functions(functions)
# styling:
if self._orientation == 'x':
otheraxis = self.yaxis
else:
otheraxis = self.xaxis
otheraxis.set_major_locator(mticker.NullLocator())
otheraxis.set_ticks_position('none')
for st in self._otherstrings:
self.spines[st].set_visible(False)
for st in self._locstrings:
self.spines[st].set_visible(True)
if self._pos < 0.5:
# flip the location strings...
self._locstrings = self._locstrings[::-1]
self.set_alignment(self._locstrings[0])
def set_alignment(self, align):
"""
Set if axes spine and labels are drawn at top or bottom (or left/right)
of the axes.
Parameters
----------
        align : str
            Either 'top' or 'bottom' for orientation='x', or
            'left' or 'right' for orientation='y'.
"""
if align in self._locstrings:
if align == self._locstrings[1]:
# need to change the orientation.
self._locstrings = self._locstrings[::-1]
elif align != self._locstrings[0]:
raise ValueError('"{}" is not a valid axis orientation, '
'not changing the orientation;'
'choose "{}" or "{}""'.format(align,
self._locstrings[0], self._locstrings[1]))
self.spines[self._locstrings[0]].set_visible(True)
self.spines[self._locstrings[1]].set_visible(False)
self._axis.set_ticks_position(align)
self._axis.set_label_position(align)
def set_location(self, location):
"""
Set the vertical or horizontal location of the axes in
parent-normalized co-ordinates.
Parameters
----------
location : string or scalar
The position to put the secondary axis. Strings can be 'top' or
'bottom' for orientation='x' and 'right' or 'left' for
orientation='y', scalar can be a float indicating the relative
position on the parent axes to put the new axes, 0.0 being the
bottom (or left) and 1.0 being the top (or right).
"""
# This puts the rectangle into figure-relative coordinates.
if isinstance(location, str):
if location in ['top', 'right']:
self._pos = 1.
elif location in ['bottom', 'left']:
self._pos = 0.
else:
raise ValueError("location must be '{}', '{}', or a "
"float, not '{}'".format(location,
self._locstrings[0], self._locstrings[1]))
else:
self._pos = location
self._loc = location
if self._orientation == 'x':
bounds = [0, self._pos, 1., 1e-10]
else:
bounds = [self._pos, 0, 1e-10, 1]
secondary_locator = _make_secondary_locator(bounds, self._parent)
# this locator lets the axes move in the parent axes coordinates.
# so it never needs to know where the parent is explicitly in
# figure co-ordinates.
        # it gets called in ``ax.apply_aspect()`` (of all places).
self.set_axes_locator(secondary_locator)
def apply_aspect(self, position=None):
self._set_lims()
super().apply_aspect(position)
def set_ticks(self, ticks, minor=False):
"""
Set the x ticks with list of *ticks*
Parameters
----------
ticks : list
List of x-axis tick locations.
minor : bool, optional
If ``False`` sets major ticks, if ``True`` sets minor ticks.
Default is ``False``.
"""
ret = self._axis.set_ticks(ticks, minor=minor)
self.stale = True
self._ticks_set = True
return ret
def set_functions(self, functions):
"""
Set how the secondary axis converts limits from the parent axes.
Parameters
----------
functions : 2-tuple of func, or `Transform` with an inverse.
Transform between the parent axis values and the secondary axis
values.
If supplied as a 2-tuple of functions, the first function is
the forward transform function and the second is the inverse
transform.
If a transform is supplied, then the transform must have an
inverse.
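        Examples
        --------
        A minimal sketch, assuming the secondary axis was created with the
        parent's `.Axes.secondary_xaxis`; the degree/radian conversion pair
        below is only illustrative::

            import numpy as np
            import matplotlib.pyplot as plt

            fig, ax = plt.subplots()
            secax = ax.secondary_xaxis('top')
            secax.set_functions((np.deg2rad, np.rad2deg))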
"""
if self._orientation == 'x':
set_scale = self.set_xscale
parent_scale = self._parent.get_xscale()
else:
set_scale = self.set_yscale
parent_scale = self._parent.get_yscale()
# we need to use a modified scale so the scale can receive the
# transform. Only types supported are linear and log10 for now.
# Probably possible to add other transforms as a todo...
if parent_scale == 'log':
defscale = 'functionlog'
else:
defscale = 'function'
if (isinstance(functions, tuple) and len(functions) == 2 and
callable(functions[0]) and callable(functions[1])):
            # accept an arbitrary conversion supplied as a two-tuple of
            # forward and inverse functions.
self._functions = functions
elif functions is None:
self._functions = (lambda x: x, lambda x: x)
else:
raise ValueError('functions argument of secondary axes '
'must be a two-tuple of callable functions '
'with the first function being the transform '
'and the second being the inverse')
set_scale(defscale, functions=self._functions)
def draw(self, renderer=None, inframe=False):
"""
Draw the secondary axes.
Consults the parent axes for its limits and converts them
using the converter specified by
`~.axes._secondary_axes.set_functions` (or *functions*
parameter when axes initialized.)
"""
self._set_lims()
# this sets the scale in case the parent has set its scale.
self._set_scale()
super().draw(renderer=renderer, inframe=inframe)
def _set_scale(self):
"""
Check if parent has set its scale
"""
if self._orientation == 'x':
pscale = self._parent.xaxis.get_scale()
set_scale = self.set_xscale
if self._orientation == 'y':
pscale = self._parent.yaxis.get_scale()
set_scale = self.set_yscale
if pscale == 'log':
defscale = 'functionlog'
else:
defscale = 'function'
if self._ticks_set:
ticks = self._axis.get_ticklocs()
set_scale(defscale, functions=self._functions)
# OK, set_scale sets the locators, but if we've called
# axsecond.set_ticks, we want to keep those.
if self._ticks_set:
self._axis.set_major_locator(FixedLocator(ticks))
def _set_lims(self):
"""
Set the limits based on parent limits and the convert method
between the parent and this secondary axes
"""
if self._orientation == 'x':
lims = self._parent.get_xlim()
set_lim = self.set_xlim
trans = self.xaxis.get_transform()
if self._orientation == 'y':
lims = self._parent.get_ylim()
set_lim = self.set_ylim
trans = self.yaxis.get_transform()
order = lims[0] < lims[1]
lims = self._functions[0](np.array(lims))
neworder = lims[0] < lims[1]
if neworder != order:
# flip because the transform will take care of the flipping..
lims = lims[::-1]
set_lim(lims)
def get_tightbbox(self, renderer, call_axes_locator=True):
"""
Return the tight bounding box of the axes.
        The dimensions of the Bbox are in canvas coordinates.
        If *call_axes_locator* is *False*, it does not call the
        _axes_locator attribute, which is necessary to get the correct
        bounding box. ``call_axes_locator=False`` can be used if the
        caller is only interested in the relative size of the tightbbox
        compared to the axes bbox.
"""
bb = []
if not self.get_visible():
return None
self._set_lims()
locator = self.get_axes_locator()
if locator and call_axes_locator:
pos = locator(self, renderer)
self.apply_aspect(pos)
else:
self.apply_aspect()
if self._orientation == 'x':
bb_axis = self.xaxis.get_tightbbox(renderer)
else:
bb_axis = self.yaxis.get_tightbbox(renderer)
if bb_axis:
bb.append(bb_axis)
bb.append(self.get_window_extent(renderer))
_bbox = mtransforms.Bbox.union(
[b for b in bb if b.width != 0 or b.height != 0])
return _bbox
def set_aspect(self, *args, **kwargs):
"""
        Secondary axes cannot set the aspect ratio, so calling this just
        emits a warning.
"""
cbook._warn_external("Secondary axes can't set the aspect ratio")
def set_xlabel(self, xlabel, fontdict=None, labelpad=None, **kwargs):
"""
Set the label for the x-axis.
Parameters
----------
xlabel : str
The label text.
labelpad : scalar, optional, default: None
Spacing in points between the label and the x-axis.
Other Parameters
----------------
**kwargs : `.Text` properties
`.Text` properties control the appearance of the label.
See also
--------
        text : for information on how overrides and the optional arguments work
"""
if labelpad is not None:
self.xaxis.labelpad = labelpad
return self.xaxis.set_label_text(xlabel, fontdict, **kwargs)
def set_ylabel(self, ylabel, fontdict=None, labelpad=None, **kwargs):
"""
        Set the label for the y-axis.
Parameters
----------
ylabel : str
The label text.
labelpad : scalar, optional, default: None
            Spacing in points between the label and the y-axis.
Other Parameters
----------------
**kwargs : `.Text` properties
`.Text` properties control the appearance of the label.
See also
--------
        text : for information on how overrides and the optional arguments work
"""
if labelpad is not None:
self.yaxis.labelpad = labelpad
return self.yaxis.set_label_text(ylabel, fontdict, **kwargs)
def set_color(self, color):
"""
Change the color of the secondary axes and all decorators
Parameters
----------
color : Matplotlib color
"""
if self._orientation == 'x':
self.tick_params(axis='x', colors=color)
self.spines['bottom'].set_color(color)
self.spines['top'].set_color(color)
self.xaxis.label.set_color(color)
else:
self.tick_params(axis='y', colors=color)
self.spines['left'].set_color(color)
self.spines['right'].set_color(color)
self.yaxis.label.set_color(color)
_secax_docstring = '''
Warnings
--------
This method is experimental as of 3.1, and the API may change.
Parameters
----------
location : string or scalar
    The position to put the secondary axis. Strings can be 'top' or
    'bottom' for x-oriented axes, or 'left' or 'right' for y-oriented axes.
    Alternatively, a float gives the relative position on the parent axes at
    which to put the new axes, 0.0 being the bottom (or left) and 1.0 being
    the top (or right).
functions : 2-tuple of func, or Transform with an inverse
    If a 2-tuple of functions, the user specifies the transform
    function and its inverse, e.g.
    `functions=(lambda x: 2 / x, lambda x: 2 / x)` would be a
    reciprocal transform with a factor of 2.
The user can also directly supply a subclass of
`.transforms.Transform` so long as it has an inverse.
See :doc:`/gallery/subplots_axes_and_figures/secondary_axis`
for examples of making these conversions.
Other Parameters
----------------
**kwargs : `~matplotlib.axes.Axes` properties.
Other miscellaneous axes parameters.
Returns
-------
ax : axes._secondary_axes.SecondaryAxis
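Examples
--------
A minimal sketch, shown here for a secondary x-axis; the degree/radian
conversion pair below is only illustrative::

    import numpy as np
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    ax.plot(np.arange(0, 360, 10), np.random.rand(36))
    secax = ax.secondary_xaxis('top',
                               functions=(np.deg2rad, np.rad2deg))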
'''
docstring.interpd.update(_secax_docstring=_secax_docstring)
|
155dffd11ef019cf92699b6c2f9a630ce8a7571dd2ba3bc41bfaf09b244bb1e8
|
import functools
import uuid
from matplotlib import cbook, docstring
import matplotlib.artist as martist
from matplotlib.axes._axes import Axes
from matplotlib.gridspec import GridSpec, SubplotSpec
import matplotlib._layoutbox as layoutbox
class SubplotBase(object):
"""
Base class for subplots, which are :class:`Axes` instances with
additional methods to facilitate generating and manipulating a set
of :class:`Axes` within a figure.
"""
def __init__(self, fig, *args, **kwargs):
"""
*fig* is a :class:`matplotlib.figure.Figure` instance.
*args* is the tuple (*numRows*, *numCols*, *plotNum*), where
the array of subplots in the figure has dimensions *numRows*,
*numCols*, and where *plotNum* is the number of the subplot
being created. *plotNum* starts at 1 in the upper left
corner and increases to the right.
        If *numRows*, *numCols*, and *plotNum* are all single-digit integers,
        *args* can be the
decimal integer *numRows* * 100 + *numCols* * 10 + *plotNum*.
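        For example, under that convention these two calls describe the same
        subplot (subplots are normally created through `.Figure.add_subplot`
        rather than by instantiating this class directly)::

            import matplotlib.pyplot as plt

            fig = plt.figure()
            ax1 = fig.add_subplot(221)          # shorthand form
            ax2 = fig.add_subplot(2, 2, 1)      # equivalent explicit form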
"""
self.figure = fig
if len(args) == 1:
if isinstance(args[0], SubplotSpec):
self._subplotspec = args[0]
else:
try:
s = str(int(args[0]))
rows, cols, num = map(int, s)
except ValueError:
raise ValueError('Single argument to subplot must be '
'a 3-digit integer')
self._subplotspec = GridSpec(rows, cols,
figure=self.figure)[num - 1]
# num - 1 for converting from MATLAB to python indexing
elif len(args) == 3:
rows, cols, num = args
rows = int(rows)
cols = int(cols)
if rows <= 0:
raise ValueError(f'Number of rows must be > 0, not {rows}')
if cols <= 0:
raise ValueError(f'Number of columns must be > 0, not {cols}')
if isinstance(num, tuple) and len(num) == 2:
num = [int(n) for n in num]
self._subplotspec = GridSpec(
rows, cols,
figure=self.figure)[(num[0] - 1):num[1]]
else:
if num < 1 or num > rows*cols:
raise ValueError(
f"num must be 1 <= num <= {rows*cols}, not {num}")
self._subplotspec = GridSpec(
rows, cols, figure=self.figure)[int(num) - 1]
# num - 1 for converting from MATLAB to python indexing
else:
raise ValueError(f'Illegal argument(s) to subplot: {args}')
self.update_params()
# _axes_class is set in the subplot_class_factory
self._axes_class.__init__(self, fig, self.figbox, **kwargs)
# add a layout box to this, for both the full axis, and the poss
# of the axis. We need both because the axes may become smaller
# due to parasitic axes and hence no longer fill the subplotspec.
if self._subplotspec._layoutbox is None:
self._layoutbox = None
self._poslayoutbox = None
else:
name = self._subplotspec._layoutbox.name + '.ax'
name = name + layoutbox.seq_id()
self._layoutbox = layoutbox.LayoutBox(
parent=self._subplotspec._layoutbox,
name=name,
artist=self)
self._poslayoutbox = layoutbox.LayoutBox(
parent=self._layoutbox,
name=self._layoutbox.name+'.pos',
pos=True, subplot=True, artist=self)
def __reduce__(self):
# get the first axes class which does not inherit from a subplotbase
axes_class = next(
c for c in type(self).__mro__
if issubclass(c, Axes) and not issubclass(c, SubplotBase))
return (_picklable_subplot_class_constructor,
(axes_class,),
self.__getstate__())
def get_geometry(self):
"""get the subplot geometry, e.g., 2,2,3"""
rows, cols, num1, num2 = self.get_subplotspec().get_geometry()
return rows, cols, num1 + 1 # for compatibility
# COVERAGE NOTE: Never used internally or from examples
def change_geometry(self, numrows, numcols, num):
"""change subplot geometry, e.g., from 1,1,1 to 2,2,3"""
self._subplotspec = GridSpec(numrows, numcols,
figure=self.figure)[num - 1]
self.update_params()
self.set_position(self.figbox)
def get_subplotspec(self):
"""get the SubplotSpec instance associated with the subplot"""
return self._subplotspec
def set_subplotspec(self, subplotspec):
"""set the SubplotSpec instance associated with the subplot"""
self._subplotspec = subplotspec
def get_gridspec(self):
"""get the GridSpec instance associated with the subplot"""
return self._subplotspec.get_gridspec()
def update_params(self):
"""update the subplot position from fig.subplotpars"""
self.figbox, self.rowNum, self.colNum, self.numRows, self.numCols = \
self.get_subplotspec().get_position(self.figure,
return_all=True)
def is_first_col(self):
return self.colNum == 0
def is_first_row(self):
return self.rowNum == 0
def is_last_row(self):
return self.rowNum == self.numRows - 1
def is_last_col(self):
return self.colNum == self.numCols - 1
# COVERAGE NOTE: Never used internally.
def label_outer(self):
"""Only show "outer" labels and tick labels.
x-labels are only kept for subplots on the last row; y-labels only for
subplots on the first column.
"""
lastrow = self.is_last_row()
firstcol = self.is_first_col()
if not lastrow:
for label in self.get_xticklabels(which="both"):
label.set_visible(False)
self.get_xaxis().get_offset_text().set_visible(False)
self.set_xlabel("")
if not firstcol:
for label in self.get_yticklabels(which="both"):
label.set_visible(False)
self.get_yaxis().get_offset_text().set_visible(False)
self.set_ylabel("")
def _make_twin_axes(self, *args, **kwargs):
"""
Make a twinx axes of self. This is used for twinx and twiny.
"""
if 'sharex' in kwargs and 'sharey' in kwargs:
# The following line is added in v2.2 to avoid breaking Seaborn,
# which currently uses this internal API.
if kwargs["sharex"] is not self and kwargs["sharey"] is not self:
raise ValueError("Twinned Axes may share only one axis")
# The dance here with label is to force add_subplot() to create a new
# Axes (by passing in a label never seen before). Note that this does
# not affect plot reactivation by subplot() as twin axes can never be
# reactivated by subplot().
sentinel = str(uuid.uuid4())
real_label = kwargs.pop("label", sentinel)
twin = self.figure.add_subplot(
self.get_subplotspec(), *args, label=sentinel, **kwargs)
if real_label is not sentinel:
twin.set_label(real_label)
self.set_adjustable('datalim')
twin.set_adjustable('datalim')
if self._layoutbox is not None and twin._layoutbox is not None:
# make the layout boxes be explicitly the same
twin._layoutbox.constrain_same(self._layoutbox)
twin._poslayoutbox.constrain_same(self._poslayoutbox)
self._twinned_axes.join(self, twin)
return twin
# This is here to support cartopy, which was using a private part of the
# API to register its Axes subclasses.
# In 3.1 this should be changed to a dict subclass that warns on use.
# In 3.3, to a dict subclass that raises a useful exception on use.
# In 3.4 it should be removed.
# The slow timeline is to give cartopy enough time to get several
# releases out before we break them.
_subplot_classes = {}
@functools.lru_cache(None)
def subplot_class_factory(axes_class=None):
"""
This makes a new class that inherits from `.SubplotBase` and the
given axes_class (which is assumed to be a subclass of `.axes.Axes`).
This is perhaps a little bit roundabout to make a new class on
the fly like this, but it means that a new Subplot class does
not have to be created for every type of Axes.
"""
if axes_class is None:
axes_class = Axes
try:
# Avoid creating two different instances of GeoAxesSubplot...
# Only a temporary backcompat fix. This should be removed in
# 3.4
return next(cls for cls in SubplotBase.__subclasses__()
if cls.__bases__ == (SubplotBase, axes_class))
except StopIteration:
return type("%sSubplot" % axes_class.__name__,
(SubplotBase, axes_class),
{'_axes_class': axes_class})
# This is provided for backward compatibility
Subplot = subplot_class_factory()
def _picklable_subplot_class_constructor(axes_class):
"""
    This stub function returns an empty instance of the appropriate subplot
    class when called with an axes class. This is purely to allow pickling of
    Axes and Subplots.
"""
subplot_class = subplot_class_factory(axes_class)
return subplot_class.__new__(subplot_class)
docstring.interpd.update(Axes=martist.kwdoc(Axes))
docstring.dedent_interpd(Axes.__init__)
docstring.interpd.update(Subplot=martist.kwdoc(Axes))
|
ab3d23a6e9602ca857b7c8b365a476e8050dd7d91c55694f931aee28f5efa2b1
|
import contextlib
import functools
import inspect
import warnings
class MatplotlibDeprecationWarning(UserWarning):
"""
A class for issuing deprecation warnings for Matplotlib users.
In light of the fact that Python builtin DeprecationWarnings are ignored
by default as of Python 2.7 (see link below), this class was put in to
allow for the signaling of deprecation, but via UserWarnings which are not
ignored by default.
https://docs.python.org/dev/whatsnew/2.7.html#the-future-for-python-2-x
"""
mplDeprecation = MatplotlibDeprecationWarning
"""mplDeprecation is deprecated. Use MatplotlibDeprecationWarning instead."""
def _generate_deprecation_warning(
since, message='', name='', alternative='', pending=False, obj_type='',
addendum='', *, removal=''):
if pending:
if removal:
raise ValueError(
"A pending deprecation cannot have a scheduled removal")
else:
if removal:
removal = "in {}".format(removal)
else:
removal = {"2.2": "in 3.1", "3.0": "in 3.2", "3.1": "in 3.3"}.get(
since, "two minor releases later")
if not message:
message = (
"\nThe %(name)s %(obj_type)s"
+ (" will be deprecated in a future version"
if pending else
(" was deprecated in Matplotlib %(since)s"
+ (" and will be removed %(removal)s"
if removal else
"")))
+ "."
+ (" Use %(alternative)s instead." if alternative else "")
+ (" %(addendum)s" if addendum else ""))
warning_cls = (PendingDeprecationWarning if pending
else MatplotlibDeprecationWarning)
return warning_cls(message % dict(
func=name, name=name, obj_type=obj_type, since=since, removal=removal,
alternative=alternative, addendum=addendum))
def warn_deprecated(
since, *, message='', name='', alternative='', pending=False,
obj_type='', addendum='', removal=''):
"""
Used to display deprecation in a standard way.
Parameters
----------
since : str
The release at which this API became deprecated.
message : str, optional
Override the default deprecation message. The format
specifier `%(name)s` may be used for the name of the function,
and `%(alternative)s` may be used in the deprecation message
to insert the name of an alternative to the deprecated
function. `%(obj_type)s` may be used to insert a friendly name
for the type of object being deprecated.
name : str, optional
The name of the deprecated object.
alternative : str, optional
An alternative API that the user may use in place of the deprecated
API. The deprecation warning will tell the user about this alternative
if provided.
pending : bool, optional
If True, uses a PendingDeprecationWarning instead of a
DeprecationWarning. Cannot be used together with *removal*.
obj_type : str, optional
The object type being deprecated.
addendum : str, optional
Additional text appended directly to the final message.
removal : str, optional
The expected removal version. With the default (an empty string), a
removal version is automatically computed from *since*. Set to other
Falsy values to not schedule a removal date. Cannot be used together
with *pending*.
Examples
--------
Basic example::
# To warn of the deprecation of "matplotlib.name_of_module"
warn_deprecated('1.4.0', name='matplotlib.name_of_module',
obj_type='module')
"""
warning = _generate_deprecation_warning(
since, message, name, alternative, pending, obj_type, addendum,
removal=removal)
from . import _warn_external
_warn_external(warning)
def deprecated(since, *, message='', name='', alternative='', pending=False,
obj_type=None, addendum='', removal=''):
"""
Decorator to mark a function, a class, or a property as deprecated.
When deprecating a classmethod, a staticmethod, or a property, the
``@deprecated`` decorator should go *under* the ``@classmethod``, etc.
decorator (i.e., `deprecated` should directly decorate the underlying
callable).
Parameters
----------
since : str
The release at which this API became deprecated. This is
required.
message : str, optional
Override the default deprecation message. The format
specifier `%(name)s` may be used for the name of the object,
and `%(alternative)s` may be used in the deprecation message
to insert the name of an alternative to the deprecated
object.
name : str, optional
The name of the deprecated object; if not provided the name
is automatically determined from the passed in object,
though this is useful in the case of renamed functions, where
the new function is just assigned to the name of the
deprecated function. For example::
def new_function():
...
old_function = new_function
alternative : str, optional
An alternative API that the user may use in place of the deprecated
API. The deprecation warning will tell the user about this alternative
if provided.
pending : bool, optional
If True, uses a PendingDeprecationWarning instead of a
DeprecationWarning. Cannot be used together with *removal*.
obj_type : str, optional
The object type being deprecated; by default, 'function' if decorating
a function and 'class' if decorating a class.
addendum : str, optional
Additional text appended directly to the final message.
removal : str, optional
The expected removal version. With the default (an empty string), a
removal version is automatically computed from *since*. Set to other
Falsy values to not schedule a removal date. Cannot be used together
with *pending*.
Examples
--------
Basic example::
@deprecated('1.4.0')
def the_function_to_deprecate():
pass
"""
def deprecate(obj, message=message, name=name, alternative=alternative,
pending=pending, obj_type=obj_type, addendum=addendum):
if isinstance(obj, type):
if obj_type is None:
obj_type = "class"
func = obj.__init__
name = name or obj.__name__
old_doc = obj.__doc__
def finalize(wrapper, new_doc):
try:
obj.__doc__ = new_doc
except AttributeError: # Can't set on some extension objects.
pass
obj.__init__ = wrapper
return obj
elif isinstance(obj, property):
obj_type = "attribute"
func = None
name = name or obj.fget.__name__
old_doc = obj.__doc__
class _deprecated_property(property):
def __get__(self, instance, owner):
if instance is not None:
from . import _warn_external
_warn_external(warning)
return super().__get__(instance, owner)
def __set__(self, instance, value):
if instance is not None:
from . import _warn_external
_warn_external(warning)
return super().__set__(instance, value)
def __delete__(self, instance):
if instance is not None:
from . import _warn_external
_warn_external(warning)
return super().__delete__(instance)
def finalize(_, new_doc):
return _deprecated_property(
fget=obj.fget, fset=obj.fset, fdel=obj.fdel, doc=new_doc)
else:
if obj_type is None:
obj_type = "function"
func = obj
name = name or obj.__name__
old_doc = func.__doc__
def finalize(wrapper, new_doc):
wrapper = functools.wraps(func)(wrapper)
wrapper.__doc__ = new_doc
return wrapper
warning = _generate_deprecation_warning(
since, message, name, alternative, pending, obj_type, addendum,
removal=removal)
def wrapper(*args, **kwargs):
from . import _warn_external
_warn_external(warning)
return func(*args, **kwargs)
old_doc = inspect.cleandoc(old_doc or '').strip('\n')
message = message.strip()
new_doc = ('[*Deprecated*] {old_doc}\n'
'\n'
'.. deprecated:: {since}\n'
' {message}'
.format(since=since, message=message, old_doc=old_doc))
if not old_doc:
# This is to prevent a spurious 'unexpected unindent' warning from
# docutils when the original docstring was blank.
new_doc += r'\ '
return finalize(wrapper, new_doc)
return deprecate
def _rename_parameter(since, old, new, func=None):
"""
Decorator indicating that parameter *old* of *func* is renamed to *new*.
The actual implementation of *func* should use *new*, not *old*. If *old*
is passed to *func*, a DeprecationWarning is emitted, and its value is
used, even if *new* is also passed by keyword (this is to simplify pyplot
wrapper functions, which always pass *new* explicitly to the Axes method).
If *new* is also passed but positionally, a TypeError will be raised by the
underlying function during argument binding.
Examples
--------
::
@_rename_parameter("3.1", "bad_name", "good_name")
def func(good_name): ...
"""
if func is None:
return functools.partial(_rename_parameter, since, old, new)
signature = inspect.signature(func)
assert old not in signature.parameters, (
f"Matplotlib internal error: {old!r} cannot be a parameter for "
f"{func.__name__}()")
assert new in signature.parameters, (
f"Matplotlib internal error: {new!r} must be a parameter for "
f"{func.__name__}()")
@functools.wraps(func)
def wrapper(*args, **kwargs):
if old in kwargs:
warn_deprecated(
since, message=f"The {old!r} parameter of {func.__name__}() "
f"has been renamed {new!r} since Matplotlib {since}; support "
f"for the old name will be dropped %(removal)s.")
kwargs[new] = kwargs.pop(old)
return func(*args, **kwargs)
# wrapper() must keep the same documented signature as func(): if we
# instead made both *old* and *new* appear in wrapper()'s signature, they
# would both show up in the pyplot function for an Axes method as well and
# pyplot would explicitly pass both arguments to the Axes method.
return wrapper
class _deprecated_parameter_class:
def __repr__(self):
return "<deprecated parameter>"
_deprecated_parameter = _deprecated_parameter_class()
def _delete_parameter(since, name, func=None):
"""
Decorator indicating that parameter *name* of *func* is being deprecated.
The actual implementation of *func* should keep the *name* parameter in its
signature.
Parameters that come after the deprecated parameter effectively become
keyword-only (as they cannot be passed positionally without triggering the
DeprecationWarning on the deprecated parameter), and should be marked as
such after the deprecation period has passed and the deprecated parameter
is removed.
Examples
--------
::
@_delete_parameter("3.1", "unused")
def func(used_arg, other_arg, unused, more_args): ...
"""
if func is None:
return functools.partial(_delete_parameter, since, name)
signature = inspect.signature(func)
assert name in signature.parameters, (
f"Matplotlib internal error: {name!r} must be a parameter for "
f"{func.__name__}()")
func.__signature__ = signature.replace(parameters=[
param.replace(default=_deprecated_parameter) if param.name == name
else param
for param in signature.parameters.values()])
@functools.wraps(func)
def wrapper(*args, **kwargs):
arguments = func.__signature__.bind(*args, **kwargs).arguments
# We cannot just check `name not in arguments` because the pyplot
# wrappers always pass all arguments explicitly.
if name in arguments and arguments[name] != _deprecated_parameter:
warn_deprecated(
since, message=f"The {name!r} parameter of {func.__name__}() "
f"is deprecated since Matplotlib {since} and will be removed "
f"%(removal)s. If any parameter follows {name!r}, they "
f"should be pass as keyword, not positionally.")
return func(*args, **kwargs)
return wrapper
def _make_keyword_only(since, name, func=None):
"""
Decorator indicating that passing parameter *name* (or any of the following
ones) positionally to *func* is being deprecated.
Note that this decorator **cannot** be applied to a function that has a
    pyplot-level wrapper, as the wrapper always passes all arguments by keyword.
If it is used, users will see spurious DeprecationWarnings every time they
call the pyplot wrapper.
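    Examples
    --------
    A sketch; the function and parameter names below are purely
    illustrative::

        @_make_keyword_only("3.1", "size")
        def func(x, size=None, color=None): ...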
"""
if func is None:
return functools.partial(_make_keyword_only, since, name)
signature = inspect.signature(func)
POK = inspect.Parameter.POSITIONAL_OR_KEYWORD
KWO = inspect.Parameter.KEYWORD_ONLY
assert (name in signature.parameters
and signature.parameters[name].kind == POK), (
f"Matplotlib internal error: {name!r} must be a positional-or-keyword "
f"parameter for {func.__name__}()")
names = [*signature.parameters]
kwonly = [name for name in names[names.index(name):]
if signature.parameters[name].kind == POK]
func.__signature__ = signature.replace(parameters=[
param.replace(kind=inspect.Parameter.KEYWORD_ONLY)
if param.name in kwonly
else param
for param in signature.parameters.values()])
@functools.wraps(func)
def wrapper(*args, **kwargs):
bound = signature.bind(*args, **kwargs)
if name in bound.arguments and name not in kwargs:
warn_deprecated(
since, message="Passing the %(name)s %(obj_type)s "
"positionally is deprecated since Matplotlib %(since)s; the "
"parameter will become keyword-only %(removal)s.",
name=name, obj_type=f"parameter of {func.__name__}()")
return func(*args, **kwargs)
return wrapper
@contextlib.contextmanager
def _suppress_matplotlib_deprecation_warning():
with warnings.catch_warnings():
warnings.simplefilter("ignore", MatplotlibDeprecationWarning)
yield
|
40b8ad5b65fb14cc75121d7cbb6d958673242b7aaf9e30332d4f212fb317deb5
|
"""
A collection of utility functions and classes. Originally, many
(but not all) were from the Python Cookbook -- hence the name cbook.
This module is safe to import from anywhere within matplotlib;
it imports matplotlib only at runtime.
"""
import collections
import collections.abc
import contextlib
import functools
import glob
import gzip
import itertools
import locale
import numbers
import operator
import os
from pathlib import Path
import re
import subprocess
import sys
import time
import traceback
import types
import warnings
import weakref
from weakref import WeakMethod
import numpy as np
import matplotlib
from .deprecation import (
deprecated, warn_deprecated,
_rename_parameter, _delete_parameter, _make_keyword_only,
_suppress_matplotlib_deprecation_warning,
MatplotlibDeprecationWarning, mplDeprecation)
@deprecated("3.0")
def unicode_safe(s):
if isinstance(s, bytes):
try:
# On some systems, locale.getpreferredencoding returns None,
# which can break unicode; and the sage project reports that
# some systems have incorrect locale specifications, e.g.,
# an encoding instead of a valid locale name. Another
# pathological case that has been reported is an empty string.
# On some systems, getpreferredencoding sets the locale, which has
# side effects. Passing False eliminates those side effects.
preferredencoding = locale.getpreferredencoding(
matplotlib.rcParams['axes.formatter.use_locale']).strip()
if not preferredencoding:
preferredencoding = None
except (ValueError, ImportError, AttributeError):
preferredencoding = None
if preferredencoding is None:
return str(s)
else:
return str(s, preferredencoding)
return s
def _exception_printer(exc):
traceback.print_exc()
class _StrongRef:
"""
Wrapper similar to a weakref, but keeping a strong reference to the object.
"""
def __init__(self, obj):
self._obj = obj
def __call__(self):
return self._obj
def __eq__(self, other):
return isinstance(other, _StrongRef) and self._obj == other._obj
def __hash__(self):
return hash(self._obj)
class CallbackRegistry(object):
"""Handle registering and disconnecting for a set of signals and callbacks:
>>> def oneat(x):
... print('eat', x)
>>> def ondrink(x):
... print('drink', x)
>>> from matplotlib.cbook import CallbackRegistry
>>> callbacks = CallbackRegistry()
>>> id_eat = callbacks.connect('eat', oneat)
>>> id_drink = callbacks.connect('drink', ondrink)
>>> callbacks.process('drink', 123)
drink 123
>>> callbacks.process('eat', 456)
eat 456
>>> callbacks.process('be merry', 456) # nothing will be called
>>> callbacks.disconnect(id_eat)
>>> callbacks.process('eat', 456) # nothing will be called
In practice, one should always disconnect all callbacks when they are
no longer needed to avoid dangling references (and thus memory leaks).
However, real code in Matplotlib rarely does so, and due to its design,
it is rather difficult to place this kind of code. To get around this,
and prevent this class of memory leaks, we instead store weak references
to bound methods only, so when the destination object needs to die, the
CallbackRegistry won't keep it alive.
Parameters
----------
exception_handler : callable, optional
If provided must have signature ::
def handler(exc: Exception) -> None:
If not None this function will be called with any `Exception`
subclass raised by the callbacks in `CallbackRegistry.process`.
The handler may either consume the exception or re-raise.
The callable must be pickle-able.
The default handler is ::
def h(exc):
traceback.print_exc()
"""
# We maintain two mappings:
# callbacks: signal -> {cid -> callback}
# _func_cid_map: signal -> {callback -> cid}
# (actually, callbacks are weakrefs to the actual callbacks).
def __init__(self, exception_handler=_exception_printer):
self.exception_handler = exception_handler
self.callbacks = {}
self._cid_gen = itertools.count()
self._func_cid_map = {}
# In general, callbacks may not be pickled; thus, we simply recreate an
# empty dictionary at unpickling. In order to ensure that `__setstate__`
# (which just defers to `__init__`) is called, `__getstate__` must
# return a truthy value (for pickle protocol>=3, i.e. Py3, the
# *actual* behavior is that `__setstate__` will be called as long as
# `__getstate__` does not return `None`, but this is undocumented -- see
# http://bugs.python.org/issue12290).
def __getstate__(self):
return {'exception_handler': self.exception_handler}
def __setstate__(self, state):
self.__init__(**state)
def connect(self, s, func):
"""Register *func* to be called when signal *s* is generated.
"""
self._func_cid_map.setdefault(s, {})
try:
proxy = WeakMethod(func, self._remove_proxy)
except TypeError:
proxy = _StrongRef(func)
if proxy in self._func_cid_map[s]:
return self._func_cid_map[s][proxy]
cid = next(self._cid_gen)
self._func_cid_map[s][proxy] = cid
self.callbacks.setdefault(s, {})
self.callbacks[s][cid] = proxy
return cid
def _remove_proxy(self, proxy):
for signal, proxies in list(self._func_cid_map.items()):
try:
del self.callbacks[signal][proxies[proxy]]
except KeyError:
pass
if len(self.callbacks[signal]) == 0:
del self.callbacks[signal]
del self._func_cid_map[signal]
def disconnect(self, cid):
"""Disconnect the callback registered with callback id *cid*.
"""
for eventname, callbackd in list(self.callbacks.items()):
try:
del callbackd[cid]
except KeyError:
continue
else:
for signal, functions in list(self._func_cid_map.items()):
for function, value in list(functions.items()):
if value == cid:
del functions[function]
return
def process(self, s, *args, **kwargs):
"""
Process signal *s*.
All of the functions registered to receive callbacks on *s* will be
called with ``*args`` and ``**kwargs``.
"""
for cid, ref in list(self.callbacks.get(s, {}).items()):
func = ref()
if func is not None:
try:
func(*args, **kwargs)
# this does not capture KeyboardInterrupt, SystemExit,
# and GeneratorExit
except Exception as exc:
if self.exception_handler is not None:
self.exception_handler(exc)
else:
raise
class silent_list(list):
"""
override repr when returning a list of matplotlib artists to
prevent long, meaningless output. This is meant to be used for a
homogeneous list of a given type
"""
def __init__(self, type, seq=None):
self.type = type
if seq is not None:
self.extend(seq)
def __repr__(self):
return '<a list of %d %s objects>' % (len(self), self.type)
__str__ = __repr__
def __getstate__(self):
        # store a dictionary of this silent_list's state
return {'type': self.type, 'seq': self[:]}
def __setstate__(self, state):
self.type = state['type']
self.extend(state['seq'])
class IgnoredKeywordWarning(UserWarning):
"""
A class for issuing warnings about keyword arguments that will be ignored
by matplotlib
"""
pass
def local_over_kwdict(local_var, kwargs, *keys):
"""
Enforces the priority of a local variable over potentially conflicting
argument(s) from a kwargs dict. The following possible output values are
considered in order of priority:
local_var > kwargs[keys[0]] > ... > kwargs[keys[-1]]
The first of these whose value is not None will be returned. If all are
None then None will be returned. Each key in keys will be removed from the
kwargs dict in place.
Parameters
----------
local_var : any object
The local variable (highest priority)
kwargs : dict
Dictionary of keyword arguments; modified in place
keys : str(s)
Name(s) of keyword arguments to process, in descending order of
priority
Returns
-------
out : any object
Either local_var or one of kwargs[key] for key in keys
Raises
------
IgnoredKeywordWarning
For each key in keys that is removed from kwargs but not used as
the output value
"""
out = local_var
for key in keys:
kwarg_val = kwargs.pop(key, None)
if kwarg_val is not None:
if out is None:
out = kwarg_val
else:
_warn_external('"%s" keyword argument will be ignored' % key,
IgnoredKeywordWarning)
return out
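# A minimal usage sketch for local_over_kwdict, kept as a comment so importing
# this module does not execute it (the names and values are hypothetical):
#
#     kwargs = {'facecolor': 'red'}
#     color = local_over_kwdict('blue', kwargs, 'facecolor')
#     # color == 'blue'; 'facecolor' was popped from kwargs and an
#     # IgnoredKeywordWarning was issued because its value was ignored.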
def strip_math(s):
"""
Remove latex formatting from mathtext.
Only handles fully math and fully non-math strings.
"""
if len(s) >= 2 and s[0] == s[-1] == "$":
s = s[1:-1]
for tex, plain in [
(r"\times", "x"), # Specifically for Formatter support.
(r"\mathdefault", ""),
(r"\rm", ""),
(r"\cal", ""),
(r"\tt", ""),
(r"\it", ""),
("\\", ""),
("{", ""),
("}", ""),
]:
s = s.replace(tex, plain)
return s
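# Illustrative sketch of strip_math (hypothetical inputs), kept as a comment:
# fully-math strings are unwrapped and de-TeX-ified, everything else passes
# through unchanged.
#
#     strip_math(r"$\mathdefault{10^{3}}$")   # -> '10^3'
#     strip_math("plain text")                # -> 'plain text' (unchanged)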
@deprecated('3.0', alternative='types.SimpleNamespace')
class Bunch(types.SimpleNamespace):
"""
Often we want to just collect a bunch of stuff together, naming each
    item of the bunch; a dictionary's OK for that, but a small do-nothing
class is even handier, and prettier to use. Whenever you want to
group a few variables::
>>> point = Bunch(datum=2, squared=4, coord=12)
>>> point.datum
"""
pass
@deprecated('3.1', alternative='np.iterable')
def iterable(obj):
"""return true if *obj* is iterable"""
try:
iter(obj)
except TypeError:
return False
return True
@deprecated("3.1", alternative="isinstance(..., collections.abc.Hashable)")
def is_hashable(obj):
"""Returns true if *obj* can be hashed"""
try:
hash(obj)
except TypeError:
return False
return True
def is_writable_file_like(obj):
"""Return whether *obj* looks like a file object with a *write* method."""
return callable(getattr(obj, 'write', None))
def file_requires_unicode(x):
"""
Return whether the given writable file-like object requires Unicode to be
written to it.
"""
try:
x.write(b'')
except TypeError:
return True
else:
return False
@deprecated('3.0', alternative='isinstance(..., numbers.Number)')
def is_numlike(obj):
"""return true if *obj* looks like a number"""
return isinstance(obj, (numbers.Number, np.number))
def to_filehandle(fname, flag='r', return_opened=False, encoding=None):
"""
Convert a path to an open file handle or pass-through a file-like object.
Consider using `open_file_cm` instead, as it allows one to properly close
newly created file objects more easily.
Parameters
----------
fname : str or PathLike or file-like object
If `str` or `os.PathLike`, the file is opened using the flags specified
by *flag* and *encoding*. If a file-like object, it is passed through.
flag : str, default 'r'
Passed as the *mode* argument to `open` when *fname* is `str` or
`os.PathLike`; ignored if *fname* is file-like.
return_opened : bool, default False
If True, return both the file object and a boolean indicating whether
this was a new file (that the caller needs to close). If False, return
only the new file.
encoding : str or None, default None
        Passed as the *encoding* argument to `open` when *fname* is `str` or
`os.PathLike`; ignored if *fname* is file-like.
Returns
-------
fh : file-like
opened : bool
*opened* is only returned if *return_opened* is True.
"""
if isinstance(fname, os.PathLike):
fname = os.fspath(fname)
if isinstance(fname, str):
if fname.endswith('.gz'):
# get rid of 'U' in flag for gzipped files.
flag = flag.replace('U', '')
fh = gzip.open(fname, flag)
elif fname.endswith('.bz2'):
            # python may not be compiled with bz2 support,
# bury import until we need it
import bz2
# get rid of 'U' in flag for bz2 files
flag = flag.replace('U', '')
fh = bz2.BZ2File(fname, flag)
else:
fh = open(fname, flag, encoding=encoding)
opened = True
elif hasattr(fname, 'seek'):
fh = fname
opened = False
else:
raise ValueError('fname must be a PathLike or file handle')
if return_opened:
return fh, opened
return fh
@contextlib.contextmanager
def open_file_cm(path_or_file, mode="r", encoding=None):
r"""Pass through file objects and context-manage `.PathLike`\s."""
fh, opened = to_filehandle(path_or_file, mode, True, encoding)
if opened:
with fh:
yield fh
else:
yield fh
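# A minimal usage sketch for open_file_cm (hypothetical path), kept as a
# comment: paths are opened and closed automatically, while already-open
# file objects are passed through and left for the caller to close.
#
#     with open_file_cm("output.txt", "w") as fh:
#         fh.write("hello")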
def is_scalar_or_string(val):
"""Return whether the given object is a scalar or string like."""
return isinstance(val, str) or not np.iterable(val)
def get_sample_data(fname, asfileobj=True):
"""
Return a sample data file. *fname* is a path relative to the
`mpl-data/sample_data` directory. If *asfileobj* is `True`
return a file object, otherwise just a file path.
    Set the rc parameter examples.directory to the directory where we should
    look, if sample_data files are stored in a location different from the
    default (which is 'mpl-data/sample_data' at the same level as the
    'matplotlib' Python module files).
If the filename ends in .gz, the file is implicitly ungzipped.
"""
# Don't trigger deprecation warning when just fetching.
if dict.__getitem__(matplotlib.rcParams, 'examples.directory'):
root = matplotlib.rcParams['examples.directory']
else:
root = os.path.join(matplotlib._get_data_path(), 'sample_data')
path = os.path.join(root, fname)
if asfileobj:
if os.path.splitext(fname)[-1].lower() in ['.csv', '.xrc', '.txt']:
mode = 'r'
else:
mode = 'rb'
base, ext = os.path.splitext(fname)
if ext == '.gz':
return gzip.open(path, mode)
else:
return open(path, mode)
else:
return path
def flatten(seq, scalarp=is_scalar_or_string):
"""
Return a generator of flattened nested containers
For example:
>>> from matplotlib.cbook import flatten
>>> l = (('John', ['Hunter']), (1, 23), [[([42, (5, 23)], )]])
>>> print(list(flatten(l)))
['John', 'Hunter', 1, 23, 42, 5, 23]
By: Composite of Holger Krekel and Luther Blissett
From: https://code.activestate.com/recipes/121294/
and Recipe 1.12 in cookbook
"""
for item in seq:
if scalarp(item) or item is None:
yield item
else:
yield from flatten(item, scalarp)
@deprecated("3.0")
def mkdirs(newdir, mode=0o777):
"""
make directory *newdir* recursively, and set *mode*. Equivalent to ::
> mkdir -p NEWDIR
> chmod MODE NEWDIR
"""
# this functionality is now in core python as of 3.2
# LPY DROP
os.makedirs(newdir, mode=mode, exist_ok=True)
@deprecated('3.0')
class GetRealpathAndStat(object):
def __init__(self):
self._cache = {}
def __call__(self, path):
result = self._cache.get(path)
if result is None:
realpath = os.path.realpath(path)
if sys.platform == 'win32':
stat_key = realpath
else:
stat = os.stat(realpath)
stat_key = (stat.st_ino, stat.st_dev)
result = realpath, stat_key
self._cache[path] = result
return result
@functools.lru_cache()
def get_realpath_and_stat(path):
realpath = os.path.realpath(path)
stat = os.stat(realpath)
stat_key = (stat.st_ino, stat.st_dev)
return realpath, stat_key
# A regular expression used to determine the amount of space to
# remove. It looks for the first sequence of spaces immediately
# following the first newline, or at the beginning of the string.
_find_dedent_regex = re.compile(r"(?:(?:\n\r?)|^)( *)\S")
# A cache to hold the regexs that actually remove the indent.
_dedent_regex = {}
@deprecated("3.1", alternative="inspect.cleandoc")
def dedent(s):
"""
Remove excess indentation from docstring *s*.
Discards any leading blank lines, then removes up to n whitespace
characters from each line, where n is the number of leading
whitespace characters in the first line. It differs from
textwrap.dedent in its deletion of leading blank lines and its use
of the first non-blank line to determine the indentation.
It is also faster in most cases.
"""
# This implementation has a somewhat obtuse use of regular
# expressions. However, this function accounted for almost 30% of
# matplotlib startup time, so it is worthy of optimization at all
# costs.
if not s: # includes case of s is None
return ''
match = _find_dedent_regex.match(s)
if match is None:
return s
# This is the number of spaces to remove from the left-hand side.
nshift = match.end(1) - match.start(1)
if nshift == 0:
return s
# Get a regex that will remove *up to* nshift spaces from the
# beginning of each line. If it isn't in the cache, generate it.
unindent = _dedent_regex.get(nshift, None)
if unindent is None:
unindent = re.compile("\n\r? {0,%d}" % nshift)
_dedent_regex[nshift] = unindent
result = unindent.sub("\n", s).strip()
return result
@deprecated("3.0")
def listFiles(root, patterns='*', recurse=1, return_folders=0):
"""
Recursively list files
from Parmar and Martelli in the Python Cookbook
"""
import os.path
import fnmatch
# Expand patterns from semicolon-separated string to list
pattern_list = patterns.split(';')
results = []
for dirname, dirs, files in os.walk(root):
# Append to results all relevant files (and perhaps folders)
for name in files:
fullname = os.path.normpath(os.path.join(dirname, name))
if return_folders or os.path.isfile(fullname):
for pattern in pattern_list:
if fnmatch.fnmatch(name, pattern):
results.append(fullname)
break
# Block recursion if recursion was disallowed
if not recurse:
break
return results
class maxdict(dict):
"""
A dictionary with a maximum size; this doesn't override all the
relevant methods to constrain the size, just setitem, so use with
caution
"""
def __init__(self, maxsize):
dict.__init__(self)
self.maxsize = maxsize
self._killkeys = []
def __setitem__(self, k, v):
if k not in self:
if len(self) >= self.maxsize:
del self[self._killkeys[0]]
del self._killkeys[0]
self._killkeys.append(k)
dict.__setitem__(self, k, v)
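# Illustrative sketch of maxdict eviction (hypothetical keys), kept as a
# comment: once maxsize is reached, the oldest inserted key is dropped on
# each new insertion.
#
#     cache = maxdict(2)
#     cache['a'] = 1
#     cache['b'] = 2
#     cache['c'] = 3   # 'a' is evicted here
#     # dict(cache) == {'b': 2, 'c': 3}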
class Stack(object):
"""
Stack of elements with a movable cursor.
Mimics home/back/forward in a web browser.
"""
def __init__(self, default=None):
self.clear()
self._default = default
def __call__(self):
"""Return the current element, or None."""
if not len(self._elements):
return self._default
else:
return self._elements[self._pos]
def __len__(self):
return len(self._elements)
def __getitem__(self, ind):
return self._elements[ind]
def forward(self):
"""Move the position forward and return the current element."""
self._pos = min(self._pos + 1, len(self._elements) - 1)
return self()
def back(self):
"""Move the position back and return the current element."""
if self._pos > 0:
self._pos -= 1
return self()
def push(self, o):
"""
Push *o* to the stack at current position. Discard all later elements.
*o* is returned.
"""
self._elements = self._elements[:self._pos + 1] + [o]
self._pos = len(self._elements) - 1
return self()
def home(self):
"""
Push the first element onto the top of the stack.
The first element is returned.
"""
if not len(self._elements):
return
self.push(self._elements[0])
return self()
def empty(self):
"""Return whether the stack is empty."""
return len(self._elements) == 0
def clear(self):
"""Empty the stack."""
self._pos = -1
self._elements = []
def bubble(self, o):
"""
Raise *o* to the top of the stack. *o* must be present in the stack.
*o* is returned.
"""
if o not in self._elements:
raise ValueError('Unknown element o')
old = self._elements[:]
self.clear()
bubbles = []
for thiso in old:
if thiso == o:
bubbles.append(thiso)
else:
self.push(thiso)
for _ in bubbles:
self.push(o)
return o
def remove(self, o):
"""Remove *o* from the stack."""
if o not in self._elements:
raise ValueError('Unknown element o')
old = self._elements[:]
self.clear()
for thiso in old:
if thiso != o:
self.push(thiso)
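# A minimal usage sketch of Stack, mirroring browser-style navigation
# (hypothetical values), kept as a comment:
#
#     views = Stack()
#     views.push('home')
#     views.push('zoom1')
#     views.push('zoom2')
#     views.back()      # -> 'zoom1'
#     views.forward()   # -> 'zoom2'
#     views.home()      # -> 'home' (re-pushed on top of the stack)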
def report_memory(i=0): # argument may go away
"""Return the memory consumed by the process."""
    def call(command, os_name):
        try:
            # Decode so that the output can be handled as text below.
            return subprocess.check_output(command).decode()
        except subprocess.CalledProcessError:
            raise NotImplementedError(
                "report_memory works on %s only if "
                "the '%s' program is found" % (os_name, command[0])
            )
    pid = os.getpid()
    if sys.platform == 'sunos5':
        lines = call(['ps', '-p', '%d' % pid, '-o', 'osz'],
                     'Sun OS').splitlines()
        mem = int(lines[-1].strip())
    elif sys.platform == 'linux':
        lines = call(['ps', '-p', '%d' % pid, '-o', 'rss,sz'],
                     'Linux').splitlines()
        mem = int(lines[1].split()[1])
    elif sys.platform == 'darwin':
        lines = call(['ps', '-p', '%d' % pid, '-o', 'rss,vsz'],
                     'Mac OS').splitlines()
        mem = int(lines[1].split()[0])
    elif sys.platform == 'win32':
        lines = call(["tasklist", "/nh", "/fi", "pid eq %d" % pid], 'Windows')
        mem = int(lines.strip().split()[-2].replace(',', ''))
else:
raise NotImplementedError(
"We don't have a memory monitor for %s" % sys.platform)
return mem
_safezip_msg = 'In safezip, len(args[0])=%d but len(args[%d])=%d'
@deprecated("3.1")
def safezip(*args):
"""make sure *args* are equal len before zipping"""
Nx = len(args[0])
for i, arg in enumerate(args[1:]):
if len(arg) != Nx:
raise ValueError(_safezip_msg % (Nx, i + 1, len(arg)))
return list(zip(*args))
def safe_masked_invalid(x, copy=False):
x = np.array(x, subok=True, copy=copy)
if not x.dtype.isnative:
# Note that the argument to `byteswap` is 'inplace',
# thus if we have already made a copy, do the byteswap in
# place, else make a copy with the byte order swapped.
# Be explicit that we are swapping the byte order of the dtype
x = x.byteswap(copy).newbyteorder('S')
try:
xm = np.ma.masked_invalid(x, copy=False)
xm.shrink_mask()
except TypeError:
return x
return xm
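# Illustrative sketch (hypothetical data), kept as a comment: non-finite
# entries come back masked; inputs that cannot be tested with np.isfinite
# are returned unchanged.
#
#     safe_masked_invalid(np.array([1., np.nan, 3.]))
#     # -> masked array with mask [False, True, False]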
def print_cycles(objects, outstream=sys.stdout, show_progress=False):
"""
*objects*
A list of objects to find cycles in. It is often useful to
pass in gc.garbage to find the cycles that are preventing some
objects from being garbage collected.
*outstream*
The stream for output.
*show_progress*
If True, print the number of objects reached as they are found.
"""
import gc
def print_path(path):
for i, step in enumerate(path):
# next "wraps around"
next = path[(i + 1) % len(path)]
outstream.write(" %s -- " % type(step))
if isinstance(step, dict):
for key, val in step.items():
if val is next:
outstream.write("[{!r}]".format(key))
break
if key is next:
outstream.write("[key] = {!r}".format(val))
break
elif isinstance(step, list):
outstream.write("[%d]" % step.index(next))
elif isinstance(step, tuple):
outstream.write("( tuple )")
else:
outstream.write(repr(step))
outstream.write(" ->\n")
outstream.write("\n")
def recurse(obj, start, all, current_path):
if show_progress:
outstream.write("%d\r" % len(all))
all[id(obj)] = None
referents = gc.get_referents(obj)
for referent in referents:
# If we've found our way back to the start, this is
# a cycle, so print it out
if referent is start:
print_path(current_path)
# Don't go back through the original list of objects, or
# through temporary references to the object, since those
# are just an artifact of the cycle detector itself.
elif referent is objects or isinstance(referent, types.FrameType):
continue
# We haven't seen this object before, so recurse
elif id(referent) not in all:
recurse(referent, start, all, current_path + [obj])
for obj in objects:
outstream.write(f"Examining: {obj!r}\n")
recurse(obj, obj, {}, [])
class Grouper(object):
"""
This class provides a lightweight way to group arbitrary objects
together into disjoint sets when a full-blown graph data structure
would be overkill.
Objects can be joined using :meth:`join`, tested for connectedness
using :meth:`joined`, and all disjoint sets can be retrieved by
using the object as an iterator.
The objects being joined must be hashable and weak-referenceable.
For example:
>>> from matplotlib.cbook import Grouper
>>> class Foo(object):
... def __init__(self, s):
... self.s = s
... def __repr__(self):
... return self.s
...
>>> a, b, c, d, e, f = [Foo(x) for x in 'abcdef']
>>> grp = Grouper()
>>> grp.join(a, b)
>>> grp.join(b, c)
>>> grp.join(d, e)
>>> sorted(map(tuple, grp))
[(a, b, c), (d, e)]
>>> grp.joined(a, b)
True
>>> grp.joined(a, c)
True
>>> grp.joined(a, d)
False
"""
def __init__(self, init=()):
self._mapping = {weakref.ref(x): [weakref.ref(x)] for x in init}
def __contains__(self, item):
return weakref.ref(item) in self._mapping
def clean(self):
"""Clean dead weak references from the dictionary."""
mapping = self._mapping
to_drop = [key for key in mapping if key() is None]
for key in to_drop:
val = mapping.pop(key)
val.remove(key)
def join(self, a, *args):
"""
Join given arguments into the same set. Accepts one or more arguments.
"""
mapping = self._mapping
set_a = mapping.setdefault(weakref.ref(a), [weakref.ref(a)])
for arg in args:
set_b = mapping.get(weakref.ref(arg), [weakref.ref(arg)])
if set_b is not set_a:
if len(set_b) > len(set_a):
set_a, set_b = set_b, set_a
set_a.extend(set_b)
for elem in set_b:
mapping[elem] = set_a
self.clean()
def joined(self, a, b):
"""Return whether *a* and *b* are members of the same set."""
self.clean()
return (self._mapping.get(weakref.ref(a), object())
is self._mapping.get(weakref.ref(b)))
def remove(self, a):
self.clean()
set_a = self._mapping.pop(weakref.ref(a), None)
if set_a:
set_a.remove(weakref.ref(a))
def __iter__(self):
"""
Iterate over each of the disjoint sets as a list.
The iterator is invalid if interleaved with calls to join().
"""
self.clean()
unique_groups = {id(group): group for group in self._mapping.values()}
for group in unique_groups.values():
yield [x() for x in group]
def get_siblings(self, a):
"""Return all of the items joined with *a*, including itself."""
self.clean()
siblings = self._mapping.get(weakref.ref(a), [weakref.ref(a)])
return [x() for x in siblings]
def simple_linear_interpolation(a, steps):
"""
Resample an array with ``steps - 1`` points between original point pairs.
Parameters
----------
a : array, shape (n, ...)
steps : int
Returns
-------
array, shape ``((n - 1) * steps + 1, ...)``
Along each column of *a*, ``(steps - 1)`` points are introduced between
each original values; the values are linearly interpolated.
"""
fps = a.reshape((len(a), -1))
xp = np.arange(len(a)) * steps
x = np.arange((len(a) - 1) * steps + 1)
return (np.column_stack([np.interp(x, xp, fp) for fp in fps.T])
.reshape((len(x),) + a.shape[1:]))
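# Illustrative sketch (hypothetical data), kept as a comment: with steps=2,
# one midpoint is inserted between each pair of original values.
#
#     simple_linear_interpolation(np.array([0., 2., 4.]), 2)
#     # -> array([0., 1., 2., 3., 4.])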
def delete_masked_points(*args):
"""
Find all masked and/or non-finite points in a set of arguments,
and return the arguments with only the unmasked points remaining.
Arguments can be in any of 5 categories:
1) 1-D masked arrays
2) 1-D ndarrays
3) ndarrays with more than one dimension
4) other non-string iterables
5) anything else
The first argument must be in one of the first four categories;
any argument with a length differing from that of the first
argument (and hence anything in category 5) then will be
passed through unchanged.
Masks are obtained from all arguments of the correct length
in categories 1, 2, and 4; a point is bad if masked in a masked
array or if it is a nan or inf. No attempt is made to
extract a mask from categories 2, 3, and 4 if :meth:`np.isfinite`
does not yield a Boolean array.
All input arguments that are not passed unchanged are returned
as ndarrays after removing the points or rows corresponding to
masks in any of the arguments.
A vastly simpler version of this function was originally
written as a helper for Axes.scatter().
"""
if not len(args):
return ()
if is_scalar_or_string(args[0]):
raise ValueError("First argument must be a sequence")
nrecs = len(args[0])
margs = []
seqlist = [False] * len(args)
for i, x in enumerate(args):
if not isinstance(x, str) and np.iterable(x) and len(x) == nrecs:
seqlist[i] = True
if isinstance(x, np.ma.MaskedArray):
if x.ndim > 1:
raise ValueError("Masked arrays must be 1-D")
else:
x = np.asarray(x)
margs.append(x)
masks = [] # list of masks that are True where good
for i, x in enumerate(margs):
if seqlist[i]:
if x.ndim > 1:
continue # Don't try to get nan locations unless 1-D.
if isinstance(x, np.ma.MaskedArray):
masks.append(~np.ma.getmaskarray(x)) # invert the mask
xd = x.data
else:
xd = x
try:
mask = np.isfinite(xd)
if isinstance(mask, np.ndarray):
masks.append(mask)
except Exception: # Fixme: put in tuple of possible exceptions?
pass
if len(masks):
mask = np.logical_and.reduce(masks)
igood = mask.nonzero()[0]
if len(igood) < nrecs:
for i, x in enumerate(margs):
if seqlist[i]:
margs[i] = x[igood]
for i, x in enumerate(margs):
if seqlist[i] and isinstance(x, np.ma.MaskedArray):
margs[i] = x.filled()
return margs
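# Illustrative sketch (hypothetical data), kept as a comment: rows that are
# masked or non-finite in *any* argument are dropped from *all* arguments of
# matching length.
#
#     x = np.array([1., 2., np.nan, 4.])
#     y = np.ma.array([10., 20., 30., 40.], mask=[0, 1, 0, 0])
#     xc, yc = delete_masked_points(x, y)
#     # xc -> array([1., 4.]);  yc -> array([10., 40.])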
def _combine_masks(*args):
"""
Find all masked and/or non-finite points in a set of arguments,
and return the arguments as masked arrays with a common mask.
Arguments can be in any of 5 categories:
1) 1-D masked arrays
2) 1-D ndarrays
3) ndarrays with more than one dimension
4) other non-string iterables
5) anything else
The first argument must be in one of the first four categories;
any argument with a length differing from that of the first
argument (and hence anything in category 5) then will be
passed through unchanged.
Masks are obtained from all arguments of the correct length
in categories 1, 2, and 4; a point is bad if masked in a masked
array or if it is a nan or inf. No attempt is made to
extract a mask from categories 2 and 4 if :meth:`np.isfinite`
does not yield a Boolean array. Category 3 is included to
support RGB or RGBA ndarrays, which are assumed to have only
valid values and which are passed through unchanged.
All input arguments that are not passed unchanged are returned
as masked arrays if any masked points are found, otherwise as
ndarrays.
"""
if not len(args):
return ()
if is_scalar_or_string(args[0]):
raise ValueError("First argument must be a sequence")
nrecs = len(args[0])
margs = [] # Output args; some may be modified.
seqlist = [False] * len(args) # Flags: True if output will be masked.
masks = [] # List of masks.
for i, x in enumerate(args):
if is_scalar_or_string(x) or len(x) != nrecs:
margs.append(x) # Leave it unmodified.
else:
if isinstance(x, np.ma.MaskedArray) and x.ndim > 1:
raise ValueError("Masked arrays must be 1-D")
x = np.asanyarray(x)
if x.ndim == 1:
x = safe_masked_invalid(x)
seqlist[i] = True
if np.ma.is_masked(x):
masks.append(np.ma.getmaskarray(x))
margs.append(x) # Possibly modified.
if len(masks):
mask = np.logical_or.reduce(masks)
for i, x in enumerate(margs):
if seqlist[i]:
margs[i] = np.ma.array(x, mask=mask)
return margs
def boxplot_stats(X, whis=1.5, bootstrap=None, labels=None,
autorange=False):
"""
Returns list of dictionaries of statistics used to draw a series
of box and whisker plots. The `Returns` section enumerates the
required keys of the dictionary. Users can skip this function and
pass a user-defined set of dictionaries to the new `axes.bxp` method
instead of relying on MPL to do the calculations.
Parameters
----------
X : array-like
Data that will be represented in the boxplots. Should have 2 or
fewer dimensions.
whis : float, string, or sequence (default = 1.5)
As a float, determines the reach of the whiskers beyond the
first and third quartiles. In other words, where IQR is the
        interquartile range (`Q3-Q1`), the upper whisker will extend to the
        last datum less than `Q3 + whis*IQR`. Similarly, the lower whisker
        will extend to the first datum greater than `Q1 - whis*IQR`.
Beyond the whiskers, data are considered outliers
and are plotted as individual points. This can be set to an
ascending sequence of percentiles (e.g., [5, 95]) to set the
whiskers at specific percentiles of the data. Finally, `whis`
can be the string ``'range'`` to force the whiskers to the
minimum and maximum of the data. In the edge case that the 25th
and 75th percentiles are equivalent, `whis` can be automatically
set to ``'range'`` via the `autorange` option.
bootstrap : int, optional
Number of times the confidence intervals around the median
should be bootstrapped (percentile method).
labels : array-like, optional
Labels for each dataset. Length must be compatible with
dimensions of `X`.
autorange : bool, optional (False)
When `True` and the data are distributed such that the 25th and 75th
percentiles are equal, ``whis`` is set to ``'range'`` such that the
whisker ends are at the minimum and maximum of the data.
Returns
-------
bxpstats : list of dict
A list of dictionaries containing the results for each column
of data. Keys of each dictionary are the following:
======== ===================================
Key Value Description
======== ===================================
label tick label for the boxplot
mean arithmetic mean value
med 50th percentile
q1 first quartile (25th percentile)
q3 third quartile (75th percentile)
cilo lower notch around the median
cihi upper notch around the median
whislo end of the lower whisker
whishi end of the upper whisker
fliers outliers
======== ===================================
Notes
-----
Non-bootstrapping approach to confidence interval uses Gaussian-
based asymptotic approximation:
.. math::
\\mathrm{med} \\pm 1.57 \\times \\frac{\\mathrm{iqr}}{\\sqrt{N}}
General approach from:
McGill, R., Tukey, J.W., and Larsen, W.A. (1978) "Variations of
Boxplots", The American Statistician, 32:12-16.
"""
def _bootstrap_median(data, N=5000):
# determine 95% confidence intervals of the median
M = len(data)
percentiles = [2.5, 97.5]
bs_index = np.random.randint(M, size=(N, M))
bsData = data[bs_index]
estimate = np.median(bsData, axis=1, overwrite_input=True)
CI = np.percentile(estimate, percentiles)
return CI
def _compute_conf_interval(data, med, iqr, bootstrap):
if bootstrap is not None:
# Do a bootstrap estimate of notch locations.
# get conf. intervals around median
CI = _bootstrap_median(data, N=bootstrap)
notch_min = CI[0]
notch_max = CI[1]
else:
N = len(data)
notch_min = med - 1.57 * iqr / np.sqrt(N)
notch_max = med + 1.57 * iqr / np.sqrt(N)
return notch_min, notch_max
# output is a list of dicts
bxpstats = []
# convert X to a list of lists
X = _reshape_2D(X, "X")
ncols = len(X)
if labels is None:
labels = itertools.repeat(None)
elif len(labels) != ncols:
raise ValueError("Dimensions of labels and X must be compatible")
input_whis = whis
for ii, (x, label) in enumerate(zip(X, labels)):
# empty dict
stats = {}
if label is not None:
stats['label'] = label
# restore whis to the input values in case it got changed in the loop
whis = input_whis
# note tricksiness, append up here and then mutate below
bxpstats.append(stats)
# if empty, bail
        if len(x) == 0:
            stats['fliers'] = np.array([])
            stats['mean'] = np.nan
            stats['med'] = np.nan
            stats['q1'] = np.nan
            stats['q3'] = np.nan
            stats['cilo'] = np.nan
            stats['cihi'] = np.nan
            stats['whislo'] = np.nan
            stats['whishi'] = np.nan
            continue
# up-convert to an array, just to be safe
x = np.asarray(x)
# arithmetic mean
stats['mean'] = np.mean(x)
# medians and quartiles
q1, med, q3 = np.percentile(x, [25, 50, 75])
# interquartile range
stats['iqr'] = q3 - q1
if stats['iqr'] == 0 and autorange:
whis = 'range'
# conf. interval around median
stats['cilo'], stats['cihi'] = _compute_conf_interval(
x, med, stats['iqr'], bootstrap
)
# lowest/highest non-outliers
if np.isscalar(whis):
if np.isreal(whis):
loval = q1 - whis * stats['iqr']
hival = q3 + whis * stats['iqr']
elif whis in ['range', 'limit', 'limits', 'min/max']:
loval = np.min(x)
hival = np.max(x)
else:
raise ValueError('whis must be a float, valid string, or list '
'of percentiles')
else:
loval = np.percentile(x, whis[0])
hival = np.percentile(x, whis[1])
# get high extreme
wiskhi = x[x <= hival]
if len(wiskhi) == 0 or np.max(wiskhi) < q3:
stats['whishi'] = q3
else:
stats['whishi'] = np.max(wiskhi)
# get low extreme
wisklo = x[x >= loval]
if len(wisklo) == 0 or np.min(wisklo) > q1:
stats['whislo'] = q1
else:
stats['whislo'] = np.min(wisklo)
# compute a single array of outliers
stats['fliers'] = np.hstack([
x[x < stats['whislo']],
x[x > stats['whishi']],
])
# add in the remaining stats
stats['q1'], stats['med'], stats['q3'] = q1, med, q3
return bxpstats
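# A minimal usage sketch (hypothetical data; ``ax`` stands in for an existing
# Axes instance), kept as a comment: compute the stats separately, then hand
# them to `Axes.bxp` for drawing.
#
#     data = np.random.normal(size=(100, 3))
#     stats = boxplot_stats(data, labels=['a', 'b', 'c'])
#     ax.bxp(stats)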
# The ls_mapper maps short codes for line style to their full name used by
# backends; the reverse mapper is for mapping full names to short ones.
ls_mapper = {'-': 'solid', '--': 'dashed', '-.': 'dashdot', ':': 'dotted'}
ls_mapper_r = {v: k for k, v in ls_mapper.items()}
def contiguous_regions(mask):
"""
Return a list of (ind0, ind1) such that mask[ind0:ind1].all() is
True and we cover all such regions
"""
mask = np.asarray(mask, dtype=bool)
if not mask.size:
return []
# Find the indices of region changes, and correct offset
idx, = np.nonzero(mask[:-1] != mask[1:])
idx += 1
# List operations are faster for moderately sized arrays
idx = idx.tolist()
# Add first and/or last index if needed
if mask[0]:
idx = [0] + idx
if mask[-1]:
idx.append(len(mask))
return list(zip(idx[::2], idx[1::2]))
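# Illustrative sketch (hypothetical mask), kept as a comment: the result is a
# list of half-open (start, stop) index pairs, one per run of True values.
#
#     contiguous_regions([False, True, True, False, True])
#     # -> [(1, 3), (4, 5)]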
def is_math_text(s):
# Did we find an even number of non-escaped dollar signs?
    # If so, treat it as math text.
s = str(s)
dollar_count = s.count(r'$') - s.count(r'\$')
even_dollars = (dollar_count > 0 and dollar_count % 2 == 0)
return even_dollars
def _to_unmasked_float_array(x):
"""
Convert a sequence to a float array; if input was a masked array, masked
values are converted to nans.
"""
if hasattr(x, 'mask'):
return np.ma.asarray(x, float).filled(np.nan)
else:
return np.asarray(x, float)
def _check_1d(x):
'''
    Converts a sequence of less than 1 dimension to an array of 1
    dimension; leaves everything else untouched.
'''
if not hasattr(x, 'shape') or len(x.shape) < 1:
return np.atleast_1d(x)
else:
try:
x[:, None]
return x
except (IndexError, TypeError):
return np.atleast_1d(x)
def _reshape_2D(X, name):
"""
Use Fortran ordering to convert ndarrays and lists of iterables to lists of
1D arrays.
Lists of iterables are converted by applying `np.asarray` to each of their
elements. 1D ndarrays are returned in a singleton list containing them.
2D ndarrays are converted to the list of their *columns*.
*name* is used to generate the error message for invalid inputs.
"""
# Iterate over columns for ndarrays, over rows otherwise.
X = np.atleast_1d(X.T if isinstance(X, np.ndarray) else np.asarray(X))
if len(X) == 0:
return [[]]
if X.ndim == 1 and not isinstance(X[0], collections.abc.Iterable):
# 1D array of scalars: directly return it.
return [X]
elif X.ndim in [1, 2]:
# 2D array, or 1D array of iterables: flatten them first.
return [np.reshape(x, -1) for x in X]
else:
raise ValueError("{} must have 2 or fewer dimensions".format(name))
def violin_stats(X, method, points=100):
"""
Returns a list of dictionaries of data which can be used to draw a series
of violin plots. See the `Returns` section below to view the required keys
of the dictionary. Users can skip this function and pass a user-defined set
    of dictionaries to the `axes.violin` method instead of using MPL to do the
calculations.
Parameters
----------
X : array-like
Sample data that will be used to produce the gaussian kernel density
estimates. Must have 2 or fewer dimensions.
method : callable
The method used to calculate the kernel density estimate for each
column of data. When called via `method(v, coords)`, it should
return a vector of the values of the KDE evaluated at the values
specified in coords.
points : scalar, default = 100
Defines the number of points to evaluate each of the gaussian kernel
density estimates at.
Returns
-------
A list of dictionaries containing the results for each column of data.
The dictionaries contain at least the following:
- coords: A list of scalars containing the coordinates this particular
kernel density estimate was evaluated at.
- vals: A list of scalars containing the values of the kernel density
estimate at each of the coordinates given in `coords`.
- mean: The mean value for this column of data.
- median: The median value for this column of data.
- min: The minimum value for this column of data.
- max: The maximum value for this column of data.
"""
# List of dictionaries describing each of the violins.
vpstats = []
# Want X to be a list of data sequences
X = _reshape_2D(X, "X")
for x in X:
# Dictionary of results for this distribution
stats = {}
# Calculate basic stats for the distribution
min_val = np.min(x)
max_val = np.max(x)
# Evaluate the kernel density estimate
coords = np.linspace(min_val, max_val, points)
stats['vals'] = method(x, coords)
stats['coords'] = coords
# Store additional statistics for this distribution
stats['mean'] = np.mean(x)
stats['median'] = np.median(x)
stats['min'] = min_val
stats['max'] = max_val
# Append to output
vpstats.append(stats)
return vpstats
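# A minimal usage sketch (hypothetical data; the scipy dependency and ``ax``
# are assumptions, not requirements of this module), kept as a comment: any
# callable with the signature ``method(data, coords) -> values`` works as the
# KDE estimator.
#
#     from scipy.stats import gaussian_kde
#     data = np.random.normal(size=(100, 2))
#     vpstats = violin_stats(data, lambda d, coords: gaussian_kde(d)(coords))
#     ax.violin(vpstats)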
def pts_to_prestep(x, *args):
"""
Convert continuous line to pre-steps.
Given a set of ``N`` points, convert to ``2N - 1`` points, which when
connected linearly give a step function which changes values at the
beginning of the intervals.
Parameters
----------
x : array
The x location of the steps. May be empty.
y1, ..., yp : array
y arrays to be turned into steps; all must be the same length as ``x``.
Returns
-------
out : array
The x and y values converted to steps in the same order as the input;
can be unpacked as ``x_out, y1_out, ..., yp_out``. If the input is
        length ``N``, each of these arrays will be length ``2N - 1``. For
``N=0``, the length will be 0.
Examples
--------
>> x_s, y1_s, y2_s = pts_to_prestep(x, y1, y2)
"""
steps = np.zeros((1 + len(args), max(2 * len(x) - 1, 0)))
# In all `pts_to_*step` functions, only assign *once* using `x` and `args`,
# as converting to an array may be expensive.
steps[0, 0::2] = x
steps[0, 1::2] = steps[0, 0:-2:2]
steps[1:, 0::2] = args
steps[1:, 1::2] = steps[1:, 2::2]
return steps
def pts_to_poststep(x, *args):
"""
Convert continuous line to post-steps.
    Given a set of ``N`` points convert to ``2N - 1`` points, which when
connected linearly give a step function which changes values at the end of
the intervals.
Parameters
----------
x : array
The x location of the steps. May be empty.
y1, ..., yp : array
y arrays to be turned into steps; all must be the same length as ``x``.
Returns
-------
out : array
The x and y values converted to steps in the same order as the input;
can be unpacked as ``x_out, y1_out, ..., yp_out``. If the input is
        length ``N``, each of these arrays will be length ``2N - 1``. For
``N=0``, the length will be 0.
Examples
--------
>> x_s, y1_s, y2_s = pts_to_poststep(x, y1, y2)
"""
steps = np.zeros((1 + len(args), max(2 * len(x) - 1, 0)))
steps[0, 0::2] = x
steps[0, 1::2] = steps[0, 2::2]
steps[1:, 0::2] = args
steps[1:, 1::2] = steps[1:, 0:-2:2]
return steps
def pts_to_midstep(x, *args):
"""
Convert continuous line to mid-steps.
Given a set of ``N`` points convert to ``2N`` points which when connected
linearly give a step function which changes values at the middle of the
intervals.
Parameters
----------
x : array
The x location of the steps. May be empty.
y1, ..., yp : array
y arrays to be turned into steps; all must be the same length as
``x``.
Returns
-------
out : array
The x and y values converted to steps in the same order as the input;
can be unpacked as ``x_out, y1_out, ..., yp_out``. If the input is
length ``N``, each of these arrays will be length ``2N``.
Examples
--------
>> x_s, y1_s, y2_s = pts_to_midstep(x, y1, y2)
"""
steps = np.zeros((1 + len(args), 2 * len(x)))
x = np.asanyarray(x)
steps[0, 1:-1:2] = steps[0, 2::2] = (x[:-1] + x[1:]) / 2
steps[0, :1] = x[:1] # Also works for zero-sized input.
steps[0, -1:] = x[-1:]
steps[1:, 0::2] = args
steps[1:, 1::2] = steps[1:, 0::2]
return steps
STEP_LOOKUP_MAP = {'default': lambda x, y: (x, y),
'steps': pts_to_prestep,
'steps-pre': pts_to_prestep,
'steps-post': pts_to_poststep,
'steps-mid': pts_to_midstep}
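# Illustrative sketch of the step conversion (hypothetical data), kept as a
# comment: for 'steps-pre', the new y value takes effect at the left edge of
# each interval.
#
#     x = np.array([1., 2., 3.])
#     y = np.array([10., 20., 30.])
#     pts_to_prestep(x, y)
#     # -> array([[ 1.,  1.,  2.,  2.,  3.],
#     #           [10., 20., 20., 30., 30.]])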
def index_of(y):
"""
A helper function to get the index of an input to plot
against if x values are not explicitly given.
    Tries to get `y.index` (works if this is a pd.Series); if that
    fails, returns np.arange(y.shape[0]).
This will be extended in the future to deal with more types of
labeled data.
Parameters
----------
y : scalar or array-like
The proposed y-value
Returns
-------
x, y : ndarray
The x and y values to plot.
"""
try:
return y.index.values, y.values
except AttributeError:
y = _check_1d(y)
return np.arange(y.shape[0], dtype=float), y
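# Illustrative sketch (hypothetical data), kept as a comment: without an
# .index attribute, a plain positional index is generated.
#
#     index_of(np.array([10., 20., 30.]))
#     # -> (array([0., 1., 2.]), array([10., 20., 30.]))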
def safe_first_element(obj):
if isinstance(obj, collections.abc.Iterator):
# needed to accept `array.flat` as input.
# np.flatiter reports as an instance of collections.Iterator
# but can still be indexed via [].
# This has the side effect of re-setting the iterator, but
# that is acceptable.
try:
return obj[0]
except TypeError:
pass
raise RuntimeError("matplotlib does not support generators "
"as input")
return next(iter(obj))
def sanitize_sequence(data):
"""Converts dictview object to list"""
return (list(data) if isinstance(data, collections.abc.MappingView)
else data)
def normalize_kwargs(kw, alias_mapping=None, required=(), forbidden=(),
allowed=None):
"""Helper function to normalize kwarg inputs
The order they are resolved are:
1. aliasing
2. required
3. forbidden
4. allowed
This order means that only the canonical names need appear in
`allowed`, `forbidden`, `required`
Parameters
----------
alias_mapping : dict or Artist subclass or Artist instance, optional
A mapping between a canonical name to a list of
aliases, in order of precedence from lowest to highest.
If the canonical value is not in the list it is assumed to have
the highest priority.
If an Artist subclass or instance is passed, use its properties alias
mapping.
required : iterable, optional
A tuple of fields that must be in kwargs.
forbidden : iterable, optional
A list of keys which may not be in kwargs
allowed : tuple, optional
A tuple of allowed fields. If this not None, then raise if
`kw` contains any keys not in the union of `required`
and `allowed`. To allow only the required fields pass in
``()`` for `allowed`
Raises
------
TypeError
To match what python raises if invalid args/kwargs are passed to
a callable.
"""
from matplotlib.artist import Artist
# deal with default value of alias_mapping
if alias_mapping is None:
alias_mapping = dict()
elif (isinstance(alias_mapping, type) and issubclass(alias_mapping, Artist)
or isinstance(alias_mapping, Artist)):
alias_mapping = getattr(alias_mapping, "_alias_map", {})
# make a local so we can pop
kw = dict(kw)
# output dictionary
ret = dict()
# hit all alias mappings
for canonical, alias_list in alias_mapping.items():
# the alias lists are ordered from lowest to highest priority
# so we know to use the last value in this list
tmp = []
seen = []
for a in alias_list:
try:
tmp.append(kw.pop(a))
seen.append(a)
except KeyError:
pass
# if canonical is not in the alias_list assume highest priority
if canonical not in alias_list:
try:
tmp.append(kw.pop(canonical))
seen.append(canonical)
except KeyError:
pass
# if we found anything in this set of aliases put it in the return
# dict
if tmp:
ret[canonical] = tmp[-1]
if len(tmp) > 1:
warn_deprecated(
"3.1", message=f"Saw kwargs {seen!r} which are all "
f"aliases for {canonical!r}. Kept value from "
f"{seen[-1]!r}. Passing multiple aliases for the same "
f"property will raise a TypeError %(removal)s.")
# at this point we know that all keys which are aliased are removed, update
# the return dictionary from the cleaned local copy of the input
ret.update(kw)
fail_keys = [k for k in required if k not in ret]
if fail_keys:
raise TypeError("The required keys {keys!r} "
"are not in kwargs".format(keys=fail_keys))
fail_keys = [k for k in forbidden if k in ret]
if fail_keys:
raise TypeError("The forbidden keys {keys!r} "
"are in kwargs".format(keys=fail_keys))
if allowed is not None:
allowed_set = {*required, *allowed}
fail_keys = [k for k in ret if k not in allowed_set]
if fail_keys:
raise TypeError(
"kwargs contains {keys!r} which are not in the required "
"{req!r} or allowed {allow!r} keys".format(
keys=fail_keys, req=required, allow=allowed))
return ret
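# A minimal usage sketch with a hypothetical alias map, kept as a comment:
# both the alias and the canonical spelling resolve to the canonical name in
# the returned dict.
#
#     aliases = {'color': ['c'], 'linewidth': ['lw']}
#     normalize_kwargs({'c': 'red', 'lw': 2}, aliases)
#     # -> {'color': 'red', 'linewidth': 2}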
@deprecated("3.1")
def get_label(y, default_name):
try:
return y.name
except AttributeError:
return default_name
_lockstr = """\
LOCKERROR: matplotlib is trying to acquire the lock
{!r}
and has failed. This may be due to another process holding this
lock. If you are sure no other matplotlib process is running, try
removing these folders and trying again.
"""
@deprecated("3.0")
class Locked(object):
"""
Context manager to handle locks.
Based on code from conda.
(c) 2012-2013 Continuum Analytics, Inc. / https://www.continuum.io/
All Rights Reserved
conda is distributed under the terms of the BSD 3-clause license.
Consult LICENSE_CONDA or https://opensource.org/licenses/BSD-3-Clause.
"""
LOCKFN = '.matplotlib_lock'
class TimeoutError(RuntimeError):
pass
def __init__(self, path):
self.path = path
self.end = "-" + str(os.getpid())
self.lock_path = os.path.join(self.path, self.LOCKFN + self.end)
self.pattern = os.path.join(self.path, self.LOCKFN + '-*')
self.remove = True
def __enter__(self):
retries = 50
sleeptime = 0.1
while retries:
files = glob.glob(self.pattern)
if files and not files[0].endswith(self.end):
time.sleep(sleeptime)
retries -= 1
else:
break
else:
err_str = _lockstr.format(self.pattern)
raise self.TimeoutError(err_str)
if not files:
try:
os.makedirs(self.lock_path)
except OSError:
pass
else: # PID lock already here --- someone else will remove it.
self.remove = False
def __exit__(self, exc_type, exc_value, traceback):
if self.remove:
for path in self.lock_path, self.path:
try:
os.rmdir(path)
except OSError:
pass
@contextlib.contextmanager
def _lock_path(path):
"""
Context manager for locking a path.
Usage::
with _lock_path(path):
...
Another thread or process that attempts to lock the same path will wait
until this context manager is exited.
The lock is implemented by creating a temporary file in the parent
directory, so that directory must exist and be writable.
"""
path = Path(path)
lock_path = path.with_name(path.name + ".matplotlib-lock")
retries = 50
sleeptime = 0.1
for _ in range(retries):
try:
with lock_path.open("xb"):
break
except FileExistsError:
time.sleep(sleeptime)
else:
raise TimeoutError("""\
Lock error: Matplotlib failed to acquire the following lock file:
{}
This may be due to another process holding this lock file. If you are sure no
other Matplotlib process is running, remove this file and try again.""".format(
lock_path))
try:
yield
finally:
lock_path.unlink()
def _topmost_artist(
artists,
_cached_max=functools.partial(max, key=operator.attrgetter("zorder"))):
"""Get the topmost artist of a list.
In case of a tie, return the *last* of the tied artists, as it will be
drawn on top of the others. `max` returns the first maximum in case of
ties, so we need to iterate over the list in reverse order.
"""
return _cached_max(reversed(artists))
def _str_equal(obj, s):
"""Return whether *obj* is a string equal to string *s*.
This helper solely exists to handle the case where *obj* is a numpy array,
because in such cases, a naive ``obj == s`` would yield an array, which
cannot be used in a boolean context.
"""
return isinstance(obj, str) and obj == s
def _str_lower_equal(obj, s):
"""Return whether *obj* is a string equal, when lowercased, to string *s*.
This helper solely exists to handle the case where *obj* is a numpy array,
because in such cases, a naive ``obj == s`` would yield an array, which
cannot be used in a boolean context.
"""
return isinstance(obj, str) and obj.lower() == s
def _define_aliases(alias_d, cls=None):
"""Class decorator for defining property aliases.
Use as ::
@cbook._define_aliases({"property": ["alias", ...], ...})
class C: ...
For each property, if the corresponding ``get_property`` is defined in the
class so far, an alias named ``get_alias`` will be defined; the same will
be done for setters. If neither the getter nor the setter exists, an
exception will be raised.
The alias map is stored as the ``_alias_map`` attribute on the class and
can be used by `~.normalize_kwargs` (which assumes that higher priority
aliases come last).
"""
if cls is None: # Return the actual class decorator.
return functools.partial(_define_aliases, alias_d)
def make_alias(name): # Enforce a closure over *name*.
@functools.wraps(getattr(cls, name))
def method(self, *args, **kwargs):
return getattr(self, name)(*args, **kwargs)
return method
for prop, aliases in alias_d.items():
exists = False
for prefix in ["get_", "set_"]:
if prefix + prop in vars(cls):
exists = True
for alias in aliases:
method = make_alias(prefix + prop)
method.__name__ = prefix + alias
method.__doc__ = "Alias for `{}`.".format(prefix + prop)
setattr(cls, prefix + alias, method)
if not exists:
raise ValueError(
"Neither getter nor setter exists for {!r}".format(prop))
if hasattr(cls, "_alias_map"):
# Need to decide on conflict resolution policy.
raise NotImplementedError("Parent class already defines aliases")
cls._alias_map = alias_d
return cls
def _array_perimeter(arr):
"""
Get the elements on the perimeter of ``arr``,
Parameters
----------
arr : ndarray, shape (M, N)
The input array
Returns
-------
perimeter : ndarray, shape (2*(M - 1) + 2*(N - 1),)
The elements on the perimeter of the array::
[arr[0,0] ... arr[0,-1] ... arr[-1, -1] ... arr[-1,0] ...]
Examples
--------
>>> i, j = np.ogrid[:3,:4]
>>> a = i*10 + j
>>> a
array([[ 0, 1, 2, 3],
[10, 11, 12, 13],
[20, 21, 22, 23]])
>>> _array_perimeter(a)
array([ 0, 1, 2, 3, 13, 23, 22, 21, 20, 10])
"""
# note we use Python's half-open ranges to avoid repeating
# the corners
forward = np.s_[0:-1] # [0 ... -1)
backward = np.s_[-1:0:-1] # [-1 ... 0)
return np.concatenate((
arr[0, forward],
arr[forward, -1],
arr[-1, backward],
arr[backward, 0],
))
@contextlib.contextmanager
def _setattr_cm(obj, **kwargs):
"""Temporarily set some attributes; restore original state at context exit.
"""
sentinel = object()
origs = [(attr, getattr(obj, attr, sentinel)) for attr in kwargs]
try:
for attr, val in kwargs.items():
setattr(obj, attr, val)
yield
finally:
for attr, orig in origs:
if orig is sentinel:
delattr(obj, attr)
else:
setattr(obj, attr, orig)
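# Illustrative sketch (hypothetical object), kept as a comment: attributes
# are restored, or deleted if they did not previously exist, when the context
# exits.
#
#     class _Cfg:
#         pass
#     cfg = _Cfg()
#     cfg.dpi = 100
#     with _setattr_cm(cfg, dpi=300, debug=True):
#         pass   # here cfg.dpi == 300 and cfg.debug is True
#     # afterwards cfg.dpi == 100 again and cfg.debug no longer exists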
def _warn_external(message, category=None):
"""
`warnings.warn` wrapper that sets *stacklevel* to "outside Matplotlib".
The original emitter of the warning can be obtained by patching this
function back to `warnings.warn`, i.e. ``cbook._warn_external =
warnings.warn`` (or ``functools.partial(warnings.warn, stacklevel=2)``,
etc.).
"""
frame = sys._getframe()
for stacklevel in itertools.count(1): # lgtm[py/unused-loop-variable]
if frame is None:
# when called in embedded context may hit frame is None
break
if not re.match(r"\A(matplotlib|mpl_toolkits)(\Z|\.(?!tests\.))",
# Work around sphinx-gallery not setting __name__.
frame.f_globals.get("__name__", "")):
break
frame = frame.f_back
warnings.warn(message, category, stacklevel)
class _OrderedSet(collections.abc.MutableSet):
def __init__(self):
self._od = collections.OrderedDict()
def __contains__(self, key):
return key in self._od
def __iter__(self):
return iter(self._od)
def __len__(self):
return len(self._od)
def add(self, key):
self._od.pop(key, None)
self._od[key] = None
def discard(self, key):
self._od.pop(key, None)
# Agg's buffers are unmultiplied RGBA8888, which neither PyQt4 nor cairo
# support; however, both do support premultiplied ARGB32.
def _premultiplied_argb32_to_unmultiplied_rgba8888(buf):
"""
Convert a premultiplied ARGB32 buffer to an unmultiplied RGBA8888 buffer.
"""
rgba = np.take( # .take() ensures C-contiguity of the result.
buf,
[2, 1, 0, 3] if sys.byteorder == "little" else [1, 2, 3, 0], axis=2)
rgb = rgba[..., :-1]
alpha = rgba[..., -1]
# Un-premultiply alpha. The formula is the same as in cairo-png.c.
mask = alpha != 0
for channel in np.rollaxis(rgb, -1):
channel[mask] = (
(channel[mask].astype(int) * 255 + alpha[mask] // 2)
// alpha[mask])
return rgba
def _unmultiplied_rgba8888_to_premultiplied_argb32(rgba8888):
"""
Convert an unmultiplied RGBA8888 buffer to a premultiplied ARGB32 buffer.
"""
if sys.byteorder == "little":
argb32 = np.take(rgba8888, [2, 1, 0, 3], axis=2)
rgb24 = argb32[..., :-1]
alpha8 = argb32[..., -1:]
else:
argb32 = np.take(rgba8888, [3, 0, 1, 2], axis=2)
alpha8 = argb32[..., :1]
rgb24 = argb32[..., 1:]
# Only bother premultiplying when the alpha channel is not fully opaque,
# as the cost is not negligible. The unsafe cast is needed to do the
# multiplication in-place in an integer buffer.
if alpha8.min() != 0xff:
np.multiply(rgb24, alpha8 / 0xff, out=rgb24, casting="unsafe")
return argb32
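# Illustrative round trip on a single opaque pixel (hypothetical buffer),
# kept as a comment: fully opaque pixels survive the premultiply/unpremultiply
# cycle exactly; partially transparent ones may differ by rounding.
#
#     rgba = np.array([[[255, 0, 0, 255]]], dtype=np.uint8)
#     argb = _unmultiplied_rgba8888_to_premultiplied_argb32(rgba)
#     back = _premultiplied_argb32_to_unmultiplied_rgba8888(argb)
#     # np.array_equal(back, rgba) -> True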
def _check_and_log_subprocess(command, logger, **kwargs):
"""
Run *command* using `subprocess.check_output`. If it succeeds, return the
output (stdout and stderr); if not, raise an exception whose text includes
the failed command and captured output. Both the command and the output
are logged at DEBUG level on *logger*.
"""
logger.debug(command)
try:
report = subprocess.check_output(
command, stderr=subprocess.STDOUT, **kwargs)
except subprocess.CalledProcessError as exc:
raise RuntimeError(
'The command\n'
' {}\n'
'failed and generated the following output:\n'
'{}'
.format(command, exc.output.decode('utf-8')))
logger.debug(report)
return report
def _check_not_matrix(**kwargs):
"""
If any value in *kwargs* is a `np.matrix`, raise a TypeError with the key
name in its message.
"""
for k, v in kwargs.items():
if isinstance(v, np.matrix):
raise TypeError(f"Argument {k!r} cannot be a np.matrix")
def _check_in_list(values, **kwargs):
"""
For each *key, value* pair in *kwargs*, check that *value* is in *values*;
if not, raise an appropriate ValueError.
Examples
--------
>>> cbook._check_in_list(["foo", "bar"], arg=arg, other_arg=other_arg)
"""
for k, v in kwargs.items():
if v not in values:
raise ValueError(
"{!r} is not a valid value for {}; supported values are {}"
.format(v, k, ', '.join(map(repr, values))))
import tempfile
from numpy.testing import (assert_allclose, assert_almost_equal,
assert_array_equal, assert_array_almost_equal_nulp)
import numpy as np
import datetime
import pytest
import matplotlib.mlab as mlab
from matplotlib.cbook.deprecation import MatplotlibDeprecationWarning
'''
A lot of mlab.py has been deprecated in Matplotlib 2.2 and is scheduled for
removal in the future. The tests that use deprecated methods have a block
to catch the deprecation warning, and can be removed when the mlab code is
removed.
'''
class TestStride(object):
def get_base(self, x):
y = x
while y.base is not None:
y = y.base
return y
def calc_window_target(self, x, NFFT, noverlap=0, axis=0):
        '''This is an adaptation of the original window extraction
        algorithm, used to check that the new implementation produces
        the same result.'''
step = NFFT - noverlap
ind = np.arange(0, len(x) - NFFT + 1, step)
n = len(ind)
result = np.zeros((NFFT, n))
# do the ffts of the slices
for i in range(n):
result[:, i] = x[ind[i]:ind[i]+NFFT]
if axis == 1:
result = result.T
return result
@pytest.mark.parametrize('shape', [(), (10, 1)], ids=['0D', '2D'])
def test_stride_windows_invalid_input_shape(self, shape):
x = np.arange(np.prod(shape)).reshape(shape)
with pytest.raises(ValueError):
mlab.stride_windows(x, 5)
@pytest.mark.parametrize('n, noverlap',
[(0, None), (11, None), (2, 2), (2, 3)],
ids=['n less than 1', 'n greater than input',
'noverlap greater than n',
'noverlap equal to n'])
def test_stride_windows_invalid_params(self, n, noverlap):
x = np.arange(10)
with pytest.raises(ValueError):
mlab.stride_windows(x, n, noverlap)
@pytest.mark.parametrize('shape', [(), (10, 1)], ids=['0D', '2D'])
def test_stride_repeat_invalid_input_shape(self, shape):
x = np.arange(np.prod(shape)).reshape(shape)
with pytest.raises(ValueError):
mlab.stride_repeat(x, 5)
@pytest.mark.parametrize('axis', [-1, 2],
ids=['axis less than 0',
'axis greater than input shape'])
def test_stride_repeat_invalid_axis(self, axis):
x = np.array(0)
with pytest.raises(ValueError):
mlab.stride_repeat(x, 5, axis=axis)
def test_stride_repeat_n_lt_1_ValueError(self):
x = np.arange(10)
with pytest.raises(ValueError):
mlab.stride_repeat(x, 0)
@pytest.mark.parametrize('axis', [0, 1], ids=['axis0', 'axis1'])
@pytest.mark.parametrize('n', [1, 5], ids=['n1', 'n5'])
def test_stride_repeat(self, n, axis):
x = np.arange(10)
y = mlab.stride_repeat(x, n, axis=axis)
expected_shape = [10, 10]
expected_shape[axis] = n
yr = np.repeat(np.expand_dims(x, axis), n, axis=axis)
assert yr.shape == y.shape
assert_array_equal(yr, y)
assert tuple(expected_shape) == y.shape
assert self.get_base(y) is x
@pytest.mark.parametrize('axis', [0, 1], ids=['axis0', 'axis1'])
@pytest.mark.parametrize('n, noverlap',
[(1, 0), (5, 0), (15, 2), (13, -3)],
ids=['n1-noverlap0', 'n5-noverlap0',
'n15-noverlap2', 'n13-noverlapn3'])
def test_stride_windows(self, n, noverlap, axis):
x = np.arange(100)
y = mlab.stride_windows(x, n, noverlap=noverlap, axis=axis)
expected_shape = [0, 0]
expected_shape[axis] = n
expected_shape[1 - axis] = 100 // (n - noverlap)
yt = self.calc_window_target(x, n, noverlap=noverlap, axis=axis)
assert yt.shape == y.shape
assert_array_equal(yt, y)
assert tuple(expected_shape) == y.shape
assert self.get_base(y) is x
@pytest.mark.parametrize('axis', [0, 1], ids=['axis0', 'axis1'])
def test_stride_windows_n32_noverlap0_unflatten(self, axis):
n = 32
x = np.arange(n)[np.newaxis]
x1 = np.tile(x, (21, 1))
x2 = x1.flatten()
y = mlab.stride_windows(x2, n, axis=axis)
if axis == 0:
x1 = x1.T
assert y.shape == x1.shape
assert_array_equal(y, x1)
def test_stride_ensure_integer_type(self):
N = 100
x = np.full(N + 20, np.nan)
y = x[10:-10]
y[:] = 0.3
        # prior to #3845 this led to corrupt access
y_strided = mlab.stride_windows(y, n=33, noverlap=0.6)
assert_array_equal(y_strided, 0.3)
        # prior to #3845 this led to corrupt access
y_strided = mlab.stride_windows(y, n=33.3, noverlap=0)
assert_array_equal(y_strided, 0.3)
        # even prior to #3845 we could not find any problematic
        # configuration; however, let's be sure it's not accidentally
        # introduced
y_strided = mlab.stride_repeat(y, n=33.815)
assert_array_equal(y_strided, 0.3)
@pytest.fixture
def tempcsv():
with tempfile.TemporaryFile(suffix='csv', mode="w+", newline='') as fd:
yield fd
def test_csv2rec_names_with_comments(tempcsv):
tempcsv.write('# comment\n1,2,3\n4,5,6\n')
tempcsv.seek(0)
array = mlab._csv2rec(tempcsv, names='a,b,c')
assert len(array) == 2
assert len(array.dtype) == 3
@pytest.mark.parametrize('input, kwargs', [
('01/11/14\n'
'03/05/76 12:00:01 AM\n'
'07/09/83 5:17:34 PM\n'
'06/20/2054 2:31:45 PM\n'
'10/31/00 11:50:23 AM\n',
{}),
('11/01/14\n'
'05/03/76 12:00:01 AM\n'
'09/07/83 5:17:34 PM\n'
'20/06/2054 2:31:45 PM\n'
'31/10/00 11:50:23 AM\n',
{'dayfirst': True}),
('14/01/11\n'
'76/03/05 12:00:01 AM\n'
'83/07/09 5:17:34 PM\n'
'2054/06/20 2:31:45 PM\n'
'00/10/31 11:50:23 AM\n',
{'yearfirst': True}),
], ids=['usdate', 'dayfirst', 'yearfirst'])
def test_csv2rec_dates(tempcsv, input, kwargs):
tempcsv.write(input)
expected = [datetime.datetime(2014, 1, 11, 0, 0),
datetime.datetime(1976, 3, 5, 0, 0, 1),
datetime.datetime(1983, 7, 9, 17, 17, 34),
datetime.datetime(2054, 6, 20, 14, 31, 45),
datetime.datetime(2000, 10, 31, 11, 50, 23)]
tempcsv.seek(0)
array = mlab._csv2rec(tempcsv, names='a', **kwargs)
assert_array_equal(array['a'].tolist(), expected)
class TestWindow(object):
def setup(self):
np.random.seed(0)
n = 1000
self.sig_rand = np.random.standard_normal(n) + 100.
self.sig_ones = np.ones(n)
def check_window_apply_repeat(self, x, window, NFFT, noverlap):
        '''This is an adaptation of the original window application
        algorithm, used to check that the new implementation produces
        the same result.'''
step = NFFT - noverlap
ind = np.arange(0, len(x) - NFFT + 1, step)
n = len(ind)
result = np.zeros((NFFT, n))
if np.iterable(window):
windowVals = window
else:
windowVals = window(np.ones(NFFT, x.dtype))
# do the ffts of the slices
for i in range(n):
result[:, i] = windowVals * x[ind[i]:ind[i]+NFFT]
return result
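    # Illustrative sketch of the equivalence the stride/window tests below
    # rely on: windowing explicitly strided segments should match the loop
    # above, e.g. (for suitable NFFT/noverlap):
    #   segs = mlab.stride_windows(x, NFFT, noverlap, axis=0)
    #   assert_allclose(mlab.apply_window(segs, window, axis=0),
    #                   self.check_window_apply_repeat(x, window, NFFT,
    #                                                  noverlap), atol=1e-06)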
    def test_window_none_rand(self):
        res = mlab.window_none(self.sig_rand)
        assert_array_equal(res, self.sig_rand)
    def test_window_none_ones(self):
        res = mlab.window_none(self.sig_ones)
        assert_array_equal(res, self.sig_ones)
def test_window_hanning_rand(self):
targ = np.hanning(len(self.sig_rand)) * self.sig_rand
res = mlab.window_hanning(self.sig_rand)
assert_allclose(targ, res, atol=1e-06)
def test_window_hanning_ones(self):
targ = np.hanning(len(self.sig_ones))
res = mlab.window_hanning(self.sig_ones)
assert_allclose(targ, res, atol=1e-06)
def test_apply_window_1D_axis1_ValueError(self):
x = self.sig_rand
window = mlab.window_hanning
with pytest.raises(ValueError):
mlab.apply_window(x, window, axis=1, return_window=False)
def test_apply_window_1D_els_wrongsize_ValueError(self):
x = self.sig_rand
window = mlab.window_hanning(np.ones(x.shape[0]-1))
with pytest.raises(ValueError):
mlab.apply_window(x, window)
def test_apply_window_0D_ValueError(self):
x = np.array(0)
window = mlab.window_hanning
with pytest.raises(ValueError):
mlab.apply_window(x, window, axis=1, return_window=False)
def test_apply_window_3D_ValueError(self):
x = self.sig_rand[np.newaxis][np.newaxis]
window = mlab.window_hanning
with pytest.raises(ValueError):
mlab.apply_window(x, window, axis=1, return_window=False)
def test_apply_window_hanning_1D(self):
x = self.sig_rand
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[0]))
y, window2 = mlab.apply_window(x, window, return_window=True)
yt = window(x)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
assert_array_equal(window1, window2)
def test_apply_window_hanning_1D_axis0(self):
x = self.sig_rand
window = mlab.window_hanning
y = mlab.apply_window(x, window, axis=0, return_window=False)
yt = window(x)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_els_1D_axis0(self):
x = self.sig_rand
window = mlab.window_hanning(np.ones(x.shape[0]))
window1 = mlab.window_hanning
y = mlab.apply_window(x, window, axis=0, return_window=False)
yt = window1(x)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_2D_axis0(self):
x = np.random.standard_normal([1000, 10]) + 100.
window = mlab.window_hanning
y = mlab.apply_window(x, window, axis=0, return_window=False)
yt = np.zeros_like(x)
for i in range(x.shape[1]):
yt[:, i] = window(x[:, i])
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_els1_2D_axis0(self):
x = np.random.standard_normal([1000, 10]) + 100.
window = mlab.window_hanning(np.ones(x.shape[0]))
window1 = mlab.window_hanning
y = mlab.apply_window(x, window, axis=0, return_window=False)
yt = np.zeros_like(x)
for i in range(x.shape[1]):
yt[:, i] = window1(x[:, i])
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_els2_2D_axis0(self):
x = np.random.standard_normal([1000, 10]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[0]))
y, window2 = mlab.apply_window(x, window, axis=0, return_window=True)
yt = np.zeros_like(x)
for i in range(x.shape[1]):
yt[:, i] = window1*x[:, i]
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
assert_array_equal(window1, window2)
def test_apply_window_hanning_els3_2D_axis0(self):
x = np.random.standard_normal([1000, 10]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[0]))
y, window2 = mlab.apply_window(x, window, axis=0, return_window=True)
yt = mlab.apply_window(x, window1, axis=0, return_window=False)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
assert_array_equal(window1, window2)
def test_apply_window_hanning_2D_axis1(self):
x = np.random.standard_normal([10, 1000]) + 100.
window = mlab.window_hanning
y = mlab.apply_window(x, window, axis=1, return_window=False)
yt = np.zeros_like(x)
for i in range(x.shape[0]):
yt[i, :] = window(x[i, :])
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
    def test_apply_window_hanning_2D_els1_axis1(self):
x = np.random.standard_normal([10, 1000]) + 100.
window = mlab.window_hanning(np.ones(x.shape[1]))
window1 = mlab.window_hanning
y = mlab.apply_window(x, window, axis=1, return_window=False)
yt = np.zeros_like(x)
for i in range(x.shape[0]):
yt[i, :] = window1(x[i, :])
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_2D_els2_axis1(self):
x = np.random.standard_normal([10, 1000]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[1]))
y, window2 = mlab.apply_window(x, window, axis=1, return_window=True)
yt = np.zeros_like(x)
for i in range(x.shape[0]):
yt[i, :] = window1 * x[i, :]
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
assert_array_equal(window1, window2)
def test_apply_window_hanning_2D_els3_axis1(self):
x = np.random.standard_normal([10, 1000]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[1]))
y = mlab.apply_window(x, window, axis=1, return_window=False)
yt = mlab.apply_window(x, window1, axis=1, return_window=False)
assert yt.shape == y.shape
assert x.shape == y.shape
assert_allclose(yt, y, atol=1e-06)
    def test_apply_window_stride_windows_hanning_2D_n13_noverlap2_axis0(self):
x = self.sig_rand
window = mlab.window_hanning
yi = mlab.stride_windows(x, n=13, noverlap=2, axis=0)
y = mlab.apply_window(yi, window, axis=0, return_window=False)
yt = self.check_window_apply_repeat(x, window, 13, 2)
assert yt.shape == y.shape
assert x.shape != y.shape
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_2D_stack_axis1(self):
ydata = np.arange(32)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1 = mlab.apply_window(ydata1, mlab.window_hanning)
ycontrol2 = mlab.window_hanning(ydata2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
result = mlab.apply_window(ydata, mlab.window_hanning, axis=1,
return_window=False)
assert_allclose(ycontrol, result, atol=1e-08)
def test_apply_window_hanning_2D_stack_windows_axis1(self):
ydata = np.arange(32)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1 = mlab.apply_window(ydata1, mlab.window_hanning)
ycontrol2 = mlab.window_hanning(ydata2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
result = mlab.apply_window(ydata, mlab.window_hanning, axis=1,
return_window=False)
assert_allclose(ycontrol, result, atol=1e-08)
def test_apply_window_hanning_2D_stack_windows_axis1_unflatten(self):
n = 32
ydata = np.arange(n)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1 = mlab.apply_window(ydata1, mlab.window_hanning)
ycontrol2 = mlab.window_hanning(ydata2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
ydata = ydata.flatten()
ydata1 = mlab.stride_windows(ydata, 32, noverlap=0, axis=0)
result = mlab.apply_window(ydata1, mlab.window_hanning, axis=0,
return_window=False)
assert_allclose(ycontrol.T, result, atol=1e-08)
class TestDetrend(object):
def setup(self):
np.random.seed(0)
n = 1000
x = np.linspace(0., 100, n)
self.sig_zeros = np.zeros(n)
self.sig_off = self.sig_zeros + 100.
self.sig_slope = np.linspace(-10., 90., n)
self.sig_slope_mean = x - x.mean()
sig_rand = np.random.standard_normal(n)
sig_sin = np.sin(x*2*np.pi/(n/100))
sig_rand -= sig_rand.mean()
sig_sin -= sig_sin.mean()
self.sig_base = sig_rand + sig_sin
self.atol = 1e-08
    def test_detrend_none_0D_zeros(self):
        input = 0.
        targ = input
        res = mlab.detrend_none(input)
        assert res == targ
    def test_detrend_none_0D_zeros_axis1(self):
        input = 0.
        targ = input
        res = mlab.detrend_none(input, axis=1)
        assert res == targ
    def test_detrend_str_none_0D_zeros(self):
        input = 0.
        targ = input
        res = mlab.detrend(input, key='none')
        assert res == targ
    def test_detrend_detrend_none_0D_zeros(self):
        input = 0.
        targ = input
        res = mlab.detrend(input, key=mlab.detrend_none)
        assert res == targ
    def test_detrend_none_0D_off(self):
        input = 5.5
        targ = input
        res = mlab.detrend_none(input)
        assert res == targ
def test_detrend_none_1D_off(self):
input = self.sig_off
targ = input
res = mlab.detrend_none(input)
assert_array_equal(res, targ)
def test_detrend_none_1D_slope(self):
input = self.sig_slope
targ = input
res = mlab.detrend_none(input)
assert_array_equal(res, targ)
def test_detrend_none_1D_base(self):
input = self.sig_base
targ = input
res = mlab.detrend_none(input)
assert_array_equal(res, targ)
def test_detrend_none_1D_base_slope_off_list(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = input.tolist()
res = mlab.detrend_none(input.tolist())
assert res == targ
def test_detrend_none_2D(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
input = np.vstack(arri)
targ = input
res = mlab.detrend_none(input)
assert_array_equal(res, targ)
def test_detrend_none_2D_T(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
input = np.vstack(arri)
targ = input
res = mlab.detrend_none(input.T)
assert_array_equal(res.T, targ)
def test_detrend_mean_0D_zeros(self):
input = 0.
targ = 0.
res = mlab.detrend_mean(input)
assert_almost_equal(res, targ)
def test_detrend_str_mean_0D_zeros(self):
input = 0.
targ = 0.
res = mlab.detrend(input, key='mean')
assert_almost_equal(res, targ)
def test_detrend_detrend_mean_0D_zeros(self):
input = 0.
targ = 0.
res = mlab.detrend(input, key=mlab.detrend_mean)
assert_almost_equal(res, targ)
def test_detrend_mean_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend_mean(input)
assert_almost_equal(res, targ)
def test_detrend_str_mean_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend(input, key='mean')
assert_almost_equal(res, targ)
def test_detrend_detrend_mean_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend(input, key=mlab.detrend_mean)
assert_almost_equal(res, targ)
def test_detrend_mean_1D_zeros(self):
input = self.sig_zeros
targ = self.sig_zeros
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_mean_1D_base(self):
input = self.sig_base
targ = self.sig_base
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_mean_1D_base_off(self):
input = self.sig_base + self.sig_off
targ = self.sig_base
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_mean_1D_base_slope(self):
input = self.sig_base + self.sig_slope
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_mean_1D_base_slope_off(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_1D_base_slope_off_axis0(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input, axis=0)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_1D_base_slope_off_list(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input.tolist())
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_1D_base_slope_off_list_axis0(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input.tolist(), axis=0)
assert_allclose(res, targ, atol=1e-08)
def test_demean_0D_off(self):
input = 5.5
targ = 0.
with pytest.warns(MatplotlibDeprecationWarning):
res = mlab.demean(input, axis=None)
assert_almost_equal(res, targ)
def test_demean_1D_base_slope_off(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
with pytest.warns(MatplotlibDeprecationWarning):
res = mlab.demean(input)
assert_allclose(res, targ, atol=1e-08)
def test_demean_1D_base_slope_off_axis0(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
with pytest.warns(MatplotlibDeprecationWarning):
res = mlab.demean(input, axis=0)
assert_allclose(res, targ, atol=1e-08)
def test_demean_1D_base_slope_off_list(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
with pytest.warns(MatplotlibDeprecationWarning):
res = mlab.demean(input.tolist())
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_2D_default(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_2D_none(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend_mean(input, axis=None)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_mean_2D_none_T(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri).T
targ = np.vstack(arrt)
res = mlab.detrend_mean(input, axis=None)
assert_allclose(res.T, targ,
atol=1e-08)
def test_detrend_mean_2D_axis0(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend_mean(input, axis=0)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_mean_2D_axis1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend_mean(input, axis=1)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_mean_2D_axism1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend_mean(input, axis=-1)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_2D_default(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_2D_none(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input, axis=None)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_str_mean_2D_axis0(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend(input, key='mean', axis=0)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_str_constant_2D_none_T(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri).T
targ = np.vstack(arrt)
res = mlab.detrend(input, key='constant', axis=None)
assert_allclose(res.T, targ,
atol=1e-08)
def test_detrend_str_default_2D_axis1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input, key='default', axis=1)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_detrend_mean_2D_axis0(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend(input, key=mlab.detrend_mean, axis=0)
assert_allclose(res, targ,
atol=1e-08)
def test_demean_2D_default(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
with pytest.warns(MatplotlibDeprecationWarning):
res = mlab.demean(input)
assert_allclose(res, targ,
atol=1e-08)
def test_demean_2D_none(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri)
targ = np.vstack(arrt)
with pytest.warns(MatplotlibDeprecationWarning):
res = mlab.demean(input, axis=None)
assert_allclose(res, targ,
atol=1e-08)
def test_demean_2D_axis0(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
with pytest.warns(MatplotlibDeprecationWarning):
res = mlab.demean(input, axis=0)
assert_allclose(res, targ,
atol=1e-08)
def test_demean_2D_axis1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri)
targ = np.vstack(arrt)
with pytest.warns(MatplotlibDeprecationWarning):
res = mlab.demean(input, axis=1)
assert_allclose(res, targ,
atol=1e-08)
def test_demean_2D_axism1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri)
targ = np.vstack(arrt)
with pytest.warns(MatplotlibDeprecationWarning):
res = mlab.demean(input, axis=-1)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_bad_key_str_ValueError(self):
input = self.sig_slope[np.newaxis]
with pytest.raises(ValueError):
mlab.detrend(input, key='spam')
def test_detrend_bad_key_var_ValueError(self):
input = self.sig_slope[np.newaxis]
with pytest.raises(ValueError):
mlab.detrend(input, key=5)
def test_detrend_mean_0D_d0_ValueError(self):
input = 5.5
with pytest.raises(ValueError):
mlab.detrend_mean(input, axis=0)
def test_detrend_0D_d0_ValueError(self):
input = 5.5
with pytest.raises(ValueError):
mlab.detrend(input, axis=0)
def test_detrend_mean_1D_d1_ValueError(self):
input = self.sig_slope
with pytest.raises(ValueError):
mlab.detrend_mean(input, axis=1)
def test_detrend_1D_d1_ValueError(self):
input = self.sig_slope
with pytest.raises(ValueError):
mlab.detrend(input, axis=1)
def test_demean_1D_d1_ValueError(self):
input = self.sig_slope
with pytest.raises(ValueError), \
pytest.warns(MatplotlibDeprecationWarning):
mlab.demean(input, axis=1)
def test_detrend_mean_2D_d2_ValueError(self):
input = self.sig_slope[np.newaxis]
with pytest.raises(ValueError):
mlab.detrend_mean(input, axis=2)
def test_detrend_2D_d2_ValueError(self):
input = self.sig_slope[np.newaxis]
with pytest.raises(ValueError):
mlab.detrend(input, axis=2)
def test_demean_2D_d2_ValueError(self):
input = self.sig_slope[np.newaxis]
with pytest.raises(ValueError), \
pytest.warns(MatplotlibDeprecationWarning):
mlab.demean(input, axis=2)
def test_detrend_linear_0D_zeros(self):
input = 0.
targ = 0.
res = mlab.detrend_linear(input)
assert_almost_equal(res, targ)
def test_detrend_linear_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend_linear(input)
assert_almost_equal(res, targ)
def test_detrend_str_linear_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend(input, key='linear')
assert_almost_equal(res, targ)
def test_detrend_detrend_linear_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend(input, key=mlab.detrend_linear)
assert_almost_equal(res, targ)
def test_detrend_linear_1d_off(self):
input = self.sig_off
targ = self.sig_zeros
res = mlab.detrend_linear(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_linear_1d_slope(self):
input = self.sig_slope
targ = self.sig_zeros
res = mlab.detrend_linear(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_linear_1d_slope_off(self):
input = self.sig_slope + self.sig_off
targ = self.sig_zeros
res = mlab.detrend_linear(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_str_linear_1d_slope_off(self):
input = self.sig_slope + self.sig_off
targ = self.sig_zeros
res = mlab.detrend(input, key='linear')
assert_allclose(res, targ, atol=self.atol)
def test_detrend_detrend_linear_1d_slope_off(self):
input = self.sig_slope + self.sig_off
targ = self.sig_zeros
res = mlab.detrend(input, key=mlab.detrend_linear)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_linear_1d_slope_off_list(self):
input = self.sig_slope + self.sig_off
targ = self.sig_zeros
res = mlab.detrend_linear(input.tolist())
assert_allclose(res, targ, atol=self.atol)
def test_detrend_linear_2D_ValueError(self):
input = self.sig_slope[np.newaxis]
with pytest.raises(ValueError):
mlab.detrend_linear(input)
def test_detrend_str_linear_2d_slope_off_axis0(self):
arri = [self.sig_off,
self.sig_slope,
self.sig_slope + self.sig_off]
arrt = [self.sig_zeros,
self.sig_zeros,
self.sig_zeros]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend(input, key='linear', axis=0)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_detrend_linear_1d_slope_off_axis1(self):
arri = [self.sig_off,
self.sig_slope,
self.sig_slope + self.sig_off]
arrt = [self.sig_zeros,
self.sig_zeros,
self.sig_zeros]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend(input, key=mlab.detrend_linear, axis=0)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_str_linear_2d_slope_off_axis0_notranspose(self):
arri = [self.sig_off,
self.sig_slope,
self.sig_slope + self.sig_off]
arrt = [self.sig_zeros,
self.sig_zeros,
self.sig_zeros]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input, key='linear', axis=1)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_detrend_linear_1d_slope_off_axis1_notranspose(self):
arri = [self.sig_off,
self.sig_slope,
self.sig_slope + self.sig_off]
arrt = [self.sig_zeros,
self.sig_zeros,
self.sig_zeros]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input, key=mlab.detrend_linear, axis=1)
assert_allclose(res, targ, atol=self.atol)
@pytest.mark.parametrize('iscomplex', [False, True],
ids=['real', 'complex'], scope='class')
@pytest.mark.parametrize('sides', ['onesided', 'twosided', 'default'],
scope='class')
@pytest.mark.parametrize(
'fstims,len_x,NFFT_density,nover_density,pad_to_density,pad_to_spectrum',
[
([], None, -1, -1, -1, -1),
([4], None, -1, -1, -1, -1),
([4, 5, 10], None, -1, -1, -1, -1),
([], None, None, -1, -1, None),
([], None, -1, -1, None, None),
([], None, None, -1, None, None),
([], 1024, 512, -1, -1, 128),
([], 256, -1, -1, 33, 257),
([], 255, 33, -1, -1, None),
([], 256, 128, -1, 256, 256),
([], None, -1, 32, -1, -1),
],
ids=[
'nosig',
'Fs4',
'FsAll',
'nosig_noNFFT',
'nosig_nopad_to',
'nosig_noNFFT_no_pad_to',
'nosig_trim',
'nosig_odd',
'nosig_oddlen',
'nosig_stretch',
'nosig_overlap',
],
scope='class')
class TestSpectral(object):
@pytest.fixture(scope='class', autouse=True)
def stim(self, request, fstims, iscomplex, sides, len_x, NFFT_density,
nover_density, pad_to_density, pad_to_spectrum):
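        # Build a signal made of the requested sinusoids sampled at Fs and
        # precompute the frequency and time axes that psd/csd/specgram and the
        # *_spectrum helpers are expected to return for this parameter
        # combination; the results are attached to the class because the
        # fixture is class-scoped (see the note near request.cls below).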
Fs = 100.
x = np.arange(0, 10, 1 / Fs)
if len_x is not None:
x = x[:len_x]
# get the stimulus frequencies, defaulting to None
fstims = [Fs / fstim for fstim in fstims]
# get the constants, default to calculated values
if NFFT_density is None:
NFFT_density_real = 256
elif NFFT_density < 0:
NFFT_density_real = NFFT_density = 100
else:
NFFT_density_real = NFFT_density
if nover_density is None:
nover_density_real = 0
elif nover_density < 0:
nover_density_real = nover_density = NFFT_density_real // 2
else:
nover_density_real = nover_density
if pad_to_density is None:
pad_to_density_real = NFFT_density_real
elif pad_to_density < 0:
pad_to_density = int(2**np.ceil(np.log2(NFFT_density_real)))
pad_to_density_real = pad_to_density
else:
pad_to_density_real = pad_to_density
if pad_to_spectrum is None:
pad_to_spectrum_real = len(x)
elif pad_to_spectrum < 0:
pad_to_spectrum_real = pad_to_spectrum = len(x)
else:
pad_to_spectrum_real = pad_to_spectrum
if pad_to_spectrum is None:
NFFT_spectrum_real = NFFT_spectrum = pad_to_spectrum_real
else:
NFFT_spectrum_real = NFFT_spectrum = len(x)
nover_spectrum_real = nover_spectrum = 0
NFFT_specgram = NFFT_density
nover_specgram = nover_density
pad_to_specgram = pad_to_density
NFFT_specgram_real = NFFT_density_real
nover_specgram_real = nover_density_real
if sides == 'onesided' or (sides == 'default' and not iscomplex):
# frequencies for specgram, psd, and csd
# need to handle even and odd differently
if pad_to_density_real % 2:
freqs_density = np.linspace(0, Fs / 2,
num=pad_to_density_real,
endpoint=False)[::2]
else:
freqs_density = np.linspace(0, Fs / 2,
num=pad_to_density_real // 2 + 1)
# frequencies for complex, magnitude, angle, and phase spectrums
# need to handle even and odd differently
if pad_to_spectrum_real % 2:
freqs_spectrum = np.linspace(0, Fs / 2,
num=pad_to_spectrum_real,
endpoint=False)[::2]
else:
freqs_spectrum = np.linspace(0, Fs / 2,
num=pad_to_spectrum_real // 2 + 1)
else:
# frequencies for specgram, psd, and csd
            # need to handle even and odd differently
if pad_to_density_real % 2:
freqs_density = np.linspace(-Fs / 2, Fs / 2,
num=2 * pad_to_density_real,
endpoint=False)[1::2]
else:
freqs_density = np.linspace(-Fs / 2, Fs / 2,
num=pad_to_density_real,
endpoint=False)
# frequencies for complex, magnitude, angle, and phase spectrums
# need to handle even and odd differently
if pad_to_spectrum_real % 2:
freqs_spectrum = np.linspace(-Fs / 2, Fs / 2,
num=2 * pad_to_spectrum_real,
endpoint=False)[1::2]
else:
freqs_spectrum = np.linspace(-Fs / 2, Fs / 2,
num=pad_to_spectrum_real,
endpoint=False)
freqs_specgram = freqs_density
# time points for specgram
t_start = NFFT_specgram_real // 2
t_stop = len(x) - NFFT_specgram_real // 2 + 1
t_step = NFFT_specgram_real - nover_specgram_real
t_specgram = x[t_start:t_stop:t_step]
if NFFT_specgram_real % 2:
t_specgram += 1 / Fs / 2
if len(t_specgram) == 0:
t_specgram = np.array([NFFT_specgram_real / (2 * Fs)])
t_spectrum = np.array([NFFT_spectrum_real / (2 * Fs)])
t_density = t_specgram
y = np.zeros_like(x)
for i, fstim in enumerate(fstims):
y += np.sin(fstim * x * np.pi * 2) * 10**i
if iscomplex:
y = y.astype('complex')
# Interestingly, the instance on which this fixture is called is not
# the same as the one on which a test is run. So we need to modify the
# class itself when using a class-scoped fixture.
cls = request.cls
cls.Fs = Fs
cls.sides = sides
cls.fstims = fstims
cls.NFFT_density = NFFT_density
cls.nover_density = nover_density
cls.pad_to_density = pad_to_density
cls.NFFT_spectrum = NFFT_spectrum
cls.nover_spectrum = nover_spectrum
cls.pad_to_spectrum = pad_to_spectrum
cls.NFFT_specgram = NFFT_specgram
cls.nover_specgram = nover_specgram
cls.pad_to_specgram = pad_to_specgram
cls.t_specgram = t_specgram
cls.t_density = t_density
cls.t_spectrum = t_spectrum
cls.y = y
cls.freqs_density = freqs_density
cls.freqs_spectrum = freqs_spectrum
cls.freqs_specgram = freqs_specgram
cls.NFFT_density_real = NFFT_density_real
def check_freqs(self, vals, targfreqs, resfreqs, fstims):
assert resfreqs.argmin() == 0
assert resfreqs.argmax() == len(resfreqs)-1
assert_allclose(resfreqs, targfreqs, atol=1e-06)
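        # each stimulus frequency should show up as a local peak: the bin
        # closest to fstim must exceed the bins two steps away on either side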
for fstim in fstims:
i = np.abs(resfreqs - fstim).argmin()
assert vals[i] > vals[i+2]
assert vals[i] > vals[i-2]
def check_maxfreq(self, spec, fsp, fstims):
# skip the test if there are no frequencies
if len(fstims) == 0:
return
# if twosided, do the test for each side
if fsp.min() < 0:
fspa = np.abs(fsp)
zeroind = fspa.argmin()
self.check_maxfreq(spec[:zeroind], fspa[:zeroind], fstims)
self.check_maxfreq(spec[zeroind:], fspa[zeroind:], fstims)
return
fstimst = fstims[:]
spect = spec.copy()
# go through each peak and make sure it is correctly the maximum peak
while fstimst:
maxind = spect.argmax()
maxfreq = fsp[maxind]
assert_almost_equal(maxfreq, fstimst[-1])
del fstimst[-1]
spect[maxind-5:maxind+5] = 0
def test_spectral_helper_raises_complex_same_data(self):
# test that mode 'complex' cannot be used if x is not y
with pytest.raises(ValueError):
mlab._spectral_helper(x=self.y, y=self.y+1, mode='complex')
def test_spectral_helper_raises_magnitude_same_data(self):
# test that mode 'magnitude' cannot be used if x is not y
with pytest.raises(ValueError):
mlab._spectral_helper(x=self.y, y=self.y+1, mode='magnitude')
def test_spectral_helper_raises_angle_same_data(self):
# test that mode 'angle' cannot be used if x is not y
with pytest.raises(ValueError):
mlab._spectral_helper(x=self.y, y=self.y+1, mode='angle')
def test_spectral_helper_raises_phase_same_data(self):
# test that mode 'phase' cannot be used if x is not y
with pytest.raises(ValueError):
mlab._spectral_helper(x=self.y, y=self.y+1, mode='phase')
def test_spectral_helper_raises_unknown_mode(self):
# test that unknown value for mode cannot be used
with pytest.raises(ValueError):
mlab._spectral_helper(x=self.y, mode='spam')
def test_spectral_helper_raises_unknown_sides(self):
# test that unknown value for sides cannot be used
with pytest.raises(ValueError):
mlab._spectral_helper(x=self.y, y=self.y, sides='eggs')
def test_spectral_helper_raises_noverlap_gt_NFFT(self):
# test that noverlap cannot be larger than NFFT
with pytest.raises(ValueError):
mlab._spectral_helper(x=self.y, y=self.y, NFFT=10, noverlap=20)
def test_spectral_helper_raises_noverlap_eq_NFFT(self):
# test that noverlap cannot be equal to NFFT
with pytest.raises(ValueError):
mlab._spectral_helper(x=self.y, NFFT=10, noverlap=10)
def test_spectral_helper_raises_winlen_ne_NFFT(self):
# test that the window length cannot be different from NFFT
with pytest.raises(ValueError):
mlab._spectral_helper(x=self.y, y=self.y, NFFT=10,
window=np.ones(9))
def test_single_spectrum_helper_raises_mode_default(self):
# test that mode 'default' cannot be used with _single_spectrum_helper
with pytest.raises(ValueError):
mlab._single_spectrum_helper(x=self.y, mode='default')
def test_single_spectrum_helper_raises_mode_psd(self):
# test that mode 'psd' cannot be used with _single_spectrum_helper
with pytest.raises(ValueError):
mlab._single_spectrum_helper(x=self.y, mode='psd')
def test_spectral_helper_psd(self):
freqs = self.freqs_density
spec, fsp, t = mlab._spectral_helper(x=self.y, y=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
mode='psd')
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_density, atol=1e-06)
assert spec.shape[0] == freqs.shape[0]
assert spec.shape[1] == self.t_specgram.shape[0]
def test_spectral_helper_magnitude_specgram(self):
freqs = self.freqs_specgram
spec, fsp, t = mlab._spectral_helper(x=self.y, y=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='magnitude')
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert spec.shape[0] == freqs.shape[0]
assert spec.shape[1] == self.t_specgram.shape[0]
def test_spectral_helper_magnitude_magnitude_spectrum(self):
freqs = self.freqs_spectrum
spec, fsp, t = mlab._spectral_helper(x=self.y, y=self.y,
NFFT=self.NFFT_spectrum,
Fs=self.Fs,
noverlap=self.nover_spectrum,
pad_to=self.pad_to_spectrum,
sides=self.sides,
mode='magnitude')
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_spectrum, atol=1e-06)
assert spec.shape[0] == freqs.shape[0]
assert spec.shape[1] == 1
def test_csd(self):
freqs = self.freqs_density
spec, fsp = mlab.csd(x=self.y, y=self.y+1,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
assert_allclose(fsp, freqs, atol=1e-06)
assert spec.shape == freqs.shape
def test_csd_padding(self):
"""Test zero padding of csd(). """
if self.NFFT_density is None: # for derived classes
return
sargs = dict(x=self.y, y=self.y+1, Fs=self.Fs, window=mlab.window_none,
sides=self.sides)
spec0, _ = mlab.csd(NFFT=self.NFFT_density, **sargs)
spec1, _ = mlab.csd(NFFT=self.NFFT_density*2, **sargs)
assert_almost_equal(np.sum(np.conjugate(spec0)*spec0).real,
np.sum(np.conjugate(spec1/2)*spec1/2).real)
def test_psd(self):
freqs = self.freqs_density
spec, fsp = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
assert spec.shape == freqs.shape
self.check_freqs(spec, freqs, fsp, self.fstims)
def test_psd_detrend_mean_func_offset(self):
if self.NFFT_density is None:
return
freqs = self.freqs_density
ydata = np.zeros(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ydata = np.vstack([ydata1, ydata2])
ydata = np.tile(ydata, (20, 1))
ydatab = ydata.T.flatten()
ydata = ydata.flatten()
ycontrol = np.zeros_like(ydata)
spec_g, fsp_g = mlab.psd(x=ydata,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=mlab.detrend_mean)
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=mlab.detrend_mean)
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides)
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
with pytest.raises(AssertionError):
assert_allclose(spec_b, spec_c, atol=1e-08)
def test_psd_detrend_mean_str_offset(self):
if self.NFFT_density is None:
return
freqs = self.freqs_density
ydata = np.zeros(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ydata = np.vstack([ydata1, ydata2])
ydata = np.tile(ydata, (20, 1))
ydatab = ydata.T.flatten()
ydata = ydata.flatten()
ycontrol = np.zeros_like(ydata)
spec_g, fsp_g = mlab.psd(x=ydata,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend='mean')
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend='mean')
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides)
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
with pytest.raises(AssertionError):
assert_allclose(spec_b, spec_c, atol=1e-08)
def test_psd_detrend_linear_func_trend(self):
if self.NFFT_density is None:
return
freqs = self.freqs_density
ydata = np.arange(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ydata = np.vstack([ydata1, ydata2])
ydata = np.tile(ydata, (20, 1))
ydatab = ydata.T.flatten()
ydata = ydata.flatten()
ycontrol = np.zeros_like(ydata)
spec_g, fsp_g = mlab.psd(x=ydata,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=mlab.detrend_linear)
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=mlab.detrend_linear)
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides)
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
with pytest.raises(AssertionError):
assert_allclose(spec_b, spec_c, atol=1e-08)
def test_psd_detrend_linear_str_trend(self):
if self.NFFT_density is None:
return
freqs = self.freqs_density
ydata = np.arange(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ydata = np.vstack([ydata1, ydata2])
ydata = np.tile(ydata, (20, 1))
ydatab = ydata.T.flatten()
ydata = ydata.flatten()
ycontrol = np.zeros_like(ydata)
spec_g, fsp_g = mlab.psd(x=ydata,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend='linear')
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend='linear')
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides)
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
with pytest.raises(AssertionError):
assert_allclose(spec_b, spec_c, atol=1e-08)
def test_psd_window_hanning(self):
if self.NFFT_density is None:
return
freqs = self.freqs_density
ydata = np.arange(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1, windowVals = mlab.apply_window(ydata1,
mlab.window_hanning,
return_window=True)
ycontrol2 = mlab.window_hanning(ydata2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
ydatab = ydata.T.flatten()
ydataf = ydata.flatten()
ycontrol = ycontrol.flatten()
spec_g, fsp_g = mlab.psd(x=ydataf,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
window=mlab.window_hanning)
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
window=mlab.window_hanning)
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
window=mlab.window_none)
spec_c *= len(ycontrol1)/(np.abs(windowVals)**2).sum()
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
with pytest.raises(AssertionError):
assert_allclose(spec_b, spec_c, atol=1e-08)
def test_psd_window_hanning_detrend_linear(self):
if self.NFFT_density is None:
return
freqs = self.freqs_density
ydata = np.arange(self.NFFT_density)
ycontrol = np.zeros(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1 = ycontrol
ycontrol2 = ycontrol
ycontrol1, windowVals = mlab.apply_window(ycontrol1,
mlab.window_hanning,
return_window=True)
ycontrol2 = mlab.window_hanning(ycontrol2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
ydatab = ydata.T.flatten()
ydataf = ydata.flatten()
ycontrol = ycontrol.flatten()
spec_g, fsp_g = mlab.psd(x=ydataf,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=mlab.detrend_linear,
window=mlab.window_hanning)
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=mlab.detrend_linear,
window=mlab.window_hanning)
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
window=mlab.window_none)
spec_c *= len(ycontrol1)/(np.abs(windowVals)**2).sum()
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
with pytest.raises(AssertionError):
assert_allclose(spec_b, spec_c, atol=1e-08)
def test_psd_windowarray(self):
freqs = self.freqs_density
spec, fsp = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=np.ones(self.NFFT_density_real))
assert_allclose(fsp, freqs, atol=1e-06)
assert spec.shape == freqs.shape
def test_psd_windowarray_scale_by_freq(self):
freqs = self.freqs_density
win = mlab.window_hanning(np.ones(self.NFFT_density_real))
spec, fsp = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=mlab.window_hanning)
spec_s, fsp_s = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=mlab.window_hanning,
scale_by_freq=True)
spec_n, fsp_n = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=mlab.window_hanning,
scale_by_freq=False)
assert_array_equal(fsp, fsp_s)
assert_array_equal(fsp, fsp_n)
assert_array_equal(spec, spec_s)
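        # The final check reflects the two normalizations (assuming mlab's
        # usual conventions): with scale_by_freq=True the raw spectrum is
        # divided by Fs * sum(win**2), without it by sum(win)**2, so
        # spec_s * sum(win**2) and spec_n / Fs * sum(win)**2 should agree.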
assert_allclose(spec_s*(win**2).sum(),
spec_n/self.Fs*win.sum()**2,
atol=1e-08)
def test_complex_spectrum(self):
freqs = self.freqs_spectrum
spec, fsp = mlab.complex_spectrum(x=self.y,
Fs=self.Fs,
sides=self.sides,
pad_to=self.pad_to_spectrum)
assert_allclose(fsp, freqs, atol=1e-06)
assert spec.shape == freqs.shape
def test_magnitude_spectrum(self):
freqs = self.freqs_spectrum
spec, fsp = mlab.magnitude_spectrum(x=self.y,
Fs=self.Fs,
sides=self.sides,
pad_to=self.pad_to_spectrum)
assert spec.shape == freqs.shape
self.check_maxfreq(spec, fsp, self.fstims)
self.check_freqs(spec, freqs, fsp, self.fstims)
def test_angle_spectrum(self):
freqs = self.freqs_spectrum
spec, fsp = mlab.angle_spectrum(x=self.y,
Fs=self.Fs,
sides=self.sides,
pad_to=self.pad_to_spectrum)
assert_allclose(fsp, freqs, atol=1e-06)
assert spec.shape == freqs.shape
def test_phase_spectrum(self):
freqs = self.freqs_spectrum
spec, fsp = mlab.phase_spectrum(x=self.y,
Fs=self.Fs,
sides=self.sides,
pad_to=self.pad_to_spectrum)
assert_allclose(fsp, freqs, atol=1e-06)
assert spec.shape == freqs.shape
def test_specgram_auto(self):
freqs = self.freqs_specgram
spec, fsp, t = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides)
specm = np.mean(spec, axis=1)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert spec.shape[0] == freqs.shape[0]
assert spec.shape[1] == self.t_specgram.shape[0]
# since we are using a single freq, all time slices
# should be about the same
if np.abs(spec.max()) != 0:
assert_allclose(np.diff(spec, axis=1).max()/np.abs(spec.max()), 0,
atol=1e-02)
self.check_freqs(specm, freqs, fsp, self.fstims)
def test_specgram_default(self):
freqs = self.freqs_specgram
spec, fsp, t = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='default')
specm = np.mean(spec, axis=1)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert spec.shape[0] == freqs.shape[0]
assert spec.shape[1] == self.t_specgram.shape[0]
# since we are using a single freq, all time slices
# should be about the same
if np.abs(spec.max()) != 0:
assert_allclose(np.diff(spec, axis=1).max()/np.abs(spec.max()), 0,
atol=1e-02)
self.check_freqs(specm, freqs, fsp, self.fstims)
def test_specgram_psd(self):
freqs = self.freqs_specgram
spec, fsp, t = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='psd')
specm = np.mean(spec, axis=1)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert spec.shape[0] == freqs.shape[0]
assert spec.shape[1] == self.t_specgram.shape[0]
# since we are using a single freq, all time slices
# should be about the same
if np.abs(spec.max()) != 0:
assert_allclose(np.diff(spec, axis=1).max()/np.abs(spec.max()), 0,
atol=1e-02)
self.check_freqs(specm, freqs, fsp, self.fstims)
def test_specgram_complex(self):
freqs = self.freqs_specgram
spec, fsp, t = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='complex')
specm = np.mean(np.abs(spec), axis=1)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert spec.shape[0] == freqs.shape[0]
assert spec.shape[1] == self.t_specgram.shape[0]
self.check_freqs(specm, freqs, fsp, self.fstims)
def test_specgram_magnitude(self):
freqs = self.freqs_specgram
spec, fsp, t = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='magnitude')
specm = np.mean(spec, axis=1)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert spec.shape[0] == freqs.shape[0]
assert spec.shape[1] == self.t_specgram.shape[0]
# since we are using a single freq, all time slices
# should be about the same
if np.abs(spec.max()) != 0:
assert_allclose(np.diff(spec, axis=1).max()/np.abs(spec.max()), 0,
atol=1e-02)
self.check_freqs(specm, freqs, fsp, self.fstims)
def test_specgram_angle(self):
freqs = self.freqs_specgram
spec, fsp, t = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='angle')
specm = np.mean(spec, axis=1)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert spec.shape[0] == freqs.shape[0]
assert spec.shape[1] == self.t_specgram.shape[0]
def test_specgram_phase(self):
freqs = self.freqs_specgram
spec, fsp, t = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='phase')
specm = np.mean(spec, axis=1)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert spec.shape[0] == freqs.shape[0]
assert spec.shape[1] == self.t_specgram.shape[0]
def test_specgram_warn_only1seg(self):
"""Warning should be raised if len(x) <= NFFT."""
with pytest.warns(UserWarning, match="Only one segment is calculated"):
mlab.specgram(x=self.y, NFFT=len(self.y), Fs=self.Fs)
def test_psd_csd_equal(self):
freqs = self.freqs_density
Pxx, freqsxx = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
Pxy, freqsxy = mlab.csd(x=self.y, y=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
assert_array_almost_equal_nulp(Pxx, Pxy)
assert_array_equal(freqsxx, freqsxy)
def test_specgram_auto_default_equal(self):
        '''test that mlab.specgram without a mode and with mode 'default'
        produce the same result'''
freqs = self.freqs_specgram
speca, freqspeca, ta = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides)
specb, freqspecb, tb = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='default')
assert_array_equal(speca, specb)
assert_array_equal(freqspeca, freqspecb)
assert_array_equal(ta, tb)
def test_specgram_auto_psd_equal(self):
        '''test that mlab.specgram without a mode and with mode 'psd'
        produce the same result'''
freqs = self.freqs_specgram
speca, freqspeca, ta = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides)
specc, freqspecc, tc = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='psd')
assert_array_equal(speca, specc)
assert_array_equal(freqspeca, freqspecc)
assert_array_equal(ta, tc)
def test_specgram_complex_mag_equivalent(self):
freqs = self.freqs_specgram
specc, freqspecc, tc = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='complex')
specm, freqspecm, tm = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='magnitude')
assert_array_equal(freqspecc, freqspecm)
assert_array_equal(tc, tm)
assert_allclose(np.abs(specc), specm, atol=1e-06)
def test_specgram_complex_angle_equivalent(self):
freqs = self.freqs_specgram
specc, freqspecc, tc = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='complex')
speca, freqspeca, ta = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='angle')
assert_array_equal(freqspecc, freqspeca)
assert_array_equal(tc, ta)
assert_allclose(np.angle(specc), speca, atol=1e-06)
def test_specgram_complex_phase_equivalent(self):
freqs = self.freqs_specgram
specc, freqspecc, tc = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='complex')
specp, freqspecp, tp = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='phase')
assert_array_equal(freqspecc, freqspecp)
assert_array_equal(tc, tp)
assert_allclose(np.unwrap(np.angle(specc), axis=0), specp,
atol=1e-06)
def test_specgram_angle_phase_equivalent(self):
freqs = self.freqs_specgram
speca, freqspeca, ta = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='angle')
specp, freqspecp, tp = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='phase')
assert_array_equal(freqspeca, freqspecp)
assert_array_equal(ta, tp)
assert_allclose(np.unwrap(speca, axis=0), specp,
atol=1e-06)
def test_psd_windowarray_equal(self):
freqs = self.freqs_density
win = mlab.window_hanning(np.ones(self.NFFT_density_real))
speca, fspa = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=win)
specb, fspb = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
assert_array_equal(fspa, fspb)
assert_allclose(speca, specb, atol=1e-08)
# extra test for cohere...
def test_cohere():
N = 1024
np.random.seed(19680801)
x = np.random.randn(N)
# phase offset
y = np.roll(x, 20)
# high-freq roll-off
y = np.convolve(y, np.ones(20) / 20., mode='same')
cohsq, f = mlab.cohere(x, y, NFFT=256, Fs=2, noverlap=128)
assert_allclose(np.mean(cohsq), 0.837, atol=1.e-3)
assert np.isreal(np.mean(cohsq))
#*****************************************************************
# These tests were taken from SciPy with some minor modifications;
# the originals can be retrieved from:
# https://github.com/scipy/scipy/blob/master/scipy/stats/tests/test_kdeoth.py
#*****************************************************************
class TestGaussianKDE(object):
def test_kde_integer_input(self):
"""Regression test for #1181."""
x1 = np.arange(5)
kde = mlab.GaussianKDE(x1)
y_expected = [0.13480721, 0.18222869, 0.19514935, 0.18222869,
0.13480721]
np.testing.assert_array_almost_equal(kde(x1), y_expected, decimal=6)
def test_gaussian_kde_covariance_caching(self):
x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
xs = np.linspace(-10, 10, num=5)
# These expected values are from scipy 0.10, before some changes to
# gaussian_kde. They were not compared with any external reference.
y_expected = [0.02463386, 0.04689208, 0.05395444, 0.05337754,
0.01664475]
# set it to the default bandwidth.
kde2 = mlab.GaussianKDE(x1, 'scott')
y2 = kde2(xs)
np.testing.assert_array_almost_equal(y_expected, y2, decimal=7)
def test_kde_bandwidth_method(self):
np.random.seed(8765678)
n_basesample = 50
xn = np.random.randn(n_basesample)
# Default
gkde = mlab.GaussianKDE(xn)
# Supply a callable
gkde2 = mlab.GaussianKDE(xn, 'scott')
# Supply a scalar
gkde3 = mlab.GaussianKDE(xn, bw_method=gkde.factor)
xs = np.linspace(-7, 7, 51)
kdepdf = gkde.evaluate(xs)
kdepdf2 = gkde2.evaluate(xs)
        np.testing.assert_array_almost_equal(kdepdf, kdepdf2)
        kdepdf3 = gkde3.evaluate(xs)
        np.testing.assert_array_almost_equal(kdepdf, kdepdf3)
class TestGaussianKDECustom(object):
def test_no_data(self):
"""Pass no data into the GaussianKDE class."""
with pytest.raises(ValueError):
mlab.GaussianKDE([])
def test_single_dataset_element(self):
"""Pass a single dataset element into the GaussianKDE class."""
with pytest.raises(ValueError):
mlab.GaussianKDE([42])
def test_silverman_multidim_dataset(self):
"""Use a multi-dimensional array as the dataset and test silverman's
output"""
x1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
with pytest.raises(np.linalg.LinAlgError):
mlab.GaussianKDE(x1, "silverman")
def test_silverman_singledim_dataset(self):
"""Use a single dimension list as the dataset and test silverman's
output."""
x1 = np.array([-7, -5, 1, 4, 5])
mygauss = mlab.GaussianKDE(x1, "silverman")
y_expected = 0.76770389927475502
assert_almost_equal(mygauss.covariance_factor(), y_expected, 7)
def test_scott_multidim_dataset(self):
"""Use a multi-dimensional array as the dataset and test scott's output
"""
x1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
with pytest.raises(np.linalg.LinAlgError):
mlab.GaussianKDE(x1, "scott")
def test_scott_singledim_dataset(self):
"""Use a single-dimensional array as the dataset and test scott's
output"""
x1 = np.array([-7, -5, 1, 4, 5])
mygauss = mlab.GaussianKDE(x1, "scott")
y_expected = 0.72477966367769553
assert_almost_equal(mygauss.covariance_factor(), y_expected, 7)
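    # For reference, the rule-of-thumb factors checked above are the standard
    # ones: Scott: n**(-1/(d+4)) and Silverman: (n*(d+2)/4)**(-1/(d+4));
    # with n=5 samples and d=1 these give ~0.7248 and ~0.7677 respectively.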
def test_scalar_empty_dataset(self):
"""Use an empty array as the dataset and test the scalar's cov factor
"""
with pytest.raises(ValueError):
mlab.GaussianKDE([], bw_method=5)
def test_scalar_covariance_dataset(self):
"""Use a dataset and test a scalar's cov factor
"""
np.random.seed(8765678)
n_basesample = 50
multidim_data = [np.random.randn(n_basesample) for i in range(5)]
kde = mlab.GaussianKDE(multidim_data, bw_method=0.5)
assert kde.covariance_factor() == 0.5
def test_callable_covariance_dataset(self):
"""Use a multi-dimensional array as the dataset and test the callable's
cov factor"""
np.random.seed(8765678)
n_basesample = 50
multidim_data = [np.random.randn(n_basesample) for i in range(5)]
def callable_fun(x):
return 0.55
kde = mlab.GaussianKDE(multidim_data, bw_method=callable_fun)
assert kde.covariance_factor() == 0.55
def test_callable_singledim_dataset(self):
"""Use a single-dimensional array as the dataset and test the
callable's cov factor"""
np.random.seed(8765678)
n_basesample = 50
multidim_data = np.random.randn(n_basesample)
kde = mlab.GaussianKDE(multidim_data, bw_method='silverman')
y_expected = 0.48438841363348911
assert_almost_equal(kde.covariance_factor(), y_expected, 7)
def test_wrong_bw_method(self):
"""Test the error message that should be called when bw is invalid."""
np.random.seed(8765678)
n_basesample = 50
data = np.random.randn(n_basesample)
with pytest.raises(ValueError):
mlab.GaussianKDE(data, bw_method="invalid")
class TestGaussianKDEEvaluate(object):
def test_evaluate_diff_dim(self):
"""
Test the evaluate method when the dim's of dataset and points have
different dimensions.
"""
x1 = np.arange(3, 10, 2)
kde = mlab.GaussianKDE(x1)
x2 = np.arange(3, 12, 2)
y_expected = [
0.08797252, 0.11774109, 0.11774109, 0.08797252, 0.0370153
]
y = kde.evaluate(x2)
np.testing.assert_array_almost_equal(y, y_expected, 7)
def test_evaluate_inv_dim(self):
"""
Invert the dimensions; i.e., for a dataset of dimension 1 [3,2,4], the
points should have a dimension of 3 [[3],[2],[4]].
"""
np.random.seed(8765678)
n_basesample = 50
multidim_data = np.random.randn(n_basesample)
kde = mlab.GaussianKDE(multidim_data)
x2 = [[1], [2], [3]]
with pytest.raises(ValueError):
kde.evaluate(x2)
def test_evaluate_dim_and_num(self):
"""Tests if evaluated against a one by one array"""
x1 = np.arange(3, 10, 2)
x2 = np.array([3])
kde = mlab.GaussianKDE(x1)
y_expected = [0.08797252]
y = kde.evaluate(x2)
np.testing.assert_array_almost_equal(y, y_expected, 7)
def test_evaluate_point_dim_not_one(self):
x1 = np.arange(3, 10, 2)
x2 = [np.arange(3, 10, 2), np.arange(3, 10, 2)]
kde = mlab.GaussianKDE(x1)
with pytest.raises(ValueError):
kde.evaluate(x2)
def test_evaluate_equal_dim_and_num_lt(self):
x1 = np.arange(3, 10, 2)
x2 = np.arange(3, 8, 2)
kde = mlab.GaussianKDE(x1)
y_expected = [0.08797252, 0.11774109, 0.11774109]
y = kde.evaluate(x2)
np.testing.assert_array_almost_equal(y, y_expected, 7)
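# A hedged sketch (not one of the tests) of the shape contract exercised
# above: for a 1-D dataset the evaluation points must also be 1-D; passing a
# higher-dimensional point array raises ValueError.  Values are illustrative.
def _example_evaluate_shapes():
    kde = mlab.GaussianKDE(np.arange(3, 10, 2))
    ok = kde.evaluate(np.array([3.0, 5.0]))  # 1-D points: accepted
    # kde.evaluate([[3], [5]]) would raise ValueError (wrong dimension)
    return ok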
def test_psd_onesided_norm():
u = np.array([0, 1, 2, 3, 1, 2, 1])
dt = 1.0
Su = np.abs(np.fft.fft(u) * dt)**2 / (dt * u.size)
P, f = mlab.psd(u, NFFT=u.size, Fs=1/dt, window=mlab.window_none,
detrend=mlab.detrend_none, noverlap=0, pad_to=None,
scale_by_freq=None,
sides='onesided')
Su_1side = np.append([Su[0]], Su[1:4] + Su[4:][::-1])
assert_allclose(P, Su_1side, atol=1e-06)
def test_psd_oversampling():
"""Test the case len(x) < NFFT for psd()."""
u = np.array([0, 1, 2, 3, 1, 2, 1])
dt = 1.0
Su = np.abs(np.fft.fft(u) * dt)**2 / (dt * u.size)
P, f = mlab.psd(u, NFFT=u.size*2, Fs=1/dt, window=mlab.window_none,
detrend=mlab.detrend_none, noverlap=0, pad_to=None,
scale_by_freq=None,
sides='onesided')
Su_1side = np.append([Su[0]], Su[1:4] + Su[4:][::-1])
assert_almost_equal(np.sum(P), np.sum(Su_1side)) # same energy
|
334fd3223ee4d2035585d0e42dbda3dfa7ecfbd7d11adaffd9207089713fb1fb
|
import io
from itertools import chain
import warnings
import numpy as np
import pytest
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
import matplotlib.path as mpath
import matplotlib.transforms as mtransforms
import matplotlib.collections as mcollections
import matplotlib.artist as martist
from matplotlib.testing.decorators import image_comparison
def test_patch_transform_of_none():
# tests the behaviour of patches added to an Axes with various transform
# specifications
ax = plt.axes()
ax.set_xlim([1, 3])
ax.set_ylim([1, 3])
# Draw an ellipse over data coord (2,2) by specifying device coords.
xy_data = (2, 2)
xy_pix = ax.transData.transform_point(xy_data)
    # Not providing a transform puts the ellipse in data coordinates.
e = mpatches.Ellipse(xy_data, width=1, height=1, fc='yellow', alpha=0.5)
ax.add_patch(e)
assert e._transform == ax.transData
# Providing a transform of None puts the ellipse in device coordinates.
e = mpatches.Ellipse(xy_pix, width=120, height=120, fc='coral',
transform=None, alpha=0.5)
assert e.is_transform_set()
ax.add_patch(e)
assert isinstance(e._transform, mtransforms.IdentityTransform)
# Providing an IdentityTransform puts the ellipse in device coordinates.
e = mpatches.Ellipse(xy_pix, width=100, height=100,
transform=mtransforms.IdentityTransform(), alpha=0.5)
ax.add_patch(e)
assert isinstance(e._transform, mtransforms.IdentityTransform)
    # Not providing a transform, and then calling "get_transform", should not
    # make "is_transform_set" return True.
e = mpatches.Ellipse(xy_pix, width=120, height=120, fc='coral',
alpha=0.5)
intermediate_transform = e.get_transform()
assert not e.is_transform_set()
ax.add_patch(e)
assert e.get_transform() != intermediate_transform
assert e.is_transform_set()
assert e._transform == ax.transData
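# A minimal illustration (not one of the tests) of the distinction checked
# above: passing transform=None marks the transform as explicitly *set*
# (device/identity coordinates), whereas omitting the argument leaves it
# unset, so add_patch later supplies ax.transData.
def _example_transform_none_vs_unset():
    explicit = mpatches.Ellipse((0, 0), 1, 1, transform=None)
    implicit = mpatches.Ellipse((0, 0), 1, 1)
    # Expected: explicit -> True, implicit -> False.
    return explicit.is_transform_set(), implicit.is_transform_set()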
def test_collection_transform_of_none():
# tests the behaviour of collections added to an Axes with various
# transform specifications
ax = plt.axes()
ax.set_xlim([1, 3])
ax.set_ylim([1, 3])
# draw an ellipse over data coord (2,2) by specifying device coords
xy_data = (2, 2)
xy_pix = ax.transData.transform_point(xy_data)
    # not providing a transform puts the ellipse in data coordinates
e = mpatches.Ellipse(xy_data, width=1, height=1)
c = mcollections.PatchCollection([e], facecolor='yellow', alpha=0.5)
ax.add_collection(c)
# the collection should be in data coordinates
assert c.get_offset_transform() + c.get_transform() == ax.transData
# providing a transform of None puts the ellipse in device coordinates
e = mpatches.Ellipse(xy_pix, width=120, height=120)
c = mcollections.PatchCollection([e], facecolor='coral',
alpha=0.5)
c.set_transform(None)
ax.add_collection(c)
assert isinstance(c.get_transform(), mtransforms.IdentityTransform)
# providing an IdentityTransform puts the ellipse in device coordinates
e = mpatches.Ellipse(xy_pix, width=100, height=100)
c = mcollections.PatchCollection([e],
transform=mtransforms.IdentityTransform(),
alpha=0.5)
ax.add_collection(c)
assert isinstance(c._transOffset, mtransforms.IdentityTransform)
@image_comparison(baseline_images=["clip_path_clipping"], remove_text=True)
def test_clipping():
exterior = mpath.Path.unit_rectangle().deepcopy()
exterior.vertices *= 4
exterior.vertices -= 2
interior = mpath.Path.unit_circle().deepcopy()
interior.vertices = interior.vertices[::-1]
clip_path = mpath.Path(vertices=np.concatenate([exterior.vertices,
interior.vertices]),
codes=np.concatenate([exterior.codes,
interior.codes]))
star = mpath.Path.unit_regular_star(6).deepcopy()
star.vertices *= 2.6
ax1 = plt.subplot(121)
col = mcollections.PathCollection([star], lw=5, edgecolor='blue',
facecolor='red', alpha=0.7, hatch='*')
col.set_clip_path(clip_path, ax1.transData)
ax1.add_collection(col)
ax2 = plt.subplot(122, sharex=ax1, sharey=ax1)
patch = mpatches.PathPatch(star, lw=5, edgecolor='blue', facecolor='red',
alpha=0.7, hatch='*')
patch.set_clip_path(clip_path, ax2.transData)
ax2.add_patch(patch)
ax1.set_xlim([-3, 3])
ax1.set_ylim([-3, 3])
def test_cull_markers():
x = np.random.random(20000)
y = np.random.random(20000)
fig, ax = plt.subplots()
ax.plot(x, y, 'k.')
ax.set_xlim(2, 3)
pdf = io.BytesIO()
fig.savefig(pdf, format="pdf")
assert len(pdf.getvalue()) < 8000
svg = io.BytesIO()
fig.savefig(svg, format="svg")
assert len(svg.getvalue()) < 20000
@image_comparison(baseline_images=['hatching'], remove_text=True,
style='default')
def test_hatching():
fig, ax = plt.subplots(1, 1)
# Default hatch color.
rect1 = mpatches.Rectangle((0, 0), 3, 4, hatch='/')
ax.add_patch(rect1)
rect2 = mcollections.RegularPolyCollection(4, sizes=[16000],
offsets=[(1.5, 6.5)],
transOffset=ax.transData,
hatch='/')
ax.add_collection(rect2)
# Ensure edge color is not applied to hatching.
rect3 = mpatches.Rectangle((4, 0), 3, 4, hatch='/', edgecolor='C1')
ax.add_patch(rect3)
rect4 = mcollections.RegularPolyCollection(4, sizes=[16000],
offsets=[(5.5, 6.5)],
transOffset=ax.transData,
hatch='/', edgecolor='C1')
ax.add_collection(rect4)
ax.set_xlim(0, 7)
ax.set_ylim(0, 9)
def test_remove():
fig, ax = plt.subplots()
im = ax.imshow(np.arange(36).reshape(6, 6))
ln, = ax.plot(range(5))
assert fig.stale
assert ax.stale
fig.canvas.draw()
assert not fig.stale
assert not ax.stale
assert not ln.stale
assert im in ax._mouseover_set
assert ln not in ax._mouseover_set
assert im.axes is ax
im.remove()
ln.remove()
for art in [im, ln]:
assert art.axes is None
assert art.figure is None
assert im not in ax._mouseover_set
assert fig.stale
assert ax.stale
@image_comparison(baseline_images=["default_edges"], remove_text=True,
extensions=['png'], style='default')
def test_default_edges():
fig, [[ax1, ax2], [ax3, ax4]] = plt.subplots(2, 2)
ax1.plot(np.arange(10), np.arange(10), 'x',
np.arange(10) + 1, np.arange(10), 'o')
ax2.bar(np.arange(10), np.arange(10), align='edge')
ax3.text(0, 0, "BOX", size=24, bbox=dict(boxstyle='sawtooth'))
ax3.set_xlim((-1, 1))
ax3.set_ylim((-1, 1))
pp1 = mpatches.PathPatch(
mpath.Path([(0, 0), (1, 0), (1, 1), (0, 0)],
[mpath.Path.MOVETO, mpath.Path.CURVE3,
mpath.Path.CURVE3, mpath.Path.CLOSEPOLY]),
fc="none", transform=ax4.transData)
ax4.add_patch(pp1)
def test_properties():
ln = mlines.Line2D([], [])
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
ln.properties()
assert len(w) == 0
def test_setp():
# Check empty list
plt.setp([])
plt.setp([[]])
# Check arbitrary iterables
fig, axes = plt.subplots()
lines1 = axes.plot(range(3))
lines2 = axes.plot(range(3))
martist.setp(chain(lines1, lines2), 'lw', 5)
plt.setp(axes.spines.values(), color='green')
# Check `file` argument
sio = io.StringIO()
plt.setp(lines1, 'zorder', file=sio)
assert sio.getvalue() == ' zorder: float\n'
def test_None_zorder():
fig, ax = plt.subplots()
ln, = ax.plot(range(5), zorder=None)
assert ln.get_zorder() == mlines.Line2D.zorder
ln.set_zorder(123456)
assert ln.get_zorder() == 123456
ln.set_zorder(None)
assert ln.get_zorder() == mlines.Line2D.zorder
@pytest.mark.parametrize('accept_clause, expected', [
('', 'unknown'),
("ACCEPTS: [ '-' | '--' | '-.' ]", "[ '-' | '--' | '-.' ]"),
('ACCEPTS: Some description.', 'Some description.'),
('.. ACCEPTS: Some description.', 'Some description.'),
('arg : int', 'int'),
('*arg : int', 'int'),
('arg : int\nACCEPTS: Something else.', 'Something else. '),
])
def test_artist_inspector_get_valid_values(accept_clause, expected):
class TestArtist(martist.Artist):
def set_f(self, arg):
pass
TestArtist.set_f.__doc__ = """
Some text.
%s
""" % accept_clause
valid_values = martist.ArtistInspector(TestArtist).get_valid_values('f')
assert valid_values == expected
def test_artist_inspector_get_aliases():
# test the correct format and type of get_aliases method
ai = martist.ArtistInspector(mlines.Line2D)
aliases = ai.get_aliases()
assert aliases["linewidth"] == {"lw"}
|
c4a32b35356f7096ddaa49d9eccb8d48a917687889697b07cdd07678cfaf854e
|
from pathlib import Path
import matplotlib
from matplotlib.font_manager import FontProperties
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
@image_comparison(baseline_images=["truetype-conversion"],
extensions=["pdf"])
# mpltest.ttf does not have "l"/"p" glyphs so we get a warning when trying to
# get the font extents.
def test_truetype_conversion(recwarn):
fontprop = FontProperties(
fname=str(Path(__file__).with_name('mpltest.ttf').resolve()), size=80)
matplotlib.rcParams['pdf.fonttype'] = 3
fig, ax = plt.subplots()
ax.text(0, 0, "ABCDE", fontproperties=fontprop)
ax.set_xticks([])
ax.set_yticks([])
|
d4ab25f7cfe3cd2b8916944128698b879438617ecd882f6edd625d9423dc65ef
|
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.testing.decorators import image_comparison
from matplotlib.table import CustomCell, Table
from matplotlib.path import Path
def test_non_square():
# Check that creating a non-square table works
cellcolors = ['b', 'r']
plt.table(cellColours=cellcolors)
@image_comparison(baseline_images=['table_zorder'],
extensions=['png'],
remove_text=True)
def test_zorder():
data = [[66386, 174296],
[58230, 381139]]
colLabels = ('Freeze', 'Wind')
rowLabels = ['%d year' % x for x in (100, 50)]
cellText = []
yoff = np.zeros(len(colLabels))
for row in reversed(data):
yoff += row
cellText.append(['%1.1f' % (x/1000.0) for x in yoff])
t = np.linspace(0, 2*np.pi, 100)
plt.plot(t, np.cos(t), lw=4, zorder=2)
plt.table(cellText=cellText,
rowLabels=rowLabels,
colLabels=colLabels,
loc='center',
zorder=-2,
)
plt.table(cellText=cellText,
rowLabels=rowLabels,
colLabels=colLabels,
loc='upper center',
zorder=4,
)
plt.yticks([])
@image_comparison(baseline_images=['table_labels'],
extensions=['png'])
def test_label_colours():
dim = 3
c = np.linspace(0, 1, dim)
colours = plt.cm.RdYlGn(c)
cellText = [['1'] * dim] * dim
fig = plt.figure()
ax1 = fig.add_subplot(4, 1, 1)
ax1.axis('off')
ax1.table(cellText=cellText,
rowColours=colours,
loc='best')
ax2 = fig.add_subplot(4, 1, 2)
ax2.axis('off')
ax2.table(cellText=cellText,
rowColours=colours,
rowLabels=['Header'] * dim,
loc='best')
ax3 = fig.add_subplot(4, 1, 3)
ax3.axis('off')
ax3.table(cellText=cellText,
colColours=colours,
loc='best')
ax4 = fig.add_subplot(4, 1, 4)
ax4.axis('off')
ax4.table(cellText=cellText,
colColours=colours,
colLabels=['Header'] * dim,
loc='best')
@image_comparison(baseline_images=['table_cell_manipulation'],
extensions=['png'], remove_text=True)
def test_diff_cell_table():
cells = ('horizontal', 'vertical', 'open', 'closed', 'T', 'R', 'B', 'L')
cellText = [['1'] * len(cells)] * 2
colWidths = [0.1] * len(cells)
_, axes = plt.subplots(nrows=len(cells), figsize=(4, len(cells)+1))
for ax, cell in zip(axes, cells):
ax.table(
colWidths=colWidths,
cellText=cellText,
loc='center',
edges=cell,
)
ax.axis('off')
plt.tight_layout()
def test_customcell():
types = ('horizontal', 'vertical', 'open', 'closed', 'T', 'R', 'B', 'L')
codes = (
(Path.MOVETO, Path.LINETO, Path.MOVETO, Path.LINETO, Path.MOVETO),
(Path.MOVETO, Path.MOVETO, Path.LINETO, Path.MOVETO, Path.LINETO),
(Path.MOVETO, Path.MOVETO, Path.MOVETO, Path.MOVETO, Path.MOVETO),
(Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY),
(Path.MOVETO, Path.MOVETO, Path.MOVETO, Path.LINETO, Path.MOVETO),
(Path.MOVETO, Path.MOVETO, Path.LINETO, Path.MOVETO, Path.MOVETO),
(Path.MOVETO, Path.LINETO, Path.MOVETO, Path.MOVETO, Path.MOVETO),
(Path.MOVETO, Path.MOVETO, Path.MOVETO, Path.MOVETO, Path.LINETO),
)
for t, c in zip(types, codes):
cell = CustomCell((0, 0), visible_edges=t, width=1, height=1)
code = tuple(s for _, s in cell.get_path().iter_segments())
assert c == code
@image_comparison(baseline_images=['table_auto_column'],
extensions=['png'])
def test_auto_column():
fig = plt.figure()
# iterable list input
ax1 = fig.add_subplot(4, 1, 1)
ax1.axis('off')
tb1 = ax1.table(cellText=[['Fit Text', 2],
['very long long text, Longer text than default', 1]],
rowLabels=["A", "B"],
colLabels=["Col1", "Col2"],
loc="center")
tb1.auto_set_font_size(False)
tb1.set_fontsize(12)
tb1.auto_set_column_width([-1, 0, 1])
# iterable tuple input
ax2 = fig.add_subplot(4, 1, 2)
ax2.axis('off')
tb2 = ax2.table(cellText=[['Fit Text', 2],
['very long long text, Longer text than default', 1]],
rowLabels=["A", "B"],
colLabels=["Col1", "Col2"],
loc="center")
tb2.auto_set_font_size(False)
tb2.set_fontsize(12)
tb2.auto_set_column_width((-1, 0, 1))
    # single inputs
ax3 = fig.add_subplot(4, 1, 3)
ax3.axis('off')
tb3 = ax3.table(cellText=[['Fit Text', 2],
['very long long text, Longer text than default', 1]],
rowLabels=["A", "B"],
colLabels=["Col1", "Col2"],
loc="center")
tb3.auto_set_font_size(False)
tb3.set_fontsize(12)
tb3.auto_set_column_width(-1)
tb3.auto_set_column_width(0)
tb3.auto_set_column_width(1)
    # non-integer iterable input
ax4 = fig.add_subplot(4, 1, 4)
ax4.axis('off')
tb4 = ax4.table(cellText=[['Fit Text', 2],
['very long long text, Longer text than default', 1]],
rowLabels=["A", "B"],
colLabels=["Col1", "Col2"],
loc="center")
tb4.auto_set_font_size(False)
tb4.set_fontsize(12)
tb4.auto_set_column_width("-101")
def test_table_cells():
fig, ax = plt.subplots()
table = Table(ax)
cell = table.add_cell(1, 2, 1, 1)
assert isinstance(cell, CustomCell)
assert cell is table[1, 2]
cell2 = CustomCell((0, 0), 1, 2, visible_edges=None)
table[2, 1] = cell2
assert table[2, 1] is cell2
    # make sure getitem support has not broken
# properties and setp
table.properties()
plt.setp(table)
|
5ed373b8d9c36546f8fcef5fe34cd573c10ae7cff682de982bedda3291c8936f
|
import itertools
import numpy
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison
import pytest
def check_shared(axs, x_shared, y_shared):
"""
x_shared and y_shared are n x n boolean matrices; entry (i, j) indicates
whether the x (or y) axes of subplots i and j should be shared.
"""
for (i1, ax1), (i2, ax2), (i3, (name, shared)) in itertools.product(
enumerate(axs),
enumerate(axs),
enumerate(zip("xy", [x_shared, y_shared]))):
if i2 <= i1:
continue
assert \
(getattr(axs[0], "_shared_{}_axes".format(name)).joined(ax1, ax2)
== shared[i1, i2]), \
"axes %i and %i incorrectly %ssharing %s axis" % (
i1, i2, "not " if shared[i1, i2] else "", name)
def check_visible(axs, x_visible, y_visible):
def tostr(v):
return "invisible" if v else "visible"
for ax, vx, vy in zip(axs, x_visible, y_visible):
for l in ax.get_xticklabels() + [ax.get_xaxis().offsetText]:
assert l.get_visible() == vx, \
"X axis was incorrectly %s" % (tostr(vx))
for l in ax.get_yticklabels() + [ax.get_yaxis().offsetText]:
assert l.get_visible() == vy, \
"Y axis was incorrectly %s" % (tostr(vy))
def test_shared():
rdim = (4, 4, 2)
share = {
'all': numpy.ones(rdim[:2], dtype=bool),
'none': numpy.zeros(rdim[:2], dtype=bool),
'row': numpy.array([
[False, True, False, False],
[True, False, False, False],
[False, False, False, True],
[False, False, True, False]]),
'col': numpy.array([
[False, False, True, False],
[False, False, False, True],
[True, False, False, False],
[False, True, False, False]]),
}
visible = {
'x': {
'all': [False, False, True, True],
'col': [False, False, True, True],
'row': [True] * 4,
'none': [True] * 4,
False: [True] * 4,
True: [False, False, True, True],
},
'y': {
'all': [True, False, True, False],
'col': [True] * 4,
'row': [True, False, True, False],
'none': [True] * 4,
False: [True] * 4,
True: [True, False, True, False],
},
}
share[False] = share['none']
share[True] = share['all']
# test default
f, ((a1, a2), (a3, a4)) = plt.subplots(2, 2)
axs = [a1, a2, a3, a4]
check_shared(axs, share['none'], share['none'])
plt.close(f)
# test all option combinations
ops = [False, True, 'all', 'none', 'row', 'col']
for xo in ops:
for yo in ops:
f, ((a1, a2), (a3, a4)) = plt.subplots(2, 2, sharex=xo, sharey=yo)
axs = [a1, a2, a3, a4]
check_shared(axs, share[xo], share[yo])
check_visible(axs, visible['x'][xo], visible['y'][yo])
plt.close(f)
# test label_outer
f, ((a1, a2), (a3, a4)) = plt.subplots(2, 2, sharex=True, sharey=True)
axs = [a1, a2, a3, a4]
for ax in axs:
ax.label_outer()
check_visible(axs, [False, False, True, True], [True, False, True, False])
def test_shared_and_moved():
    # test that if sharey is on and tick_left is then called, labels don't
    # re-appear.  Seaborn does this just to be sure the y-axis is on the left...
f, (a1, a2) = plt.subplots(1, 2, sharey=True)
check_visible([a2], [True], [False])
a2.yaxis.tick_left()
check_visible([a2], [True], [False])
f, (a1, a2) = plt.subplots(2, 1, sharex=True)
check_visible([a1], [False], [True])
a2.xaxis.tick_bottom()
check_visible([a1], [False], [True])
def test_exceptions():
# TODO should this test more options?
with pytest.raises(ValueError):
plt.subplots(2, 2, sharex='blah')
with pytest.raises(ValueError):
plt.subplots(2, 2, sharey='blah')
    # The warnings raised here are genuine, but we filter them because the
    # point of this test is only to ensure that these calls raise.
with pytest.warns(UserWarning, match='.*sharex argument to subplots'), \
pytest.raises(ValueError):
plt.subplots(2, 2, -1)
with pytest.warns(UserWarning, match='.*sharex argument to subplots'), \
pytest.raises(ValueError):
plt.subplots(2, 2, 0)
with pytest.warns(UserWarning, match='.*sharex argument to subplots'), \
pytest.raises(ValueError):
plt.subplots(2, 2, 5)
@image_comparison(baseline_images=['subplots_offset_text'], remove_text=False)
def test_subplots_offsettext():
x = numpy.arange(0, 1e10, 1e9)
y = numpy.arange(0, 100, 10)+1e4
fig, axes = plt.subplots(2, 2, sharex='col', sharey='all')
axes[0, 0].plot(x, x)
axes[1, 0].plot(x, x)
axes[0, 1].plot(y, x)
axes[1, 1].plot(y, x)
def test_get_gridspec():
# ahem, pretty trivial, but...
fig, ax = plt.subplots()
assert ax.get_subplotspec().get_gridspec() == ax.get_gridspec()
def test_dont_mutate_kwargs():
subplot_kw = {'sharex': 'all'}
gridspec_kw = {'width_ratios': [1, 2]}
fig, ax = plt.subplots(1, 2, subplot_kw=subplot_kw,
gridspec_kw=gridspec_kw)
assert subplot_kw == {'sharex': 'all'}
assert gridspec_kw == {'width_ratios': [1, 2]}
|
18461e024bb4f609f77ab78f699307456554040170bb37405e6ae7165e5ae267
|
import os
from pathlib import Path
import shutil
import subprocess
from tempfile import TemporaryDirectory
import numpy as np
import pytest
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.testing.compare import compare_images, ImageComparisonFailure
from matplotlib.testing.decorators import image_comparison, _image_directories
from matplotlib.backends.backend_pgf import PdfPages
baseline_dir, result_dir = _image_directories(lambda: 'dummy func')
def check_for(texsystem):
with TemporaryDirectory() as tmpdir:
tex_path = Path(tmpdir, "test.tex")
tex_path.write_text(r"""
\documentclass{minimal}
\usepackage{pgf}
\begin{document}
\typeout{pgfversion=\pgfversion}
\makeatletter
\@@end
""")
try:
subprocess.check_call(
[texsystem, "-halt-on-error", str(tex_path)], cwd=tmpdir,
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
except (OSError, subprocess.CalledProcessError):
return False
return True
needs_xelatex = pytest.mark.skipif(not check_for('xelatex'),
reason='xelatex + pgf is required')
needs_pdflatex = pytest.mark.skipif(not check_for('pdflatex'),
reason='pdflatex + pgf is required')
needs_lualatex = pytest.mark.skipif(not check_for('lualatex'),
reason='lualatex + pgf is required')
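# A minimal sketch (not part of the original file): check_for() can also be
# called directly to see which TeX systems are available on this machine; the
# names probed here are just examples.
def _example_available_texsystems():
    return [tex for tex in ("xelatex", "pdflatex", "lualatex")
            if check_for(tex)]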
def compare_figure(fname, savefig_kwargs={}, tol=0):
actual = os.path.join(result_dir, fname)
plt.savefig(actual, **savefig_kwargs)
expected = os.path.join(result_dir, "expected_%s" % fname)
shutil.copyfile(os.path.join(baseline_dir, fname), expected)
err = compare_images(expected, actual, tol=tol)
if err:
raise ImageComparisonFailure(err)
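# A hedged usage sketch (not one of the tests): compare_figure() saves the
# current pyplot figure into result_dir and compares it against a baseline of
# the same name; the file name and tolerance below are hypothetical.
def _example_compare_figure_usage():
    plt.figure()
    plt.plot([0, 1], [0, 1])
    compare_figure('pgf_example.pdf', savefig_kwargs={'dpi': 72}, tol=10)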
def create_figure():
plt.figure()
x = np.linspace(0, 1, 15)
# line plot
plt.plot(x, x ** 2, "b-")
# marker
plt.plot(x, 1 - x**2, "g>")
# filled paths and patterns
plt.fill_between([0., .4], [.4, 0.], hatch='//', facecolor="lightgray",
edgecolor="red")
plt.fill([3, 3, .8, .8, 3], [2, -2, -2, 0, 2], "b")
# text and typesetting
plt.plot([0.9], [0.5], "ro", markersize=3)
plt.text(0.9, 0.5, 'unicode (ü, °, µ) and math ($\\mu_i = x_i^2$)',
ha='right', fontsize=20)
plt.ylabel('sans-serif, blue, $\\frac{\\sqrt{x}}{y^2}$..',
family='sans-serif', color='blue')
plt.xlim(0, 1)
plt.ylim(0, 1)
# test compiling a figure to pdf with xelatex
@needs_xelatex
@pytest.mark.backend('pgf')
@image_comparison(baseline_images=['pgf_xelatex'], extensions=['pdf'],
style='default')
def test_xelatex():
rc_xelatex = {'font.family': 'serif',
'pgf.rcfonts': False}
mpl.rcParams.update(rc_xelatex)
create_figure()
# test compiling a figure to pdf with pdflatex
@needs_pdflatex
@pytest.mark.backend('pgf')
@image_comparison(baseline_images=['pgf_pdflatex'], extensions=['pdf'],
style='default')
def test_pdflatex():
if os.environ.get('APPVEYOR', False):
pytest.xfail("pdflatex test does not work on appveyor due to missing "
"LaTeX fonts")
rc_pdflatex = {'font.family': 'serif',
'pgf.rcfonts': False,
'pgf.texsystem': 'pdflatex',
'pgf.preamble': ['\\usepackage[utf8x]{inputenc}',
'\\usepackage[T1]{fontenc}']}
mpl.rcParams.update(rc_pdflatex)
create_figure()
# test updating the rc parameters for each figure
@needs_xelatex
@needs_pdflatex
@pytest.mark.style('default')
@pytest.mark.backend('pgf')
def test_rcupdate():
rc_sets = [{'font.family': 'sans-serif',
'font.size': 30,
'figure.subplot.left': .2,
'lines.markersize': 10,
'pgf.rcfonts': False,
'pgf.texsystem': 'xelatex'},
{'font.family': 'monospace',
'font.size': 10,
'figure.subplot.left': .1,
'lines.markersize': 20,
'pgf.rcfonts': False,
'pgf.texsystem': 'pdflatex',
'pgf.preamble': ['\\usepackage[utf8x]{inputenc}',
'\\usepackage[T1]{fontenc}',
'\\usepackage{sfmath}']}]
tol = [6, 0]
for i, rc_set in enumerate(rc_sets):
with mpl.rc_context(rc_set):
create_figure()
compare_figure('pgf_rcupdate%d.pdf' % (i + 1), tol=tol[i])
# test backend-side clipping, since large numbers are not supported by TeX
@needs_xelatex
@pytest.mark.style('default')
@pytest.mark.backend('pgf')
def test_pathclip():
rc_xelatex = {'font.family': 'serif',
'pgf.rcfonts': False}
mpl.rcParams.update(rc_xelatex)
plt.figure()
plt.plot([0., 1e100], [0., 1e100])
plt.xlim(0, 1)
plt.ylim(0, 1)
# this test passes if compiling/saving to pdf works (no image comparison)
plt.savefig(os.path.join(result_dir, "pgf_pathclip.pdf"))
# test mixed mode rendering
@needs_xelatex
@pytest.mark.backend('pgf')
@image_comparison(baseline_images=['pgf_mixedmode'], extensions=['pdf'],
style='default')
def test_mixedmode():
rc_xelatex = {'font.family': 'serif',
'pgf.rcfonts': False}
mpl.rcParams.update(rc_xelatex)
Y, X = np.ogrid[-1:1:40j, -1:1:40j]
plt.figure()
plt.pcolor(X**2 + Y**2).set_rasterized(True)
# test bbox_inches clipping
@needs_xelatex
@pytest.mark.style('default')
@pytest.mark.backend('pgf')
def test_bbox_inches():
rc_xelatex = {'font.family': 'serif',
'pgf.rcfonts': False}
mpl.rcParams.update(rc_xelatex)
Y, X = np.ogrid[-1:1:40j, -1:1:40j]
fig = plt.figure()
ax1 = fig.add_subplot(121)
ax1.plot(range(5))
ax2 = fig.add_subplot(122)
ax2.plot(range(5))
plt.tight_layout()
bbox = ax1.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
compare_figure('pgf_bbox_inches.pdf', savefig_kwargs={'bbox_inches': bbox},
tol=0)
@needs_pdflatex
@pytest.mark.style('default')
@pytest.mark.backend('pgf')
def test_pdf_pages():
rc_pdflatex = {
'font.family': 'serif',
'pgf.rcfonts': False,
'pgf.texsystem': 'pdflatex',
}
mpl.rcParams.update(rc_pdflatex)
fig1 = plt.figure()
ax1 = fig1.add_subplot(1, 1, 1)
ax1.plot(range(5))
fig1.tight_layout()
fig2 = plt.figure(figsize=(3, 2))
ax2 = fig2.add_subplot(1, 1, 1)
ax2.plot(range(5))
fig2.tight_layout()
with PdfPages(os.path.join(result_dir, 'pdfpages.pdf')) as pdf:
pdf.savefig(fig1)
pdf.savefig(fig2)
@needs_xelatex
@pytest.mark.style('default')
@pytest.mark.backend('pgf')
def test_pdf_pages_metadata():
rc_pdflatex = {
'font.family': 'serif',
'pgf.rcfonts': False,
'pgf.texsystem': 'xelatex',
}
mpl.rcParams.update(rc_pdflatex)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(range(5))
fig.tight_layout()
md = {'author': 'me', 'title': 'Multipage PDF with pgf'}
path = os.path.join(result_dir, 'pdfpages_meta.pdf')
with PdfPages(path, metadata=md) as pdf:
pdf.savefig(fig)
pdf.savefig(fig)
pdf.savefig(fig)
assert pdf.get_pagecount() == 3
@needs_lualatex
@pytest.mark.style('default')
@pytest.mark.backend('pgf')
def test_pdf_pages_lualatex():
rc_pdflatex = {
'font.family': 'serif',
'pgf.rcfonts': False,
'pgf.texsystem': 'lualatex'
}
mpl.rcParams.update(rc_pdflatex)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(range(5))
fig.tight_layout()
md = {'author': 'me', 'title': 'Multipage PDF with pgf'}
path = os.path.join(result_dir, 'pdfpages_lua.pdf')
with PdfPages(path, metadata=md) as pdf:
pdf.savefig(fig)
pdf.savefig(fig)
assert pdf.get_pagecount() == 2
|
b18fcb11db6834f6372807a96978489e0437fdd4943562fee639afe0ef31d32a
|
"""
Testing that skewed axes properly work.
"""
from contextlib import ExitStack
import itertools
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison
from matplotlib.axes import Axes
import matplotlib.transforms as transforms
import matplotlib.axis as maxis
import matplotlib.spines as mspines
import matplotlib.patches as mpatch
from matplotlib.projections import register_projection
# The sole purpose of this class is to look at the upper, lower, or total
# interval as appropriate and see what parts of the tick to draw, if any.
class SkewXTick(maxis.XTick):
def draw(self, renderer):
with ExitStack() as stack:
for artist in [self.gridline, self.tick1line, self.tick2line,
self.label1, self.label2]:
stack.callback(artist.set_visible, artist.get_visible())
needs_lower = transforms.interval_contains(
self.axes.lower_xlim, self.get_loc())
needs_upper = transforms.interval_contains(
self.axes.upper_xlim, self.get_loc())
self.tick1line.set_visible(
self.tick1line.get_visible() and needs_lower)
self.label1.set_visible(
self.label1.get_visible() and needs_lower)
self.tick2line.set_visible(
self.tick2line.get_visible() and needs_upper)
self.label2.set_visible(
self.label2.get_visible() and needs_upper)
super(SkewXTick, self).draw(renderer)
def get_view_interval(self):
return self.axes.xaxis.get_view_interval()
# This class exists to provide two separate sets of intervals to the tick,
# as well as create instances of the custom tick
class SkewXAxis(maxis.XAxis):
def _get_tick(self, major):
return SkewXTick(self.axes, None, '', major=major)
def get_view_interval(self):
return self.axes.upper_xlim[0], self.axes.lower_xlim[1]
# This class exists to calculate the separate data range of the
# upper X-axis and draw the spine there. It also provides this range
# to the X-axis artist for ticking and gridlines
class SkewSpine(mspines.Spine):
def _adjust_location(self):
pts = self._path.vertices
if self.spine_type == 'top':
pts[:, 0] = self.axes.upper_xlim
else:
pts[:, 0] = self.axes.lower_xlim
# This class handles registration of the skew-xaxes as a projection as well
# as setting up the appropriate transformations. It also overrides standard
# spines and axes instances as appropriate.
class SkewXAxes(Axes):
    # The projection must specify a name. This will be used by the
# user to select the projection, i.e. ``subplot(111,
# projection='skewx')``.
name = 'skewx'
def _init_axis(self):
# Taken from Axes and modified to use our modified X-axis
self.xaxis = SkewXAxis(self)
self.spines['top'].register_axis(self.xaxis)
self.spines['bottom'].register_axis(self.xaxis)
self.yaxis = maxis.YAxis(self)
self.spines['left'].register_axis(self.yaxis)
self.spines['right'].register_axis(self.yaxis)
def _gen_axes_spines(self):
spines = {'top': SkewSpine.linear_spine(self, 'top'),
'bottom': mspines.Spine.linear_spine(self, 'bottom'),
'left': mspines.Spine.linear_spine(self, 'left'),
'right': mspines.Spine.linear_spine(self, 'right')}
return spines
def _set_lim_and_transforms(self):
"""
This is called once when the plot is created to set up all the
transforms for the data, text and grids.
"""
rot = 30
# Get the standard transform setup from the Axes base class
Axes._set_lim_and_transforms(self)
# Need to put the skew in the middle, after the scale and limits,
# but before the transAxes. This way, the skew is done in Axes
# coordinates thus performing the transform around the proper origin
# We keep the pre-transAxes transform around for other users, like the
# spines for finding bounds
self.transDataToAxes = (self.transScale +
(self.transLimits +
transforms.Affine2D().skew_deg(rot, 0)))
# Create the full transform from Data to Pixels
self.transData = self.transDataToAxes + self.transAxes
# Blended transforms like this need to have the skewing applied using
# both axes, in axes coords like before.
self._xaxis_transform = (transforms.blended_transform_factory(
self.transScale + self.transLimits,
transforms.IdentityTransform()) +
transforms.Affine2D().skew_deg(rot, 0)) + self.transAxes
@property
def lower_xlim(self):
return self.axes.viewLim.intervalx
@property
def upper_xlim(self):
pts = [[0., 1.], [1., 1.]]
return self.transDataToAxes.inverted().transform(pts)[:, 0]
# Now register the projection with matplotlib so the user can select
# it.
register_projection(SkewXAxes)
@image_comparison(baseline_images=['skew_axes'], remove_text=True)
def test_set_line_coll_dash_image():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection='skewx')
ax.set_xlim(-50, 50)
ax.set_ylim(50, -50)
ax.grid(True)
# An example of a slanted line at constant X
ax.axvline(0, color='b')
@image_comparison(baseline_images=['skew_rects'], remove_text=True)
def test_skew_rectangle():
    fig, axes = plt.subplots(5, 5, sharex=True, sharey=True, figsize=(8, 8))
axes = axes.flat
rotations = list(itertools.product([-3, -1, 0, 1, 3], repeat=2))
axes[0].set_xlim([-3, 3])
axes[0].set_ylim([-3, 3])
axes[0].set_aspect('equal', share=True)
for ax, (xrots, yrots) in zip(axes, rotations):
xdeg, ydeg = 45 * xrots, 45 * yrots
t = transforms.Affine2D().skew_deg(xdeg, ydeg)
ax.set_title('Skew of {0} in X and {1} in Y'.format(xdeg, ydeg))
ax.add_patch(mpatch.Rectangle([-1, -1], 2, 2,
transform=t + ax.transData,
alpha=0.5, facecolor='coral'))
plt.subplots_adjust(wspace=0, left=0.01, right=0.99, bottom=0.01, top=0.99)
|
42c75985601324f87765833617dd0c4500b2a16dd13943865d882b3345d4c6c7
|
import importlib
import importlib.util
import os
import signal
import subprocess
import sys
import time
import urllib.request
import pytest
import matplotlib as mpl
# Minimal smoke-testing of the backends for which the dependencies are
# PyPI-installable on Travis. They are not available for all tested Python
# versions so we don't fail on missing backends.
def _get_testable_interactive_backends():
backends = []
for deps, backend in [
(["cairo", "gi"], "gtk3agg"),
(["cairo", "gi"], "gtk3cairo"),
(["PyQt5"], "qt5agg"),
(["PyQt5", "cairocffi"], "qt5cairo"),
(["tkinter"], "tkagg"),
(["wx"], "wx"),
(["wx"], "wxagg"),
]:
reason = None
if not os.environ.get("DISPLAY"):
reason = "No $DISPLAY"
elif any(importlib.util.find_spec(dep) is None for dep in deps):
reason = "Missing dependency"
if reason:
backend = pytest.param(
backend, marks=pytest.mark.skip(reason=reason))
backends.append(backend)
return backends
# Using a timer not only allows testing of timers (on other backends), but is
# also necessary on gtk3 and wx, where a direct call to key_press_event("q")
# from draw_event causes breakage due to the canvas widget being deleted too
# early. Also, gtk3 redefines key_press_event with a different signature, so
# we directly invoke it from the superclass instead.
_test_script = """\
import importlib
import importlib.util
import sys
from unittest import TestCase
import matplotlib as mpl
from matplotlib import pyplot as plt, rcParams
from matplotlib.backend_bases import FigureCanvasBase
rcParams.update({
"webagg.open_in_browser": False,
"webagg.port_retries": 1,
})
backend = plt.rcParams["backend"].lower()
assert_equal = TestCase().assertEqual
assert_raises = TestCase().assertRaises
if backend.endswith("agg") and not backend.startswith(("gtk3", "web")):
# Force interactive framework setup.
plt.figure()
# Check that we cannot switch to a backend using another interactive
# framework, but can switch to a backend using cairo instead of agg, or a
# non-interactive backend. In the first case, we use tkagg as the "other"
# interactive backend as it is (essentially) guaranteed to be present.
# Moreover, don't test switching away from gtk3 (as Gtk.main_level() is
# not set up at this point yet) and webagg (which uses no interactive
# framework).
if backend != "tkagg":
with assert_raises(ImportError):
mpl.use("tkagg", force=True)
def check_alt_backend(alt_backend):
mpl.use(alt_backend, force=True)
fig = plt.figure()
assert_equal(
type(fig.canvas).__module__,
"matplotlib.backends.backend_{}".format(alt_backend))
if importlib.util.find_spec("cairocffi"):
check_alt_backend(backend[:-3] + "cairo")
check_alt_backend("svg")
mpl.use(backend, force=True)
fig, ax = plt.subplots()
assert_equal(
type(fig.canvas).__module__,
"matplotlib.backends.backend_{}".format(backend))
ax.plot([0, 1], [2, 3])
timer = fig.canvas.new_timer(1)
timer.add_callback(FigureCanvasBase.key_press_event, fig.canvas, "q")
# Trigger quitting upon draw.
fig.canvas.mpl_connect("draw_event", lambda event: timer.start())
plt.show()
"""
_test_timeout = 10 # Empirically, 1s is not enough on Travis.
@pytest.mark.parametrize("backend", _get_testable_interactive_backends())
@pytest.mark.flaky(reruns=3)
def test_interactive_backend(backend):
proc = subprocess.run([sys.executable, "-c", _test_script],
env={**os.environ, "MPLBACKEND": backend},
timeout=_test_timeout)
if proc.returncode:
pytest.fail("The subprocess returned with non-zero exit status "
f"{proc.returncode}.")
@pytest.mark.skipif('SYSTEM_TEAMFOUNDATIONCOLLECTIONURI' in os.environ,
reason="this test fails an azure for unknown reasons")
@pytest.mark.skipif(os.name == "nt", reason="Cannot send SIGINT on Windows.")
def test_webagg():
pytest.importorskip("tornado")
proc = subprocess.Popen([sys.executable, "-c", _test_script],
env={**os.environ, "MPLBACKEND": "webagg"})
url = "http://{}:{}".format(
mpl.rcParams["webagg.address"], mpl.rcParams["webagg.port"])
timeout = time.perf_counter() + _test_timeout
while True:
try:
retcode = proc.poll()
# check that the subprocess for the server is not dead
assert retcode is None
conn = urllib.request.urlopen(url)
break
except urllib.error.URLError:
if time.perf_counter() > timeout:
pytest.fail("Failed to connect to the webagg server.")
else:
continue
conn.close()
proc.send_signal(signal.SIGINT)
assert proc.wait(timeout=_test_timeout) == 0
|
9a1a1f51664f5c74e38f712dc14fddaaa9b7333a9972732adf581e4d5670843a
|
import platform
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
import numpy as np
import pytest
from cycler import cycler
@image_comparison(baseline_images=['color_cycle_basic'], remove_text=True,
tol={'aarch64': 0.02}.get(platform.machine(), 0.0),
extensions=['png'])
def test_colorcycle_basic():
fig, ax = plt.subplots()
ax.set_prop_cycle(cycler('color', ['r', 'g', 'y']))
xs = np.arange(10)
ys = 0.25 * xs + 2
ax.plot(xs, ys, label='red', lw=4)
ys = 0.45 * xs + 3
ax.plot(xs, ys, label='green', lw=4)
ys = 0.65 * xs + 4
ax.plot(xs, ys, label='yellow', lw=4)
ys = 0.85 * xs + 5
ax.plot(xs, ys, label='red2', lw=4)
ax.legend(loc='upper left')
@image_comparison(baseline_images=['marker_cycle', 'marker_cycle'],
tol={'aarch64': 0.02}.get(platform.machine(), 0.0),
remove_text=True, extensions=['png'])
def test_marker_cycle():
fig, ax = plt.subplots()
ax.set_prop_cycle(cycler('c', ['r', 'g', 'y']) +
cycler('marker', ['.', '*', 'x']))
xs = np.arange(10)
ys = 0.25 * xs + 2
ax.plot(xs, ys, label='red dot', lw=4, ms=16)
ys = 0.45 * xs + 3
ax.plot(xs, ys, label='green star', lw=4, ms=16)
ys = 0.65 * xs + 4
ax.plot(xs, ys, label='yellow x', lw=4, ms=16)
ys = 0.85 * xs + 5
ax.plot(xs, ys, label='red2 dot', lw=4, ms=16)
ax.legend(loc='upper left')
fig, ax = plt.subplots()
# Test keyword arguments, numpy arrays, and generic iterators
ax.set_prop_cycle(c=np.array(['r', 'g', 'y']),
marker=iter(['.', '*', 'x']))
xs = np.arange(10)
ys = 0.25 * xs + 2
ax.plot(xs, ys, label='red dot', lw=4, ms=16)
ys = 0.45 * xs + 3
ax.plot(xs, ys, label='green star', lw=4, ms=16)
ys = 0.65 * xs + 4
ax.plot(xs, ys, label='yellow x', lw=4, ms=16)
ys = 0.85 * xs + 5
ax.plot(xs, ys, label='red2 dot', lw=4, ms=16)
ax.legend(loc='upper left')
@image_comparison(baseline_images=['lineprop_cycle_basic'], remove_text=True,
tol={'aarch64': 0.02}.get(platform.machine(), 0.0),
extensions=['png'])
def test_linestylecycle_basic():
fig, ax = plt.subplots()
ax.set_prop_cycle(cycler('ls', ['-', '--', ':']))
xs = np.arange(10)
ys = 0.25 * xs + 2
ax.plot(xs, ys, label='solid', lw=4, color='k')
ys = 0.45 * xs + 3
ax.plot(xs, ys, label='dashed', lw=4, color='k')
ys = 0.65 * xs + 4
ax.plot(xs, ys, label='dotted', lw=4, color='k')
ys = 0.85 * xs + 5
ax.plot(xs, ys, label='solid2', lw=4, color='k')
ax.legend(loc='upper left')
@image_comparison(baseline_images=['fill_cycle_basic'], remove_text=True,
extensions=['png'])
def test_fillcycle_basic():
fig, ax = plt.subplots()
ax.set_prop_cycle(cycler('c', ['r', 'g', 'y']) +
cycler('hatch', ['xx', 'O', '|-']) +
cycler('linestyle', ['-', '--', ':']))
xs = np.arange(10)
ys = 0.25 * xs**.5 + 2
ax.fill(xs, ys, label='red, xx', linewidth=3)
ys = 0.45 * xs**.5 + 3
ax.fill(xs, ys, label='green, circle', linewidth=3)
ys = 0.65 * xs**.5 + 4
ax.fill(xs, ys, label='yellow, cross', linewidth=3)
ys = 0.85 * xs**.5 + 5
ax.fill(xs, ys, label='red2, xx', linewidth=3)
ax.legend(loc='upper left')
@image_comparison(baseline_images=['fill_cycle_ignore'], remove_text=True,
extensions=['png'])
def test_fillcycle_ignore():
fig, ax = plt.subplots()
ax.set_prop_cycle(cycler('color', ['r', 'g', 'y']) +
cycler('hatch', ['xx', 'O', '|-']) +
cycler('marker', ['.', '*', 'D']))
xs = np.arange(10)
ys = 0.25 * xs**.5 + 2
    # Should not advance the cycler, even though the cycler contains the
    # unspecified property "marker"; "marker" is not a Polygon property and
    # should be ignored.
ax.fill(xs, ys, 'r', hatch='xx', label='red, xx')
ys = 0.45 * xs**.5 + 3
# Allow the cycler to advance, but specify some properties
ax.fill(xs, ys, hatch='O', label='red, circle')
ys = 0.65 * xs**.5 + 4
ax.fill(xs, ys, label='green, circle')
ys = 0.85 * xs**.5 + 5
ax.fill(xs, ys, label='yellow, cross')
ax.legend(loc='upper left')
@image_comparison(baseline_images=['property_collision_plot'],
remove_text=True, extensions=['png'])
def test_property_collision_plot():
fig, ax = plt.subplots()
ax.set_prop_cycle('linewidth', [2, 4])
for c in range(1, 4):
ax.plot(np.arange(10), c * np.arange(10), lw=0.1, color='k')
ax.plot(np.arange(10), 4 * np.arange(10), color='k')
ax.plot(np.arange(10), 5 * np.arange(10), color='k')
@image_comparison(baseline_images=['property_collision_fill'],
remove_text=True, extensions=['png'])
def test_property_collision_fill():
fig, ax = plt.subplots()
xs = np.arange(10)
ys = 0.25 * xs**.5 + 2
ax.set_prop_cycle(linewidth=[2, 3, 4, 5, 6], facecolor='bgcmy')
for c in range(1, 4):
ax.fill(xs, c * ys, lw=0.1)
ax.fill(xs, 4 * ys)
ax.fill(xs, 5 * ys)
def test_valid_input_forms():
fig, ax = plt.subplots()
# These should not raise an error.
ax.set_prop_cycle(None)
ax.set_prop_cycle(cycler('linewidth', [1, 2]))
ax.set_prop_cycle('color', 'rgywkbcm')
ax.set_prop_cycle('lw', (1, 2))
ax.set_prop_cycle('linewidth', [1, 2])
ax.set_prop_cycle('linewidth', iter([1, 2]))
ax.set_prop_cycle('linewidth', np.array([1, 2]))
ax.set_prop_cycle('color', np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]]))
ax.set_prop_cycle('dashes', [[], [13, 2], [8, 3, 1, 3], [None, None]])
ax.set_prop_cycle(lw=[1, 2], color=['k', 'w'], ls=['-', '--'])
ax.set_prop_cycle(lw=np.array([1, 2]),
color=np.array(['k', 'w']),
ls=np.array(['-', '--']))
assert True
def test_cycle_reset():
fig, ax = plt.subplots()
# Can't really test a reset because only a cycle object is stored
# but we can test the first item of the cycle.
prop = next(ax._get_lines.prop_cycler)
ax.set_prop_cycle(linewidth=[10, 9, 4])
assert prop != next(ax._get_lines.prop_cycler)
ax.set_prop_cycle(None)
got = next(ax._get_lines.prop_cycler)
assert prop == got
def test_invalid_input_forms():
fig, ax = plt.subplots()
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle(1)
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle([1, 2])
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle('color', 'fish')
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle('linewidth', 1)
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle('linewidth', {'1': 1, '2': 2})
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle(linewidth=1, color='r')
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle('foobar', [1, 2])
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle(foobar=[1, 2])
with pytest.raises((TypeError, ValueError)):
ax.set_prop_cycle(cycler(foobar=[1, 2]))
with pytest.raises(ValueError):
ax.set_prop_cycle(cycler(color='rgb', c='cmy'))
|
79ad0932f93ecddfec56d2782aeecf942d12a8a83782f5984a3a19560f448a82
|
import warnings
import pytest
import matplotlib
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
from matplotlib.ticker import EngFormatter
with warnings.catch_warnings():
warnings.simplefilter('ignore')
needs_usetex = pytest.mark.skipif(
not matplotlib.checkdep_usetex(True),
        reason='Missing TeX, Ghostscript or dvipng')
@needs_usetex
@image_comparison(baseline_images=['test_usetex'],
extensions=['pdf', 'png'],
tol=0.3)
def test_usetex():
matplotlib.rcParams['text.usetex'] = True
fig = plt.figure()
ax = fig.add_subplot(111)
ax.text(0.1, 0.2,
# the \LaTeX macro exercises character sizing and placement,
# \left[ ... \right\} draw some variable-height characters,
# \sqrt and \frac draw horizontal rules, \mathrm changes the font
r'\LaTeX\ $\left[\int\limits_e^{2e}'
r'\sqrt\frac{\log^3 x}{x}\,\mathrm{d}x \right\}$',
fontsize=24)
ax.set_xticks([])
ax.set_yticks([])
|
047aa0f1880be407cb198a9fe16e55b9f9a804dde230bef4578ed9d0eb51de7d
|
from matplotlib.cbook import MatplotlibDeprecationWarning
import matplotlib.pyplot as plt
from matplotlib.scale import Log10Transform, InvertedLog10Transform
from matplotlib.testing.decorators import check_figures_equal, image_comparison
import numpy as np
import io
import platform
import pytest
@check_figures_equal()
def test_log_scales(fig_test, fig_ref):
ax_test = fig_test.add_subplot(122, yscale='log', xscale='symlog')
ax_test.axvline(24.1)
ax_test.axhline(24.1)
xlim = ax_test.get_xlim()
ylim = ax_test.get_ylim()
ax_ref = fig_ref.add_subplot(122, yscale='log', xscale='symlog')
ax_ref.set(xlim=xlim, ylim=ylim)
ax_ref.plot([24.1, 24.1], ylim, 'b')
ax_ref.plot(xlim, [24.1, 24.1], 'b')
@image_comparison(baseline_images=['logit_scales'], remove_text=True,
extensions=['png'])
def test_logit_scales():
fig, ax = plt.subplots()
# Typical extinction curve for logit
x = np.array([0.001, 0.003, 0.01, 0.03, 0.1, 0.2, 0.3, 0.4, 0.5,
0.6, 0.7, 0.8, 0.9, 0.97, 0.99, 0.997, 0.999])
y = 1.0 / x
ax.plot(x, y)
ax.set_xscale('logit')
ax.grid(True)
bbox = ax.get_tightbbox(fig.canvas.get_renderer())
assert np.isfinite(bbox.x0)
assert np.isfinite(bbox.y0)
def test_log_scatter():
"""Issue #1799"""
fig, ax = plt.subplots(1)
x = np.arange(10)
y = np.arange(10) - 1
ax.scatter(x, y)
buf = io.BytesIO()
fig.savefig(buf, format='pdf')
buf = io.BytesIO()
fig.savefig(buf, format='eps')
buf = io.BytesIO()
fig.savefig(buf, format='svg')
def test_logscale_subs():
fig, ax = plt.subplots()
ax.set_yscale('log', subsy=np.array([2, 3, 4]))
# force draw
fig.canvas.draw()
@image_comparison(baseline_images=['logscale_mask'], remove_text=True,
extensions=['png'])
def test_logscale_mask():
# Check that zero values are masked correctly on log scales.
# See github issue 8045
xs = np.linspace(0, 50, 1001)
fig, ax = plt.subplots()
ax.plot(np.exp(-xs**2))
fig.canvas.draw()
ax.set(yscale="log")
def test_extra_kwargs_raise():
fig, ax = plt.subplots()
with pytest.raises(ValueError):
ax.set_yscale('log', nonpos='mask')
def test_logscale_invert_transform():
fig, ax = plt.subplots()
ax.set_yscale('log')
# get transformation from data to axes
tform = (ax.transAxes + ax.transData.inverted()).inverted()
# direct test of log transform inversion
with pytest.warns(MatplotlibDeprecationWarning):
assert isinstance(Log10Transform().inverted(), InvertedLog10Transform)
def test_logscale_transform_repr():
# check that repr of log transform succeeds
fig, ax = plt.subplots()
ax.set_yscale('log')
s = repr(ax.transData)
# check that repr of log transform succeeds
with pytest.warns(MatplotlibDeprecationWarning):
s = repr(Log10Transform(nonpos='clip'))
@image_comparison(baseline_images=['logscale_nonpos_values'], remove_text=True,
extensions=['png'], tol=0.02, style='mpl20')
def test_logscale_nonpos_values():
np.random.seed(19680801)
xs = np.random.normal(size=int(1e3))
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
ax1.hist(xs, range=(-5, 5), bins=10)
ax1.set_yscale('log')
ax2.hist(xs, range=(-5, 5), bins=10)
ax2.set_yscale('log', nonposy='mask')
xdata = np.arange(0, 10, 0.01)
ydata = np.exp(-xdata)
edata = 0.2*(10-xdata)*np.cos(5*xdata)*np.exp(-xdata)
ax3.fill_between(xdata, ydata - edata, ydata + edata)
ax3.set_yscale('log')
x = np.logspace(-1, 1)
y = x ** 3
yerr = x**2
ax4.errorbar(x, y, yerr=yerr)
ax4.set_yscale('log')
ax4.set_xscale('log')
def test_invalid_log_lims():
# Check that invalid log scale limits are ignored
fig, ax = plt.subplots()
ax.scatter(range(0, 4), range(0, 4))
ax.set_xscale('log')
original_xlim = ax.get_xlim()
with pytest.warns(UserWarning):
ax.set_xlim(left=0)
assert ax.get_xlim() == original_xlim
with pytest.warns(UserWarning):
ax.set_xlim(right=-1)
assert ax.get_xlim() == original_xlim
ax.set_yscale('log')
original_ylim = ax.get_ylim()
with pytest.warns(UserWarning):
ax.set_ylim(bottom=0)
assert ax.get_ylim() == original_ylim
with pytest.warns(UserWarning):
ax.set_ylim(top=-1)
assert ax.get_ylim() == original_ylim
@image_comparison(baseline_images=['function_scales'], remove_text=True,
extensions=['png'], style='mpl20')
def test_function_scale():
def inverse(x):
return x**2
def forward(x):
return x**(1/2)
fig, ax = plt.subplots()
x = np.arange(1, 1000)
ax.plot(x, x)
ax.set_xscale('function', functions=(forward, inverse))
ax.set_xlim(1, 1000)
|
b6aab9d2b05b197b303d0f04b7a3e4438e7061389ec3669fe956b73f202dc74d
|
import warnings
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_equal
import pytest
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
class TestMaxNLocator(object):
basic_data = [
(20, 100, np.array([20., 40., 60., 80., 100.])),
(0.001, 0.0001, np.array([0., 0.0002, 0.0004, 0.0006, 0.0008, 0.001])),
(-1e15, 1e15, np.array([-1.0e+15, -5.0e+14, 0e+00, 5e+14, 1.0e+15])),
(0, 0.85e-50, np.arange(6) * 2e-51),
(-0.85e-50, 0, np.arange(-5, 1) * 2e-51),
]
integer_data = [
(-0.1, 1.1, None, np.array([-1, 0, 1, 2])),
(-0.1, 0.95, None, np.array([-0.25, 0, 0.25, 0.5, 0.75, 1.0])),
(1, 55, [1, 1.5, 5, 6, 10], np.array([0, 15, 30, 45, 60])),
]
@pytest.mark.parametrize('vmin, vmax, expected', basic_data)
def test_basic(self, vmin, vmax, expected):
loc = mticker.MaxNLocator(nbins=5)
assert_almost_equal(loc.tick_values(vmin, vmax), expected)
@pytest.mark.parametrize('vmin, vmax, steps, expected', integer_data)
def test_integer(self, vmin, vmax, steps, expected):
loc = mticker.MaxNLocator(nbins=5, integer=True, steps=steps)
assert_almost_equal(loc.tick_values(vmin, vmax), expected)
class TestLinearLocator(object):
def test_basic(self):
loc = mticker.LinearLocator(numticks=3)
test_value = np.array([-0.8, -0.3, 0.2])
assert_almost_equal(loc.tick_values(-0.8, 0.2), test_value)
def test_set_params(self):
"""
Create linear locator with presets={}, numticks=2 and change it to
        something else. See if the change was successful.
        Should not raise an exception.
"""
loc = mticker.LinearLocator(numticks=2)
loc.set_params(numticks=8, presets={(0, 1): []})
assert loc.numticks == 8
assert loc.presets == {(0, 1): []}
class TestMultipleLocator(object):
def test_basic(self):
loc = mticker.MultipleLocator(base=3.147)
test_value = np.array([-9.441, -6.294, -3.147, 0., 3.147, 6.294,
9.441, 12.588])
assert_almost_equal(loc.tick_values(-7, 10), test_value)
def test_view_limits(self):
"""
Test basic behavior of view limits.
"""
with matplotlib.rc_context({'axes.autolimit_mode': 'data'}):
loc = mticker.MultipleLocator(base=3.147)
assert_almost_equal(loc.view_limits(-5, 5), (-5, 5))
def test_view_limits_round_numbers(self):
"""
Test that everything works properly with 'round_numbers' for auto
limit.
"""
with matplotlib.rc_context({'axes.autolimit_mode': 'round_numbers'}):
loc = mticker.MultipleLocator(base=3.147)
assert_almost_equal(loc.view_limits(-4, 4), (-6.294, 6.294))
def test_set_params(self):
"""
        Create multiple locator with base=0.7, and change it to something
        else. See if the change was successful.
"""
mult = mticker.MultipleLocator(base=0.7)
mult.set_params(base=1.7)
assert mult._edge.step == 1.7
class TestAutoMinorLocator(object):
def test_basic(self):
fig, ax = plt.subplots()
ax.set_xlim(0, 1.39)
ax.minorticks_on()
test_value = np.array([0.05, 0.1, 0.15, 0.25, 0.3, 0.35, 0.45,
0.5, 0.55, 0.65, 0.7, 0.75, 0.85, 0.9,
0.95, 1.05, 1.1, 1.15, 1.25, 1.3, 1.35])
assert_almost_equal(ax.xaxis.get_ticklocs(minor=True), test_value)
# NB: the following values are assuming that *xlim* is [0, 5]
params = [
(0, 0), # no major tick => no minor tick either
(1, 0) # a single major tick => no minor tick
]
@pytest.mark.parametrize('nb_majorticks, expected_nb_minorticks', params)
def test_low_number_of_majorticks(
self, nb_majorticks, expected_nb_minorticks):
# This test is related to issue #8804
fig, ax = plt.subplots()
xlims = (0, 5) # easier to test the different code paths
ax.set_xlim(*xlims)
ax.set_xticks(np.linspace(xlims[0], xlims[1], nb_majorticks))
ax.minorticks_on()
ax.xaxis.set_minor_locator(mticker.AutoMinorLocator())
assert len(ax.xaxis.get_minorticklocs()) == expected_nb_minorticks
majorstep_minordivisions = [(1, 5),
(2, 4),
(2.5, 5),
(5, 5),
(10, 5)]
# This test is meant to verify the parameterization for
# test_number_of_minor_ticks
def test_using_all_default_major_steps(self):
with matplotlib.rc_context({'_internal.classic_mode': False}):
majorsteps = [x[0] for x in self.majorstep_minordivisions]
assert np.allclose(majorsteps, mticker.AutoLocator()._steps)
@pytest.mark.parametrize('major_step, expected_nb_minordivisions',
majorstep_minordivisions)
def test_number_of_minor_ticks(
self, major_step, expected_nb_minordivisions):
fig, ax = plt.subplots()
xlims = (0, major_step)
ax.set_xlim(*xlims)
ax.set_xticks(xlims)
ax.minorticks_on()
ax.xaxis.set_minor_locator(mticker.AutoMinorLocator())
nb_minor_divisions = len(ax.xaxis.get_minorticklocs()) + 1
assert nb_minor_divisions == expected_nb_minordivisions
limits = [(0, 1.39), (0, 0.139),
(0, 0.11e-19), (0, 0.112e-12),
(-2.0e-07, -3.3e-08), (1.20e-06, 1.42e-06),
(-1.34e-06, -1.44e-06), (-8.76e-07, -1.51e-06)]
reference = [
[0.05, 0.1, 0.15, 0.25, 0.3, 0.35, 0.45, 0.5, 0.55, 0.65, 0.7,
0.75, 0.85, 0.9, 0.95, 1.05, 1.1, 1.15, 1.25, 1.3, 1.35],
[0.005, 0.01, 0.015, 0.025, 0.03, 0.035, 0.045, 0.05, 0.055, 0.065,
0.07, 0.075, 0.085, 0.09, 0.095, 0.105, 0.11, 0.115, 0.125, 0.13,
0.135],
[5.00e-22, 1.00e-21, 1.50e-21, 2.50e-21, 3.00e-21, 3.50e-21, 4.50e-21,
5.00e-21, 5.50e-21, 6.50e-21, 7.00e-21, 7.50e-21, 8.50e-21, 9.00e-21,
9.50e-21, 1.05e-20, 1.10e-20],
[5.00e-15, 1.00e-14, 1.50e-14, 2.50e-14, 3.00e-14, 3.50e-14, 4.50e-14,
5.00e-14, 5.50e-14, 6.50e-14, 7.00e-14, 7.50e-14, 8.50e-14, 9.00e-14,
9.50e-14, 1.05e-13, 1.10e-13],
[-1.95e-07, -1.90e-07, -1.85e-07, -1.75e-07, -1.70e-07, -1.65e-07,
-1.55e-07, -1.50e-07, -1.45e-07, -1.35e-07, -1.30e-07, -1.25e-07,
-1.15e-07, -1.10e-07, -1.05e-07, -9.50e-08, -9.00e-08, -8.50e-08,
-7.50e-08, -7.00e-08, -6.50e-08, -5.50e-08, -5.00e-08, -4.50e-08,
-3.50e-08],
[1.21e-06, 1.22e-06, 1.23e-06, 1.24e-06, 1.26e-06, 1.27e-06, 1.28e-06,
1.29e-06, 1.31e-06, 1.32e-06, 1.33e-06, 1.34e-06, 1.36e-06, 1.37e-06,
1.38e-06, 1.39e-06, 1.41e-06, 1.42e-06],
[-1.435e-06, -1.430e-06, -1.425e-06, -1.415e-06, -1.410e-06,
-1.405e-06, -1.395e-06, -1.390e-06, -1.385e-06, -1.375e-06,
-1.370e-06, -1.365e-06, -1.355e-06, -1.350e-06, -1.345e-06],
[-1.48e-06, -1.46e-06, -1.44e-06, -1.42e-06, -1.38e-06, -1.36e-06,
-1.34e-06, -1.32e-06, -1.28e-06, -1.26e-06, -1.24e-06, -1.22e-06,
-1.18e-06, -1.16e-06, -1.14e-06, -1.12e-06, -1.08e-06, -1.06e-06,
-1.04e-06, -1.02e-06, -9.80e-07, -9.60e-07, -9.40e-07, -9.20e-07,
-8.80e-07]]
additional_data = list(zip(limits, reference))
@pytest.mark.parametrize('lim, ref', additional_data)
def test_additional(self, lim, ref):
fig, ax = plt.subplots()
ax.minorticks_on()
ax.grid(True, 'minor', 'y', linewidth=1)
ax.grid(True, 'major', color='k', linewidth=1)
ax.set_ylim(lim)
assert_almost_equal(ax.yaxis.get_ticklocs(minor=True), ref)
class TestLogLocator(object):
def test_basic(self):
loc = mticker.LogLocator(numticks=5)
with pytest.raises(ValueError):
loc.tick_values(0, 1000)
test_value = np.array([1.00000000e-05, 1.00000000e-03, 1.00000000e-01,
1.00000000e+01, 1.00000000e+03, 1.00000000e+05,
1.00000000e+07, 1.000000000e+09])
assert_almost_equal(loc.tick_values(0.001, 1.1e5), test_value)
loc = mticker.LogLocator(base=2)
test_value = np.array([0.5, 1., 2., 4., 8., 16., 32., 64., 128., 256.])
assert_almost_equal(loc.tick_values(1, 100), test_value)
def test_switch_to_autolocator(self):
loc = mticker.LogLocator(subs="all")
assert_array_equal(loc.tick_values(0.45, 0.55),
[0.44, 0.46, 0.48, 0.5, 0.52, 0.54, 0.56])
# check that we *skip* 1.0, and 10, because this is a minor locator
loc = mticker.LogLocator(subs=np.arange(2, 10))
assert 1.0 not in loc.tick_values(0.9, 20.)
assert 10.0 not in loc.tick_values(0.9, 20.)
def test_set_params(self):
"""
Create log locator with default value, base=10.0, subs=[1.0],
numdecs=4, numticks=15 and change it to something else.
        See if the change was successful. Should not raise an exception.
"""
loc = mticker.LogLocator()
loc.set_params(numticks=7, numdecs=8, subs=[2.0], base=4)
assert loc.numticks == 7
assert loc.numdecs == 8
assert loc._base == 4
assert list(loc._subs) == [2.0]
class TestNullLocator(object):
def test_set_params(self):
"""
Create null locator, and attempt to call set_params() on it.
        Should not raise an exception, but should emit a warning.
"""
loc = mticker.NullLocator()
with pytest.warns(UserWarning):
loc.set_params()
class TestLogitLocator(object):
def test_set_params(self):
"""
Create logit locator with default minor=False, and change it to
        something else. See if the change was successful.
        Should not raise an exception.
"""
loc = mticker.LogitLocator() # Defaults to false.
loc.set_params(minor=True)
assert loc.minor
class TestFixedLocator(object):
def test_set_params(self):
"""
Create fixed locator with 5 nbins, and change it to something else.
        See if the change was successful.
        Should not raise an exception.
"""
fixed = mticker.FixedLocator(range(0, 24), nbins=5)
fixed.set_params(nbins=7)
assert fixed.nbins == 7
class TestIndexLocator(object):
def test_set_params(self):
"""
        Create index locator with base=3, offset=4, and change it to
        something else. See if the change was successful.
        Should not raise an exception.
"""
index = mticker.IndexLocator(base=3, offset=4)
index.set_params(base=7, offset=7)
assert index._base == 7
assert index.offset == 7
class TestSymmetricalLogLocator(object):
def test_set_params(self):
"""
        Create symmetrical log locator with default subs=[1.0], numticks=15,
        and change it to something else.
        See if the change was successful.
        Should not raise an exception.
"""
sym = mticker.SymmetricalLogLocator(base=10, linthresh=1)
sym.set_params(subs=[2.0], numticks=8)
assert sym._subs == [2.0]
assert sym.numticks == 8
class TestScalarFormatter(object):
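    # Each tuple below is (left, right, expected_offset); test_offset_value
    # sets the x-limits to (left, right) and then (right, left) and checks
    # the ScalarFormatter offset against expected_offset.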
offset_data = [
(123, 189, 0),
(-189, -123, 0),
(12341, 12349, 12340),
(-12349, -12341, -12340),
(99999.5, 100010.5, 100000),
(-100010.5, -99999.5, -100000),
(99990.5, 100000.5, 100000),
(-100000.5, -99990.5, -100000),
(1233999, 1234001, 1234000),
(-1234001, -1233999, -1234000),
(1, 1, 1),
(123, 123, 0),
# Test cases courtesy of @WeatherGod
(.4538, .4578, .45),
(3789.12, 3783.1, 3780),
(45124.3, 45831.75, 45000),
(0.000721, 0.0007243, 0.00072),
(12592.82, 12591.43, 12590),
(9., 12., 0),
(900., 1200., 0),
(1900., 1200., 0),
(0.99, 1.01, 1),
(9.99, 10.01, 10),
(99.99, 100.01, 100),
(5.99, 6.01, 6),
(15.99, 16.01, 16),
(-0.452, 0.492, 0),
(-0.492, 0.492, 0),
(12331.4, 12350.5, 12300),
(-12335.3, 12335.3, 0),
]
use_offset_data = [True, False]
# (sci_type, scilimits, lim, orderOfMag, fewticks)
scilimits_data = [
(False, (0, 0), (10.0, 20.0), 0, False),
(True, (-2, 2), (-10, 20), 0, False),
(True, (-2, 2), (-20, 10), 0, False),
(True, (-2, 2), (-110, 120), 2, False),
(True, (-2, 2), (-120, 110), 2, False),
(True, (-2, 2), (-.001, 0.002), -3, False),
(True, (-7, 7), (0.18e10, 0.83e10), 9, True),
(True, (0, 0), (-1e5, 1e5), 5, False),
(True, (6, 6), (-1e5, 1e5), 6, False),
]
@pytest.mark.parametrize('left, right, offset', offset_data)
def test_offset_value(self, left, right, offset):
fig, ax = plt.subplots()
formatter = ax.get_xaxis().get_major_formatter()
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', 'Attempting to set identical',
UserWarning)
ax.set_xlim(left, right)
assert len(w) == (1 if left == right else 0)
ax.get_xaxis()._update_ticks()
assert formatter.offset == offset
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', 'Attempting to set identical',
UserWarning)
ax.set_xlim(right, left)
assert len(w) == (1 if left == right else 0)
ax.get_xaxis()._update_ticks()
assert formatter.offset == offset
@pytest.mark.parametrize('use_offset', use_offset_data)
def test_use_offset(self, use_offset):
with matplotlib.rc_context({'axes.formatter.useoffset': use_offset}):
tmp_form = mticker.ScalarFormatter()
assert use_offset == tmp_form.get_useOffset()
@pytest.mark.parametrize(
'sci_type, scilimits, lim, orderOfMag, fewticks', scilimits_data)
def test_scilimits(self, sci_type, scilimits, lim, orderOfMag,
fewticks):
tmp_form = mticker.ScalarFormatter()
tmp_form.set_scientific(sci_type)
tmp_form.set_powerlimits(scilimits)
fig, ax = plt.subplots()
ax.yaxis.set_major_formatter(tmp_form)
ax.set_ylim(*lim)
if fewticks:
ax.yaxis.set_major_locator(mticker.MaxNLocator(4))
tmp_form.set_locs(ax.yaxis.get_majorticklocs())
assert orderOfMag == tmp_form.orderOfMagnitude
class FakeAxis(object):
"""Allow Formatter to be called without having a "full" plot set up."""
def __init__(self, vmin=1, vmax=10):
self.vmin = vmin
self.vmax = vmax
def get_view_interval(self):
return self.vmin, self.vmax
class TestLogFormatterExponent(object):
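    # Each param_data tuple is (labelOnlyBase, exponent, locs, positions,
    # expected); test_basic formats base**locs for each base in base_data and
    # compares the resulting labels against expected.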
param_data = [
(True, 4, np.arange(-3, 4.0), np.arange(-3, 4.0),
['-3', '-2', '-1', '0', '1', '2', '3']),
# With labelOnlyBase=False, non-integer powers should be nicely
# formatted.
(False, 10, np.array([0.1, 0.00001, np.pi, 0.2, -0.2, -0.00001]),
range(6), ['0.1', '1e-05', '3.14', '0.2', '-0.2', '-1e-05']),
(False, 50, np.array([3, 5, 12, 42], dtype='float'), range(6),
['3', '5', '12', '42']),
]
base_data = [2.0, 5.0, 10.0, np.pi, np.e]
@pytest.mark.parametrize(
'labelOnlyBase, exponent, locs, positions, expected', param_data)
@pytest.mark.parametrize('base', base_data)
def test_basic(self, labelOnlyBase, base, exponent, locs, positions,
expected):
formatter = mticker.LogFormatterExponent(base=base,
labelOnlyBase=labelOnlyBase)
formatter.axis = FakeAxis(1, base**exponent)
vals = base**locs
labels = [formatter(x, pos) for (x, pos) in zip(vals, positions)]
assert labels == expected
def test_blank(self):
# Should be a blank string for non-integer powers if labelOnlyBase=True
formatter = mticker.LogFormatterExponent(base=10, labelOnlyBase=True)
formatter.axis = FakeAxis()
assert formatter(10**0.1) == ''
class TestLogFormatterMathtext(object):
fmt = mticker.LogFormatterMathtext()
test_data = [
(0, 1, '$\\mathdefault{10^{0}}$'),
(0, 1e-2, '$\\mathdefault{10^{-2}}$'),
(0, 1e2, '$\\mathdefault{10^{2}}$'),
(3, 1, '$\\mathdefault{1}$'),
(3, 1e-2, '$\\mathdefault{0.01}$'),
(3, 1e2, '$\\mathdefault{100}$'),
(3, 1e-3, '$\\mathdefault{10^{-3}}$'),
(3, 1e3, '$\\mathdefault{10^{3}}$'),
]
@pytest.mark.parametrize('min_exponent, value, expected', test_data)
def test_min_exponent(self, min_exponent, value, expected):
with matplotlib.rc_context({'axes.formatter.min_exponent':
min_exponent}):
assert self.fmt(value) == expected
class TestLogFormatterSciNotation(object):
test_data = [
(2, 0.03125, '$\\mathdefault{2^{-5}}$'),
(2, 1, '$\\mathdefault{2^{0}}$'),
(2, 32, '$\\mathdefault{2^{5}}$'),
(2, 0.0375, '$\\mathdefault{1.2\\times2^{-5}}$'),
(2, 1.2, '$\\mathdefault{1.2\\times2^{0}}$'),
(2, 38.4, '$\\mathdefault{1.2\\times2^{5}}$'),
(10, -1, '$\\mathdefault{-10^{0}}$'),
(10, 1e-05, '$\\mathdefault{10^{-5}}$'),
(10, 1, '$\\mathdefault{10^{0}}$'),
(10, 100000, '$\\mathdefault{10^{5}}$'),
(10, 2e-05, '$\\mathdefault{2\\times10^{-5}}$'),
(10, 2, '$\\mathdefault{2\\times10^{0}}$'),
(10, 200000, '$\\mathdefault{2\\times10^{5}}$'),
(10, 5e-05, '$\\mathdefault{5\\times10^{-5}}$'),
(10, 5, '$\\mathdefault{5\\times10^{0}}$'),
(10, 500000, '$\\mathdefault{5\\times10^{5}}$'),
]
@pytest.mark.style('default')
@pytest.mark.parametrize('base, value, expected', test_data)
def test_basic(self, base, value, expected):
formatter = mticker.LogFormatterSciNotation(base=base)
formatter.sublabel = {1, 2, 5, 1.2}
with matplotlib.rc_context({'text.usetex': False}):
assert formatter(value) == expected
class TestLogFormatter(object):
pprint_data = [
(3.141592654e-05, 0.001, '3.142e-5'),
(0.0003141592654, 0.001, '3.142e-4'),
(0.003141592654, 0.001, '3.142e-3'),
(0.03141592654, 0.001, '3.142e-2'),
(0.3141592654, 0.001, '3.142e-1'),
(3.141592654, 0.001, '3.142'),
(31.41592654, 0.001, '3.142e1'),
(314.1592654, 0.001, '3.142e2'),
(3141.592654, 0.001, '3.142e3'),
(31415.92654, 0.001, '3.142e4'),
(314159.2654, 0.001, '3.142e5'),
(1e-05, 0.001, '1e-5'),
(0.0001, 0.001, '1e-4'),
(0.001, 0.001, '1e-3'),
(0.01, 0.001, '1e-2'),
(0.1, 0.001, '1e-1'),
(1, 0.001, '1'),
(10, 0.001, '10'),
(100, 0.001, '100'),
(1000, 0.001, '1000'),
(10000, 0.001, '1e4'),
(100000, 0.001, '1e5'),
(3.141592654e-05, 0.015, '0'),
(0.0003141592654, 0.015, '0'),
(0.003141592654, 0.015, '0.003'),
(0.03141592654, 0.015, '0.031'),
(0.3141592654, 0.015, '0.314'),
(3.141592654, 0.015, '3.142'),
(31.41592654, 0.015, '31.416'),
(314.1592654, 0.015, '314.159'),
(3141.592654, 0.015, '3141.593'),
(31415.92654, 0.015, '31415.927'),
(314159.2654, 0.015, '314159.265'),
(1e-05, 0.015, '0'),
(0.0001, 0.015, '0'),
(0.001, 0.015, '0.001'),
(0.01, 0.015, '0.01'),
(0.1, 0.015, '0.1'),
(1, 0.015, '1'),
(10, 0.015, '10'),
(100, 0.015, '100'),
(1000, 0.015, '1000'),
(10000, 0.015, '10000'),
(100000, 0.015, '100000'),
(3.141592654e-05, 0.5, '0'),
(0.0003141592654, 0.5, '0'),
(0.003141592654, 0.5, '0.003'),
(0.03141592654, 0.5, '0.031'),
(0.3141592654, 0.5, '0.314'),
(3.141592654, 0.5, '3.142'),
(31.41592654, 0.5, '31.416'),
(314.1592654, 0.5, '314.159'),
(3141.592654, 0.5, '3141.593'),
(31415.92654, 0.5, '31415.927'),
(314159.2654, 0.5, '314159.265'),
(1e-05, 0.5, '0'),
(0.0001, 0.5, '0'),
(0.001, 0.5, '0.001'),
(0.01, 0.5, '0.01'),
(0.1, 0.5, '0.1'),
(1, 0.5, '1'),
(10, 0.5, '10'),
(100, 0.5, '100'),
(1000, 0.5, '1000'),
(10000, 0.5, '10000'),
(100000, 0.5, '100000'),
(3.141592654e-05, 5, '0'),
(0.0003141592654, 5, '0'),
(0.003141592654, 5, '0'),
(0.03141592654, 5, '0.03'),
(0.3141592654, 5, '0.31'),
(3.141592654, 5, '3.14'),
(31.41592654, 5, '31.42'),
(314.1592654, 5, '314.16'),
(3141.592654, 5, '3141.59'),
(31415.92654, 5, '31415.93'),
(314159.2654, 5, '314159.27'),
(1e-05, 5, '0'),
(0.0001, 5, '0'),
(0.001, 5, '0'),
(0.01, 5, '0.01'),
(0.1, 5, '0.1'),
(1, 5, '1'),
(10, 5, '10'),
(100, 5, '100'),
(1000, 5, '1000'),
(10000, 5, '10000'),
(100000, 5, '100000'),
(3.141592654e-05, 100, '0'),
(0.0003141592654, 100, '0'),
(0.003141592654, 100, '0'),
(0.03141592654, 100, '0'),
(0.3141592654, 100, '0.3'),
(3.141592654, 100, '3.1'),
(31.41592654, 100, '31.4'),
(314.1592654, 100, '314.2'),
(3141.592654, 100, '3141.6'),
(31415.92654, 100, '31415.9'),
(314159.2654, 100, '314159.3'),
(1e-05, 100, '0'),
(0.0001, 100, '0'),
(0.001, 100, '0'),
(0.01, 100, '0'),
(0.1, 100, '0.1'),
(1, 100, '1'),
(10, 100, '10'),
(100, 100, '100'),
(1000, 100, '1000'),
(10000, 100, '10000'),
(100000, 100, '100000'),
(3.141592654e-05, 1000000.0, '3.1e-5'),
(0.0003141592654, 1000000.0, '3.1e-4'),
(0.003141592654, 1000000.0, '3.1e-3'),
(0.03141592654, 1000000.0, '3.1e-2'),
(0.3141592654, 1000000.0, '3.1e-1'),
(3.141592654, 1000000.0, '3.1'),
(31.41592654, 1000000.0, '3.1e1'),
(314.1592654, 1000000.0, '3.1e2'),
(3141.592654, 1000000.0, '3.1e3'),
(31415.92654, 1000000.0, '3.1e4'),
(314159.2654, 1000000.0, '3.1e5'),
(1e-05, 1000000.0, '1e-5'),
(0.0001, 1000000.0, '1e-4'),
(0.001, 1000000.0, '1e-3'),
(0.01, 1000000.0, '1e-2'),
(0.1, 1000000.0, '1e-1'),
(1, 1000000.0, '1'),
(10, 1000000.0, '10'),
(100, 1000000.0, '100'),
(1000, 1000000.0, '1000'),
(10000, 1000000.0, '1e4'),
(100000, 1000000.0, '1e5'),
]
@pytest.mark.parametrize('value, domain, expected', pprint_data)
def test_pprint(self, value, domain, expected):
fmt = mticker.LogFormatter()
label = fmt._pprint_val(value, domain)
assert label == expected
def _sub_labels(self, axis, subs=()):
"Test whether locator marks subs to be labeled"
fmt = axis.get_minor_formatter()
minor_tlocs = axis.get_minorticklocs()
fmt.set_locs(minor_tlocs)
coefs = minor_tlocs / 10**(np.floor(np.log10(minor_tlocs)))
label_expected = [np.round(c) in subs for c in coefs]
label_test = [fmt(x) != '' for x in minor_tlocs]
assert label_test == label_expected
@pytest.mark.style('default')
def test_sublabel(self):
# test label locator
fig, ax = plt.subplots()
ax.set_xscale('log')
ax.xaxis.set_major_locator(mticker.LogLocator(base=10, subs=[]))
ax.xaxis.set_minor_locator(mticker.LogLocator(base=10,
subs=np.arange(2, 10)))
ax.xaxis.set_major_formatter(mticker.LogFormatter(labelOnlyBase=True))
ax.xaxis.set_minor_formatter(mticker.LogFormatter(labelOnlyBase=False))
# axis range above 3 decades, only bases are labeled
ax.set_xlim(1, 1e4)
fmt = ax.xaxis.get_major_formatter()
fmt.set_locs(ax.xaxis.get_majorticklocs())
show_major_labels = [fmt(x) != ''
for x in ax.xaxis.get_majorticklocs()]
assert np.all(show_major_labels)
self._sub_labels(ax.xaxis, subs=[])
# For the next two, if the numdec threshold in LogFormatter.set_locs
# were 3, then the label sub would be 3 for 2-3 decades and (2,5)
# for 1-2 decades. With a threshold of 1, subs are not labeled.
# axis range at 2 to 3 decades
ax.set_xlim(1, 800)
self._sub_labels(ax.xaxis, subs=[])
# axis range at 1 to 2 decades
ax.set_xlim(1, 80)
self._sub_labels(ax.xaxis, subs=[])
# axis range at 0.4 to 1 decades, label subs 2, 3, 4, 6
ax.set_xlim(1, 8)
self._sub_labels(ax.xaxis, subs=[2, 3, 4, 6])
# axis range at 0 to 0.4 decades, label all
ax.set_xlim(0.5, 0.9)
self._sub_labels(ax.xaxis, subs=np.arange(2, 10, dtype=int))
@pytest.mark.parametrize('val', [1, 10, 100, 1000])
def test_LogFormatter_call(self, val):
# test _num_to_string method used in __call__
temp_lf = mticker.LogFormatter()
temp_lf.axis = FakeAxis()
assert temp_lf(val) == str(val)
class TestFormatStrFormatter(object):
def test_basic(self):
# test % style formatter
tmp_form = mticker.FormatStrFormatter('%05d')
assert '00002' == tmp_form(2)
class TestStrMethodFormatter(object):
test_data = [
('{x:05d}', (2,), '00002'),
('{x:03d}-{pos:02d}', (2, 1), '002-01'),
]
@pytest.mark.parametrize('format, input, expected', test_data)
def test_basic(self, format, input, expected):
fmt = mticker.StrMethodFormatter(format)
assert fmt(*input) == expected
class TestEngFormatter(object):
    # (unicode_minus, input, expected), where 'expected' holds the outputs
    # returned for places=None, places=0 and places=2, respectively.
    # unicode_minus is a boolean value for rcParams['axes.unicode_minus'].
raw_format_data = [
(False, -1234.56789, ('-1.23457 k', '-1 k', '-1.23 k')),
(True, -1234.56789, ('\N{MINUS SIGN}1.23457 k', '\N{MINUS SIGN}1 k',
'\N{MINUS SIGN}1.23 k')),
(False, -1.23456789, ('-1.23457', '-1', '-1.23')),
(True, -1.23456789, ('\N{MINUS SIGN}1.23457', '\N{MINUS SIGN}1',
'\N{MINUS SIGN}1.23')),
(False, -0.123456789, ('-123.457 m', '-123 m', '-123.46 m')),
(True, -0.123456789, ('\N{MINUS SIGN}123.457 m', '\N{MINUS SIGN}123 m',
'\N{MINUS SIGN}123.46 m')),
(False, -0.00123456789, ('-1.23457 m', '-1 m', '-1.23 m')),
(True, -0.00123456789, ('\N{MINUS SIGN}1.23457 m', '\N{MINUS SIGN}1 m',
'\N{MINUS SIGN}1.23 m')),
(True, -0.0, ('0', '0', '0.00')),
(True, -0, ('0', '0', '0.00')),
(True, 0, ('0', '0', '0.00')),
(True, 1.23456789e-6, ('1.23457 µ', '1 µ', '1.23 µ')),
(True, 0.123456789, ('123.457 m', '123 m', '123.46 m')),
(True, 0.1, ('100 m', '100 m', '100.00 m')),
(True, 1, ('1', '1', '1.00')),
(True, 1.23456789, ('1.23457', '1', '1.23')),
# places=0: corner-case rounding
(True, 999.9, ('999.9', '1 k', '999.90')),
# corner-case rounding for all
(True, 999.9999, ('1 k', '1 k', '1.00 k')),
# negative corner-case
(False, -999.9999, ('-1 k', '-1 k', '-1.00 k')),
(True, -999.9999, ('\N{MINUS SIGN}1 k', '\N{MINUS SIGN}1 k',
'\N{MINUS SIGN}1.00 k')),
(True, 1000, ('1 k', '1 k', '1.00 k')),
(True, 1001, ('1.001 k', '1 k', '1.00 k')),
(True, 100001, ('100.001 k', '100 k', '100.00 k')),
(True, 987654.321, ('987.654 k', '988 k', '987.65 k')),
# OoR value (> 1000 Y)
(True, 1.23e27, ('1230 Y', '1230 Y', '1230.00 Y'))
]
@pytest.mark.parametrize('unicode_minus, input, expected', raw_format_data)
def test_params(self, unicode_minus, input, expected):
"""
Test the formatting of EngFormatter for various values of the 'places'
argument, in several cases:
0. without a unit symbol but with a (default) space separator;
1. with both a unit symbol and a (default) space separator;
        2. with both a unit symbol and some non-default separators;
        3. without a unit symbol but with some non-default separators.
        Note that cases 2 and 3 are looped over several separator strings.
"""
plt.rcParams['axes.unicode_minus'] = unicode_minus
UNIT = 's' # seconds
DIGITS = '0123456789' # %timeit showed 10-20% faster search than set
# Case 0: unit='' (default) and sep=' ' (default).
# 'expected' already corresponds to this reference case.
exp_outputs = expected
formatters = (
mticker.EngFormatter(), # places=None (default)
mticker.EngFormatter(places=0),
mticker.EngFormatter(places=2)
)
for _formatter, _exp_output in zip(formatters, exp_outputs):
assert _formatter(input) == _exp_output
# Case 1: unit=UNIT and sep=' ' (default).
# Append a unit symbol to the reference case.
# Beware of the values in [1, 1000), where there is no prefix!
exp_outputs = (_s + " " + UNIT if _s[-1] in DIGITS # case w/o prefix
else _s + UNIT for _s in expected)
formatters = (
mticker.EngFormatter(unit=UNIT), # places=None (default)
mticker.EngFormatter(unit=UNIT, places=0),
mticker.EngFormatter(unit=UNIT, places=2)
)
for _formatter, _exp_output in zip(formatters, exp_outputs):
assert _formatter(input) == _exp_output
# Test several non default separators: no separator, a narrow
# no-break space (unicode character) and an extravagant string.
for _sep in ("", "\N{NARROW NO-BREAK SPACE}", "@_@"):
# Case 2: unit=UNIT and sep=_sep.
# Replace the default space separator from the reference case
# with the tested one `_sep` and append a unit symbol to it.
exp_outputs = (_s + _sep + UNIT if _s[-1] in DIGITS # no prefix
else _s.replace(" ", _sep) + UNIT
for _s in expected)
formatters = (
mticker.EngFormatter(unit=UNIT, sep=_sep), # places=None
mticker.EngFormatter(unit=UNIT, places=0, sep=_sep),
mticker.EngFormatter(unit=UNIT, places=2, sep=_sep)
)
for _formatter, _exp_output in zip(formatters, exp_outputs):
assert _formatter(input) == _exp_output
# Case 3: unit='' (default) and sep=_sep.
# Replace the default space separator from the reference case
# with the tested one `_sep`. Reference case is already unitless.
exp_outputs = (_s.replace(" ", _sep) for _s in expected)
formatters = (
mticker.EngFormatter(sep=_sep), # places=None (default)
mticker.EngFormatter(places=0, sep=_sep),
mticker.EngFormatter(places=2, sep=_sep)
)
for _formatter, _exp_output in zip(formatters, exp_outputs):
assert _formatter(input) == _exp_output
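# A minimal usage sketch, consistent with the expectations above (assuming
# the default sep=' '): mticker.EngFormatter(unit='s', places=2)(1234.56789)
# yields '1.23 ks', i.e. value, SI prefix and unit separated by the space.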
def test_engformatter_usetex_useMathText():
fig, ax = plt.subplots()
ax.plot([0, 500, 1000], [0, 500, 1000])
ax.set_xticks([0, 500, 1000])
for formatter in (mticker.EngFormatter(usetex=True),
mticker.EngFormatter(useMathText=True)):
ax.xaxis.set_major_formatter(formatter)
fig.canvas.draw()
x_tick_label_text = [labl.get_text() for labl in ax.get_xticklabels()]
# Checking if the dollar `$` signs have been inserted around numbers
# in tick labels.
assert x_tick_label_text == ['$0$', '$500$', '$1$ k']
class TestPercentFormatter(object):
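    # Each percent_data tuple is (xmax, decimals, symbol, x, display_range,
    # expected); the matching entry in percent_ids names the case.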
percent_data = [
# Check explicitly set decimals over different intervals and values
(100, 0, '%', 120, 100, '120%'),
(100, 0, '%', 100, 90, '100%'),
(100, 0, '%', 90, 50, '90%'),
(100, 0, '%', -1.7, 40, '-2%'),
(100, 1, '%', 90.0, 100, '90.0%'),
(100, 1, '%', 80.1, 90, '80.1%'),
(100, 1, '%', 70.23, 50, '70.2%'),
# 60.554 instead of 60.55: see https://bugs.python.org/issue5118
(100, 1, '%', -60.554, 40, '-60.6%'),
# Check auto decimals over different intervals and values
(100, None, '%', 95, 1, '95.00%'),
(1.0, None, '%', 3, 6, '300%'),
(17.0, None, '%', 1, 8.5, '6%'),
(17.0, None, '%', 1, 8.4, '5.9%'),
(5, None, '%', -100, 0.000001, '-2000.00000%'),
# Check percent symbol
(1.0, 2, None, 1.2, 100, '120.00'),
(75, 3, '', 50, 100, '66.667'),
(42, None, '^^Foobar$$', 21, 12, '50.0^^Foobar$$'),
]
percent_ids = [
# Check explicitly set decimals over different intervals and values
'decimals=0, x>100%',
'decimals=0, x=100%',
'decimals=0, x<100%',
'decimals=0, x<0%',
'decimals=1, x>100%',
'decimals=1, x=100%',
'decimals=1, x<100%',
'decimals=1, x<0%',
# Check auto decimals over different intervals and values
'autodecimal, x<100%, display_range=1',
'autodecimal, x>100%, display_range=6 (custom xmax test)',
'autodecimal, x<100%, display_range=8.5 (autodecimal test 1)',
'autodecimal, x<100%, display_range=8.4 (autodecimal test 2)',
'autodecimal, x<-100%, display_range=1e-6 (tiny display range)',
# Check percent symbol
'None as percent symbol',
'Empty percent symbol',
'Custom percent symbol',
]
latex_data = [
(False, False, r'50\{t}%'),
(False, True, r'50\\\{t\}\%'),
(True, False, r'50\{t}%'),
(True, True, r'50\{t}%'),
]
@pytest.mark.parametrize(
'xmax, decimals, symbol, x, display_range, expected',
percent_data, ids=percent_ids)
def test_basic(self, xmax, decimals, symbol,
x, display_range, expected):
formatter = mticker.PercentFormatter(xmax, decimals, symbol)
with matplotlib.rc_context(rc={'text.usetex': False}):
assert formatter.format_pct(x, display_range) == expected
@pytest.mark.parametrize('is_latex, usetex, expected', latex_data)
def test_latex(self, is_latex, usetex, expected):
fmt = mticker.PercentFormatter(symbol='\\{t}%', is_latex=is_latex)
with matplotlib.rc_context(rc={'text.usetex': usetex}):
assert fmt.format_pct(50, 100) == expected
def test_majformatter_type():
fig, ax = plt.subplots()
with pytest.raises(TypeError):
ax.xaxis.set_major_formatter(matplotlib.ticker.LogLocator())
def test_minformatter_type():
fig, ax = plt.subplots()
with pytest.raises(TypeError):
ax.xaxis.set_minor_formatter(matplotlib.ticker.LogLocator())
def test_majlocator_type():
fig, ax = plt.subplots()
with pytest.raises(TypeError):
ax.xaxis.set_major_locator(matplotlib.ticker.LogFormatter())
def test_minlocator_type():
fig, ax = plt.subplots()
with pytest.raises(TypeError):
ax.xaxis.set_minor_locator(matplotlib.ticker.LogFormatter())
def test_minorticks_rc():
fig = plt.figure()
def minorticksubplot(xminor, yminor, i):
rc = {'xtick.minor.visible': xminor,
'ytick.minor.visible': yminor}
with plt.rc_context(rc=rc):
ax = fig.add_subplot(2, 2, i)
assert (len(ax.xaxis.get_minor_ticks()) > 0) == xminor
assert (len(ax.yaxis.get_minor_ticks()) > 0) == yminor
minorticksubplot(False, False, 1)
minorticksubplot(True, False, 2)
minorticksubplot(False, True, 3)
minorticksubplot(True, True, 4)
@pytest.mark.parametrize('remove_overlapping_locs, expected_num',
((True, 6),
(None, 6), # this tests the default
(False, 9)))
def test_remove_overlap(remove_overlapping_locs, expected_num):
import numpy as np
import matplotlib.dates as mdates
t = np.arange("2018-11-03", "2018-11-06", dtype="datetime64")
x = np.ones(len(t))
fig, ax = plt.subplots()
ax.plot(t, x)
ax.xaxis.set_major_locator(mdates.DayLocator())
ax.xaxis.set_major_formatter(mdates.DateFormatter('\n%a'))
ax.xaxis.set_minor_locator(mdates.HourLocator((0, 6, 12, 18)))
ax.xaxis.set_minor_formatter(mdates.DateFormatter('%H:%M'))
# force there to be extra ticks
ax.xaxis.get_minor_ticks(15)
if remove_overlapping_locs is not None:
ax.xaxis.remove_overlapping_locs = remove_overlapping_locs
# check that getter/setter exists
current = ax.xaxis.remove_overlapping_locs
assert (current == ax.xaxis.get_remove_overlapping_locs())
plt.setp(ax.xaxis, remove_overlapping_locs=current)
new = ax.xaxis.remove_overlapping_locs
assert (new == ax.xaxis.remove_overlapping_locs)
# check that the accessors filter correctly
# this is the method that does the actual filtering
assert len(ax.xaxis.get_minorticklocs()) == expected_num
# these three are derivative
assert len(ax.xaxis.get_minor_ticks()) == expected_num
assert len(ax.xaxis.get_minorticklabels()) == expected_num
assert len(ax.xaxis.get_minorticklines()) == expected_num*2
# force a draw to call _update_ticks under the hood
fig.canvas.draw()
    # check that the correct number of ticks report themselves as visible
assert sum(t.get_visible() for t in ax.xaxis.minorTicks) == expected_num
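    # Note on the expected counts above: three days with minor ticks at hours
    # (0, 6, 12, 18) give 9 minor locations within the plotted range; the 3
    # coinciding with the daily major ticks are dropped when
    # remove_overlapping_locs is enabled, leaving 6.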
|
2ffb3a5ae0de875d9257c3bab4b304a4830055cfa47b43c18c0aa289017b82ae
|
import io
import re
import numpy as np
import pytest
import matplotlib
from matplotlib.testing.decorators import check_figures_equal, image_comparison
import matplotlib.pyplot as plt
from matplotlib import mathtext
math_tests = [
r'$a+b+\dot s+\dot{s}+\ldots$',
r'$x \doteq y$',
r'\$100.00 $\alpha \_$',
r'$\frac{\$100.00}{y}$',
r'$x y$',
r'$x+y\ x=y\ x<y\ x:y\ x,y\ x@y$',
r'$100\%y\ x*y\ x/y x\$y$',
r'$x\leftarrow y\ x\forall y\ x-y$',
r'$x \sf x \bf x {\cal X} \rm x$',
r'$x\ x\,x\;x\quad x\qquad x\!x\hspace{ 0.5 }y$',
r'$\{ \rm braces \}$',
r'$\left[\left\lfloor\frac{5}{\frac{\left(3\right)}{4}} y\right)\right]$',
r'$\left(x\right)$',
r'$\sin(x)$',
r'$x_2$',
r'$x^2$',
r'$x^2_y$',
r'$x_y^2$',
r'$\prod_{i=\alpha_{i+1}}^\infty$',
r'$x = \frac{x+\frac{5}{2}}{\frac{y+3}{8}}$',
r'$dz/dt = \gamma x^2 + {\rm sin}(2\pi y+\phi)$',
r'Foo: $\alpha_{i+1}^j = {\rm sin}(2\pi f_j t_i) e^{-5 t_i/\tau}$',
r'$\mathcal{R}\prod_{i=\alpha_{i+1}}^\infty a_i \sin(2 \pi f x_i)$',
r'Variable $i$ is good',
r'$\Delta_i^j$',
r'$\Delta^j_{i+1}$',
r'$\ddot{o}\acute{e}\grave{e}\hat{O}\breve{\imath}\tilde{n}\vec{q}$',
r"$\arccos((x^i))$",
r"$\gamma = \frac{x=\frac{6}{8}}{y} \delta$",
r'$\limsup_{x\to\infty}$',
r'$\oint^\infty_0$',
r"$f'\quad f'''(x)\quad ''/\mathrm{yr}$",
r'$\frac{x_2888}{y}$',
r"$\sqrt[3]{\frac{X_2}{Y}}=5$",
r"$\sqrt[5]{\prod^\frac{x}{2\pi^2}_\infty}$",
r"$\sqrt[3]{x}=5$",
r'$\frac{X}{\frac{X}{Y}}$',
r"$W^{3\beta}_{\delta_1 \rho_1 \sigma_2} = U^{3\beta}_{\delta_1 \rho_1} + \frac{1}{8 \pi 2} \int^{\alpha_2}_{\alpha_2} d \alpha^\prime_2 \left[\frac{ U^{2\beta}_{\delta_1 \rho_1} - \alpha^\prime_2U^{1\beta}_{\rho_1 \sigma_2} }{U^{0\beta}_{\rho_1 \sigma_2}}\right]$",
r'$\mathcal{H} = \int d \tau \left(\epsilon E^2 + \mu H^2\right)$',
r'$\widehat{abc}\widetilde{def}$',
'$\\Gamma \\Delta \\Theta \\Lambda \\Xi \\Pi \\Sigma \\Upsilon \\Phi \\Psi \\Omega$',
'$\\alpha \\beta \\gamma \\delta \\epsilon \\zeta \\eta \\theta \\iota \\lambda \\mu \\nu \\xi \\pi \\kappa \\rho \\sigma \\tau \\upsilon \\phi \\chi \\psi$',
# The examples prefixed by 'mmltt' are from the MathML torture test here:
# http://www.mozilla.org/projects/mathml/demo/texvsmml.xhtml
r'${x}^{2}{y}^{2}$',
r'${}_{2}F_{3}$',
r'$\frac{x+{y}^{2}}{k+1}$',
r'$x+{y}^{\frac{2}{k+1}}$',
r'$\frac{a}{b/2}$',
r'${a}_{0}+\frac{1}{{a}_{1}+\frac{1}{{a}_{2}+\frac{1}{{a}_{3}+\frac{1}{{a}_{4}}}}}$',
r'${a}_{0}+\frac{1}{{a}_{1}+\frac{1}{{a}_{2}+\frac{1}{{a}_{3}+\frac{1}{{a}_{4}}}}}$',
r'$\binom{n}{k/2}$',
r'$\binom{p}{2}{x}^{2}{y}^{p-2}-\frac{1}{1-x}\frac{1}{1-{x}^{2}}$',
r'${x}^{2y}$',
r'$\sum _{i=1}^{p}\sum _{j=1}^{q}\sum _{k=1}^{r}{a}_{ij}{b}_{jk}{c}_{ki}$',
r'$\sqrt{1+\sqrt{1+\sqrt{1+\sqrt{1+\sqrt{1+\sqrt{1+\sqrt{1+x}}}}}}}$',
r'$\left(\frac{{\partial }^{2}}{\partial {x}^{2}}+\frac{{\partial }^{2}}{\partial {y}^{2}}\right){|\varphi \left(x+iy\right)|}^{2}=0$',
r'${2}^{{2}^{{2}^{x}}}$',
r'${\int }_{1}^{x}\frac{\mathrm{dt}}{t}$',
r'$\int {\int }_{D}\mathrm{dx} \mathrm{dy}$',
    # mathtext doesn't support array
# 'mmltt18' : r'$f\left(x\right)=\left\{\begin{array}{cc}\hfill 1/3\hfill & \text{if_}0\le x\le 1;\hfill \\ \hfill 2/3\hfill & \hfill \text{if_}3\le x\le 4;\hfill \\ \hfill 0\hfill & \text{elsewhere.}\hfill \end{array}$',
    # mathtext doesn't support stackrel
# 'mmltt19' : ur'$\stackrel{\stackrel{k\text{times}}{\ufe37}}{x+...+x}$',
r'${y}_{{x}^{2}}$',
    # mathtext doesn't support the "\text" command
# 'mmltt21' : r'$\sum _{p\text{\prime}}f\left(p\right)={\int }_{t>1}f\left(t\right) d\pi \left(t\right)$',
    # mathtext doesn't support array
# 'mmltt23' : r'$\left(\begin{array}{cc}\hfill \left(\begin{array}{cc}\hfill a\hfill & \hfill b\hfill \\ \hfill c\hfill & \hfill d\hfill \end{array}\right)\hfill & \hfill \left(\begin{array}{cc}\hfill e\hfill & \hfill f\hfill \\ \hfill g\hfill & \hfill h\hfill \end{array}\right)\hfill \\ \hfill 0\hfill & \hfill \left(\begin{array}{cc}\hfill i\hfill & \hfill j\hfill \\ \hfill k\hfill & \hfill l\hfill \end{array}\right)\hfill \end{array}\right)$',
    # mathtext doesn't support array
# 'mmltt24' : r'$det|\begin{array}{ccccc}\hfill {c}_{0}\hfill & \hfill {c}_{1}\hfill & \hfill {c}_{2}\hfill & \hfill \dots \hfill & \hfill {c}_{n}\hfill \\ \hfill {c}_{1}\hfill & \hfill {c}_{2}\hfill & \hfill {c}_{3}\hfill & \hfill \dots \hfill & \hfill {c}_{n+1}\hfill \\ \hfill {c}_{2}\hfill & \hfill {c}_{3}\hfill & \hfill {c}_{4}\hfill & \hfill \dots \hfill & \hfill {c}_{n+2}\hfill \\ \hfill \u22ee\hfill & \hfill \u22ee\hfill & \hfill \u22ee\hfill & \hfill \hfill & \hfill \u22ee\hfill \\ \hfill {c}_{n}\hfill & \hfill {c}_{n+1}\hfill & \hfill {c}_{n+2}\hfill & \hfill \dots \hfill & \hfill {c}_{2n}\hfill \end{array}|>0$',
r'${y}_{{x}_{2}}$',
r'${x}_{92}^{31415}+\pi $',
r'${x}_{{y}_{b}^{a}}^{{z}_{c}^{d}}$',
r'${y}_{3}^{\prime \prime \prime }$',
r"$\left( \xi \left( 1 - \xi \right) \right)$", # Bug 2969451
r"$\left(2 \, a=b\right)$", # Sage bug #8125
r"$? ! &$", # github issue #466
r'$\operatorname{cos} x$', # github issue #553
r'$\sum _{\genfrac{}{}{0}{}{0\leq i\leq m}{0<j<n}}P\left(i,j\right)$',
r"$\left\Vert a \right\Vert \left\vert b \right\vert \left| a \right| \left\| b\right\| \Vert a \Vert \vert b \vert$",
r'$\mathring{A} \stackrel{\circ}{A} \AA$',
r'$M \, M \thinspace M \/ M \> M \: M \; M \ M \enspace M \quad M \qquad M \! M$',
r'$\Cup$ $\Cap$ $\leftharpoonup$ $\barwedge$ $\rightharpoonup$',
r'$\dotplus$ $\doteq$ $\doteqdot$ $\ddots$',
r'$xyz^kx_kx^py^{p-2} d_i^jb_jc_kd x^j_i E^0 E^0_u$', # github issue #4873
r'${xyz}^k{x}_{k}{x}^{p}{y}^{p-2} {d}_{i}^{j}{b}_{j}{c}_{k}{d} {x}^{j}_{i}{E}^{0}{E}^0_u$',
r'${\int}_x^x x\oint_x^x x\int_{X}^{X}x\int_x x \int^x x \int_{x} x\int^{x}{\int}_{x} x{\int}^{x}_{x}x$',
r'testing$^{123}$',
' '.join('$\\' + p + '$' for p in sorted(mathtext.Parser._snowflake)),
r'$6-2$; $-2$; $ -2$; ${-2}$; ${ -2}$; $20^{+3}_{-2}$',
r'$\overline{\omega}^x \frac{1}{2}_0^x$', # github issue #5444
r'$,$ $.$ $1{,}234{, }567{ , }890$ and $1,234,567,890$', # github issue 5799
r'$\left(X\right)_{a}^{b}$', # github issue 7615
r'$\dfrac{\$100.00}{y}$', # github issue #1888
]
digits = "0123456789"
uppercase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
lowercase = "abcdefghijklmnopqrstuvwxyz"
uppergreek = ("\\Gamma \\Delta \\Theta \\Lambda \\Xi \\Pi \\Sigma \\Upsilon \\Phi \\Psi "
"\\Omega")
lowergreek = ("\\alpha \\beta \\gamma \\delta \\epsilon \\zeta \\eta \\theta \\iota "
"\\lambda \\mu \\nu \\xi \\pi \\kappa \\rho \\sigma \\tau \\upsilon "
"\\phi \\chi \\psi")
all = [digits, uppercase, lowercase, uppergreek, lowergreek]
font_test_specs = [
([], all),
(['mathrm'], all),
(['mathbf'], all),
(['mathit'], all),
(['mathtt'], [digits, uppercase, lowercase]),
(['mathcircled'], [digits, uppercase, lowercase]),
(['mathrm', 'mathcircled'], [digits, uppercase, lowercase]),
(['mathbf', 'mathcircled'], [digits, uppercase, lowercase]),
(['mathbb'], [digits, uppercase, lowercase,
r'\Gamma \Pi \Sigma \gamma \pi']),
(['mathrm', 'mathbb'], [digits, uppercase, lowercase,
r'\Gamma \Pi \Sigma \gamma \pi']),
(['mathbf', 'mathbb'], [digits, uppercase, lowercase,
r'\Gamma \Pi \Sigma \gamma \pi']),
(['mathcal'], [uppercase]),
(['mathfrak'], [uppercase, lowercase]),
(['mathbf', 'mathfrak'], [uppercase, lowercase]),
(['mathscr'], [uppercase, lowercase]),
(['mathsf'], [digits, uppercase, lowercase]),
(['mathrm', 'mathsf'], [digits, uppercase, lowercase]),
(['mathbf', 'mathsf'], [digits, uppercase, lowercase])
]
font_tests = []
for fonts, chars in font_test_specs:
wrapper = [' '.join(fonts), ' $']
for font in fonts:
wrapper.append(r'\%s{' % font)
wrapper.append('%s')
for font in fonts:
wrapper.append('}')
wrapper.append('$')
wrapper = ''.join(wrapper)
for set in chars:
font_tests.append(wrapper % set)
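# For example, fonts=['mathbf'] combined with the digits set produces the
# entry 'mathbf $\mathbf{0123456789}$'.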
@pytest.fixture
def baseline_images(request, fontset, index):
return ['%s_%s_%02d' % (request.param, fontset, index)]
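# With indirect parametrization this fixture builds baseline names such as
# 'mathtext_cm_00' from the parametrized prefix, the fontset and the index.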
# In the following two tests, use recwarn to suppress warnings regarding the
# deprecation of \stackrel and \mathcircled.
@pytest.mark.parametrize('index, test', enumerate(math_tests),
ids=[str(index) for index in range(len(math_tests))])
@pytest.mark.parametrize('fontset',
['cm', 'stix', 'stixsans', 'dejavusans',
'dejavuserif'])
@pytest.mark.parametrize('baseline_images', ['mathtext'], indirect=True)
@image_comparison(baseline_images=None)
def test_mathtext_rendering(baseline_images, fontset, index, test, recwarn):
matplotlib.rcParams['mathtext.fontset'] = fontset
fig = plt.figure(figsize=(5.25, 0.75))
fig.text(0.5, 0.5, test,
horizontalalignment='center', verticalalignment='center')
@pytest.mark.parametrize('index, test', enumerate(font_tests),
ids=[str(index) for index in range(len(font_tests))])
@pytest.mark.parametrize('fontset',
['cm', 'stix', 'stixsans', 'dejavusans',
'dejavuserif'])
@pytest.mark.parametrize('baseline_images', ['mathfont'], indirect=True)
@image_comparison(baseline_images=None, extensions=['png'])
def test_mathfont_rendering(baseline_images, fontset, index, test, recwarn):
matplotlib.rcParams['mathtext.fontset'] = fontset
fig = plt.figure(figsize=(5.25, 0.75))
fig.text(0.5, 0.5, test,
horizontalalignment='center', verticalalignment='center')
def test_fontinfo():
import matplotlib.font_manager as font_manager
import matplotlib.ft2font as ft2font
fontpath = font_manager.findfont("DejaVu Sans")
font = ft2font.FT2Font(fontpath)
table = font.get_sfnt_table("head")
assert table['version'] == (1, 0)
@pytest.mark.parametrize(
'math, msg',
[
(r'$\hspace{}$', r'Expected \hspace{n}'),
(r'$\hspace{foo}$', r'Expected \hspace{n}'),
(r'$\frac$', r'Expected \frac{num}{den}'),
(r'$\frac{}{}$', r'Expected \frac{num}{den}'),
(r'$\stackrel$', r'Expected \stackrel{num}{den}'),
(r'$\stackrel{}{}$', r'Expected \stackrel{num}{den}'),
(r'$\binom$', r'Expected \binom{num}{den}'),
(r'$\binom{}{}$', r'Expected \binom{num}{den}'),
(r'$\genfrac$',
r'Expected \genfrac{ldelim}{rdelim}{rulesize}{style}{num}{den}'),
(r'$\genfrac{}{}{}{}{}{}$',
r'Expected \genfrac{ldelim}{rdelim}{rulesize}{style}{num}{den}'),
(r'$\sqrt$', r'Expected \sqrt{value}'),
(r'$\sqrt f$', r'Expected \sqrt{value}'),
(r'$\overline$', r'Expected \overline{value}'),
(r'$\overline{}$', r'Expected \overline{value}'),
(r'$\leftF$', r'Expected a delimiter'),
(r'$\rightF$', r'Unknown symbol: \rightF'),
(r'$\left(\right$', r'Expected a delimiter'),
(r'$\left($', r'Expected "\right"'),
(r'$\dfrac$', r'Expected \dfrac{num}{den}'),
(r'$\dfrac{}{}$', r'Expected \dfrac{num}{den}'),
],
ids=[
'hspace without value',
'hspace with invalid value',
'frac without parameters',
'frac with empty parameters',
'stackrel without parameters',
'stackrel with empty parameters',
'binom without parameters',
'binom with empty parameters',
'genfrac without parameters',
'genfrac with empty parameters',
'sqrt without parameters',
'sqrt with invalid value',
'overline without parameters',
'overline with empty parameter',
'left with invalid delimiter',
'right with invalid delimiter',
'unclosed parentheses with sizing',
'unclosed parentheses without sizing',
'dfrac without parameters',
'dfrac with empty parameters',
]
)
def test_mathtext_exceptions(math, msg):
parser = mathtext.MathTextParser('agg')
with pytest.raises(ValueError) as excinfo:
parser.parse(math)
excinfo.match(re.escape(msg))
def test_single_minus_sign():
plt.figure(figsize=(0.3, 0.3))
plt.text(0.5, 0.5, '$-$')
for spine in plt.gca().spines.values():
spine.set_visible(False)
plt.gca().set_xticks([])
plt.gca().set_yticks([])
buff = io.BytesIO()
plt.savefig(buff, format="rgba", dpi=1000)
array = np.frombuffer(buff.getvalue(), dtype=np.uint8)
# If this fails, it would be all white
assert not np.all(array == 0xff)
@check_figures_equal(extensions=["png"])
def test_spaces(fig_test, fig_ref):
fig_test.subplots().set_title(r"$1\,2\>3\ 4$")
fig_ref.subplots().set_title(r"$1\/2\:3~4$")
|
79ab31a0bfe30597b90a113b790723cfa6aac782cfc5075ef45911b16a0e8392
|
import numpy as np
import pytest
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib import ticker, rcParams
def example_plot(ax, fontsize=12, nodec=False):
ax.plot([1, 2])
ax.locator_params(nbins=3)
if not nodec:
ax.set_xlabel('x-label', fontsize=fontsize)
ax.set_ylabel('y-label', fontsize=fontsize)
ax.set_title('Title', fontsize=fontsize)
else:
ax.set_xticklabels('')
ax.set_yticklabels('')
def example_pcolor(ax, fontsize=12):
dx, dy = 0.6, 0.6
y, x = np.mgrid[slice(-3, 3 + dy, dy),
slice(-3, 3 + dx, dx)]
z = (1 - x / 2. + x ** 5 + y ** 3) * np.exp(-x ** 2 - y ** 2)
pcm = ax.pcolormesh(x, y, z, cmap='RdBu_r', vmin=-1., vmax=1.,
rasterized=True)
# ax.locator_params(nbins=3)
ax.set_xlabel('x-label', fontsize=fontsize)
ax.set_ylabel('y-label', fontsize=fontsize)
ax.set_title('Title', fontsize=fontsize)
return pcm
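# The constrained_layout tests below reuse these two helpers to populate
# figures with labeled line plots and pcolormesh panels (fixed -1..1 color
# range).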
@image_comparison(baseline_images=['constrained_layout1'],
extensions=['png'])
def test_constrained_layout1():
'Test constrained_layout for a single subplot'
fig = plt.figure(constrained_layout=True)
ax = fig.add_subplot(111)
example_plot(ax, fontsize=24)
@image_comparison(baseline_images=['constrained_layout2'],
extensions=['png'])
def test_constrained_layout2():
'Test constrained_layout for 2x2 subplots'
fig, axs = plt.subplots(2, 2, constrained_layout=True)
for ax in axs.flatten():
example_plot(ax, fontsize=24)
@image_comparison(baseline_images=['constrained_layout3'],
extensions=['png'])
def test_constrained_layout3():
'Test constrained_layout for colorbars with subplots'
fig, axs = plt.subplots(2, 2, constrained_layout=True)
for nn, ax in enumerate(axs.flatten()):
pcm = example_pcolor(ax, fontsize=24)
if nn == 3:
pad = 0.08
else:
pad = 0.02 # default
fig.colorbar(pcm, ax=ax, pad=pad)
@image_comparison(baseline_images=['constrained_layout4'])
def test_constrained_layout4():
'Test constrained_layout for a single colorbar with subplots'
fig, axs = plt.subplots(2, 2, constrained_layout=True)
for ax in axs.flatten():
pcm = example_pcolor(ax, fontsize=24)
fig.colorbar(pcm, ax=axs, pad=0.01, shrink=0.6)
@image_comparison(baseline_images=['constrained_layout5'],
tol=5.e-2, extensions=['png'])
def test_constrained_layout5():
'''
Test constrained_layout for a single colorbar with subplots,
colorbar bottom
'''
fig, axs = plt.subplots(2, 2, constrained_layout=True)
for ax in axs.flatten():
pcm = example_pcolor(ax, fontsize=24)
fig.colorbar(pcm, ax=axs,
use_gridspec=False, pad=0.01, shrink=0.6,
location='bottom')
@image_comparison(baseline_images=['constrained_layout6'],
extensions=['png'])
def test_constrained_layout6():
'Test constrained_layout for nested gridspecs'
fig = plt.figure(constrained_layout=True)
gs = fig.add_gridspec(1, 2, figure=fig)
gsl = gs[0].subgridspec(2, 2)
gsr = gs[1].subgridspec(1, 2)
axsl = []
for gs in gsl:
ax = fig.add_subplot(gs)
axsl += [ax]
example_plot(ax, fontsize=12)
ax.set_xlabel('x-label\nMultiLine')
axsr = []
for gs in gsr:
ax = fig.add_subplot(gs)
axsr += [ax]
pcm = example_pcolor(ax, fontsize=12)
fig.colorbar(pcm, ax=axsr,
pad=0.01, shrink=0.99, location='bottom',
ticks=ticker.MaxNLocator(nbins=5))
def test_constrained_layout7():
'Test for proper warning if fig not set in GridSpec'
with pytest.warns(UserWarning, match='Calling figure.constrained_layout, '
'but figure not setup to do constrained layout'):
fig = plt.figure(constrained_layout=True)
gs = gridspec.GridSpec(1, 2)
gsl = gridspec.GridSpecFromSubplotSpec(2, 2, gs[0])
gsr = gridspec.GridSpecFromSubplotSpec(1, 2, gs[1])
axsl = []
for gs in gsl:
ax = fig.add_subplot(gs)
# need to trigger a draw to get warning
fig.draw(fig.canvas.get_renderer())
@image_comparison(baseline_images=['constrained_layout8'],
extensions=['png'])
def test_constrained_layout8():
'Test for gridspecs that are not completely full'
fig = plt.figure(figsize=(10, 5), constrained_layout=True)
gs = gridspec.GridSpec(3, 5, figure=fig)
axs = []
for j in [0, 1]:
if j == 0:
ilist = [1]
else:
ilist = [0, 4]
for i in ilist:
ax = fig.add_subplot(gs[j, i])
axs += [ax]
pcm = example_pcolor(ax, fontsize=9)
if i > 0:
ax.set_ylabel('')
if j < 1:
ax.set_xlabel('')
ax.set_title('')
ax = fig.add_subplot(gs[2, :])
axs += [ax]
pcm = example_pcolor(ax, fontsize=9)
fig.colorbar(pcm, ax=axs, pad=0.01, shrink=0.6)
@image_comparison(baseline_images=['constrained_layout9'],
extensions=['png'])
def test_constrained_layout9():
'Test for handling suptitle and for sharex and sharey'
fig, axs = plt.subplots(2, 2, constrained_layout=True,
sharex=False, sharey=False)
for ax in axs.flatten():
pcm = example_pcolor(ax, fontsize=24)
ax.set_xlabel('')
ax.set_ylabel('')
ax.set_aspect(2.)
fig.colorbar(pcm, ax=axs, pad=0.01, shrink=0.6)
fig.suptitle('Test Suptitle', fontsize=28)
@image_comparison(baseline_images=['constrained_layout10'],
extensions=['png'])
def test_constrained_layout10():
'Test for handling legend outside axis'
fig, axs = plt.subplots(2, 2, constrained_layout=True)
for ax in axs.flatten():
ax.plot(np.arange(12), label='This is a label')
ax.legend(loc='center left', bbox_to_anchor=(0.8, 0.5))
@image_comparison(baseline_images=['constrained_layout11'],
extensions=['png'])
def test_constrained_layout11():
    'Test for multiple nested gridspecs'
fig = plt.figure(constrained_layout=True, figsize=(10, 3))
gs0 = gridspec.GridSpec(1, 2, figure=fig)
gsl = gridspec.GridSpecFromSubplotSpec(1, 2, gs0[0])
gsl0 = gridspec.GridSpecFromSubplotSpec(2, 2, gsl[1])
ax = fig.add_subplot(gs0[1])
example_plot(ax, fontsize=9)
axs = []
for gs in gsl0:
ax = fig.add_subplot(gs)
axs += [ax]
pcm = example_pcolor(ax, fontsize=9)
fig.colorbar(pcm, ax=axs, shrink=0.6, aspect=70.)
ax = fig.add_subplot(gsl[0])
example_plot(ax, fontsize=9)
@image_comparison(baseline_images=['constrained_layout11rat'],
extensions=['png'])
def test_constrained_layout11rat():
'Test for multiple nested gridspecs with width_ratios'
fig = plt.figure(constrained_layout=True, figsize=(10, 3))
gs0 = gridspec.GridSpec(1, 2, figure=fig, width_ratios=[6., 1.])
gsl = gridspec.GridSpecFromSubplotSpec(1, 2, gs0[0])
gsl0 = gridspec.GridSpecFromSubplotSpec(2, 2, gsl[1],
height_ratios=[2., 1.])
ax = fig.add_subplot(gs0[1])
example_plot(ax, fontsize=9)
axs = []
for gs in gsl0:
ax = fig.add_subplot(gs)
axs += [ax]
pcm = example_pcolor(ax, fontsize=9)
fig.colorbar(pcm, ax=axs, shrink=0.6, aspect=70.)
ax = fig.add_subplot(gsl[0])
example_plot(ax, fontsize=9)
@image_comparison(baseline_images=['constrained_layout12'],
extensions=['png'])
def test_constrained_layout12():
'Test that very unbalanced labeling still works.'
fig = plt.figure(constrained_layout=True)
gs0 = gridspec.GridSpec(6, 2, figure=fig)
ax1 = fig.add_subplot(gs0[:3, 1])
ax2 = fig.add_subplot(gs0[3:, 1])
example_plot(ax1, fontsize=24)
example_plot(ax2, fontsize=24)
ax = fig.add_subplot(gs0[0:2, 0])
example_plot(ax, nodec=True)
ax = fig.add_subplot(gs0[2:4, 0])
example_plot(ax, nodec=True)
ax = fig.add_subplot(gs0[4:, 0])
example_plot(ax, nodec=True)
ax.set_xlabel('x-label')
@image_comparison(baseline_images=['constrained_layout13'], tol=2.e-2,
extensions=['png'])
def test_constrained_layout13():
'Test that padding works.'
fig, axs = plt.subplots(2, 2, constrained_layout=True)
for ax in axs.flatten():
pcm = example_pcolor(ax, fontsize=12)
fig.colorbar(pcm, ax=ax, shrink=0.6, aspect=20., pad=0.02)
fig.set_constrained_layout_pads(w_pad=24./72., h_pad=24./72.)
@image_comparison(baseline_images=['constrained_layout14'],
extensions=['png'])
def test_constrained_layout14():
    'Test that padding works when hspace and wspace are also set.'
fig, axs = plt.subplots(2, 2, constrained_layout=True)
for ax in axs.flatten():
pcm = example_pcolor(ax, fontsize=12)
fig.colorbar(pcm, ax=ax, shrink=0.6, aspect=20., pad=0.02)
fig.set_constrained_layout_pads(
w_pad=3./72., h_pad=3./72.,
hspace=0.2, wspace=0.2)
@image_comparison(baseline_images=['constrained_layout15'],
extensions=['png'])
def test_constrained_layout15():
    'Test that the figure.constrained_layout rcParams work.'
rcParams['figure.constrained_layout.use'] = True
fig, axs = plt.subplots(2, 2)
for ax in axs.flatten():
example_plot(ax, fontsize=12)
@image_comparison(baseline_images=['constrained_layout16'],
extensions=['png'])
def test_constrained_layout16():
'Test ax.set_position.'
fig, ax = plt.subplots(constrained_layout=True)
example_plot(ax, fontsize=12)
ax2 = fig.add_axes([0.2, 0.2, 0.4, 0.4])
@image_comparison(baseline_images=['constrained_layout17'],
extensions=['png'])
def test_constrained_layout17():
'Test uneven gridspecs'
fig = plt.figure(constrained_layout=True)
gs = gridspec.GridSpec(3, 3, figure=fig)
ax1 = fig.add_subplot(gs[0, 0])
ax2 = fig.add_subplot(gs[0, 1:])
ax3 = fig.add_subplot(gs[1:, 0:2])
ax4 = fig.add_subplot(gs[1:, -1])
example_plot(ax1)
example_plot(ax2)
example_plot(ax3)
example_plot(ax4)
def test_constrained_layout18():
'Test twinx'
fig, ax = plt.subplots(constrained_layout=True)
ax2 = ax.twinx()
example_plot(ax)
example_plot(ax2, fontsize=24)
fig.canvas.draw()
assert all(ax.get_position().extents == ax2.get_position().extents)
def test_constrained_layout19():
'Test twiny'
fig, ax = plt.subplots(constrained_layout=True)
ax2 = ax.twiny()
example_plot(ax)
example_plot(ax2, fontsize=24)
ax2.set_title('')
ax.set_title('')
fig.canvas.draw()
assert all(ax.get_position().extents == ax2.get_position().extents)
def test_constrained_layout20():
    'Smoke test that constrained_layout does not mess up added axes'
gx = np.linspace(-5, 5, 4)
img = np.hypot(gx, gx[:, None])
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
mesh = ax.pcolormesh(gx, gx, img)
fig.colorbar(mesh)
def test_constrained_layout21():
'#11035: repeated calls to suptitle should not alter the layout'
fig, ax = plt.subplots(constrained_layout=True)
fig.suptitle("Suptitle0")
fig.canvas.draw()
extents0 = np.copy(ax.get_position().extents)
fig.suptitle("Suptitle1")
fig.canvas.draw()
extents1 = np.copy(ax.get_position().extents)
np.testing.assert_allclose(extents0, extents1)
def test_constrained_layout22():
    '#11035: suptitle should not be included in constrained_layout if manually positioned'
fig, ax = plt.subplots(constrained_layout=True)
fig.canvas.draw()
extents0 = np.copy(ax.get_position().extents)
fig.suptitle("Suptitle", y=0.5)
fig.canvas.draw()
extents1 = np.copy(ax.get_position().extents)
np.testing.assert_allclose(extents0, extents1)
def test_constrained_layout23():
'''
Comment in #11035: suptitle used to cause an exception when
    reusing a figure with constrained_layout and ``clear=True``.
'''
for i in range(2):
fig, ax = plt.subplots(num="123", constrained_layout=True, clear=True)
fig.suptitle("Suptitle{}".format(i))
# This test occasionally fails the image comparison tests, so we mark as
# flaky. Apparently the constraint solver occasionally doesn't fully
# optimize. Would be nice if this were more deterministic...
@pytest.mark.timeout(30)
@pytest.mark.flaky(reruns=3)
@image_comparison(baseline_images=['test_colorbar_location'],
extensions=['png'], remove_text=True, style='mpl20')
def test_colorbar_location():
"""
Test that colorbar handling is as expected for various complicated
cases...
"""
fig, axs = plt.subplots(4, 5, constrained_layout=True)
for ax in axs.flatten():
pcm = example_pcolor(ax)
ax.set_xlabel('')
ax.set_ylabel('')
fig.colorbar(pcm, ax=axs[:, 1], shrink=0.4)
fig.colorbar(pcm, ax=axs[-1, :2], shrink=0.5, location='bottom')
fig.colorbar(pcm, ax=axs[0, 2:], shrink=0.5, location='bottom')
fig.colorbar(pcm, ax=axs[-2, 3:], shrink=0.5, location='top')
fig.colorbar(pcm, ax=axs[0, 0], shrink=0.5, location='left')
fig.colorbar(pcm, ax=axs[1:3, 2], shrink=0.5, location='right')
|
91393d42feaaf4a9377c97030045a6df91c5fac2ac3fa61b79ce8df231076ac1
|
import json
from pathlib import Path
import shutil
import matplotlib.dviread as dr
import pytest
def test_PsfontsMap(monkeypatch):
monkeypatch.setattr(dr, 'find_tex_file', lambda x: x)
filename = str(Path(__file__).parent / 'baseline_images/dviread/test.map')
fontmap = dr.PsfontsMap(filename)
# Check all properties of a few fonts
for n in [1, 2, 3, 4, 5]:
key = b'TeXfont%d' % n
entry = fontmap[key]
assert entry.texname == key
assert entry.psname == b'PSfont%d' % n
if n not in [3, 5]:
assert entry.encoding == b'font%d.enc' % n
elif n == 3:
assert entry.encoding == b'enc3.foo'
# We don't care about the encoding of TeXfont5, which specifies
# multiple encodings.
if n not in [1, 5]:
assert entry.filename == b'font%d.pfa' % n
else:
assert entry.filename == b'font%d.pfb' % n
if n == 4:
assert entry.effects == {'slant': -0.1, 'extend': 2.2}
else:
assert entry.effects == {}
# Some special cases
entry = fontmap[b'TeXfont6']
assert entry.filename is None
assert entry.encoding is None
entry = fontmap[b'TeXfont7']
assert entry.filename is None
assert entry.encoding == b'font7.enc'
entry = fontmap[b'TeXfont8']
assert entry.filename == b'font8.pfb'
assert entry.encoding is None
entry = fontmap[b'TeXfont9']
assert entry.filename == b'/absolute/font9.pfb'
# Missing font
with pytest.raises(KeyError) as exc:
fontmap[b'no-such-font']
assert 'no-such-font' in str(exc.value)
@pytest.mark.skipif(shutil.which("kpsewhich") is None,
reason="kpsewhich is not available")
def test_dviread():
dirpath = Path(__file__).parent / 'baseline_images/dviread'
with (dirpath / 'test.json').open() as f:
correct = json.load(f)
with dr.Dvi(str(dirpath / 'test.dvi'), None) as dvi:
data = [{'text': [[t.x, t.y,
chr(t.glyph),
t.font.texname.decode('ascii'),
round(t.font.size, 2)]
for t in page.text],
'boxes': [[b.x, b.y, b.height, b.width] for b in page.boxes]}
for page in dvi]
assert data == correct
|
07f74b9165cf6ff31b8bece1a9e5870f97ceb74ca4e54e44e9d34ecf4beee22f
|
from collections import namedtuple
from itertools import product
from distutils.version import LooseVersion
import io
import platform
import datetime
import dateutil.tz as dutz
import numpy as np
from numpy import ma
from cycler import cycler
import pytest
import warnings
import matplotlib
import matplotlib as mpl
from matplotlib.testing.decorators import (
image_comparison, check_figures_equal, remove_ticks_and_titles)
import matplotlib.pyplot as plt
import matplotlib.markers as mmarkers
import matplotlib.patches as mpatches
import matplotlib.colors as mcolors
import matplotlib.transforms as mtransforms
from numpy.testing import (
assert_allclose, assert_array_equal, assert_array_almost_equal)
from matplotlib import rc_context
from matplotlib.cbook import (
IgnoredKeywordWarning, MatplotlibDeprecationWarning)
# Note: Some test cases are run twice: once normally and once with labeled
# data. These two runs must be defined in the same test function or use
# different baseline images, to prevent race conditions when pytest runs
# the tests with multiple threads.
def test_get_labels():
fig, ax = plt.subplots()
ax.set_xlabel('x label')
ax.set_ylabel('y label')
assert ax.get_xlabel() == 'x label'
assert ax.get_ylabel() == 'y label'
@image_comparison(baseline_images=['acorr'], extensions=['png'], style='mpl20')
def test_acorr():
np.random.seed(19680801)
n = 512
x = np.random.normal(0, 1, n).cumsum()
fig, ax = plt.subplots()
ax.acorr(x, maxlags=n - 1, label='acorr')
ax.legend()
@image_comparison(baseline_images=['spy'], extensions=['png'], style='mpl20')
def test_spy():
np.random.seed(19680801)
a = np.ones(32 * 32)
a[:16 * 32] = 0
np.random.shuffle(a)
a = np.reshape(a, (32, 32))
fig, ax = plt.subplots()
ax.spy(a)
def test_spy_invalid_kwargs():
fig, ax = plt.subplots()
for unsupported_kw in [{'interpolation': 'nearest'},
{'marker': 'o', 'linestyle': 'solid'}]:
with pytest.raises(TypeError):
ax.spy(np.eye(3, 3), **unsupported_kw)
@image_comparison(baseline_images=['matshow'],
extensions=['png'], style='mpl20')
def test_matshow():
np.random.seed(19680801)
a = np.random.rand(32, 32)
fig, ax = plt.subplots()
ax.matshow(a)
@image_comparison(baseline_images=['formatter_ticker_001',
'formatter_ticker_002',
'formatter_ticker_003',
'formatter_ticker_004',
'formatter_ticker_005',
])
def test_formatter_ticker():
import matplotlib.testing.jpl_units as units
units.register()
# This should affect the tick size. (Tests issue #543)
matplotlib.rcParams['lines.markeredgewidth'] = 30
    # This essentially tests whether user-specified labels get overwritten
    # by the auto-labeling functionality of the axes.
xdata = [x*units.sec for x in range(10)]
ydata1 = [(1.5*y - 0.5)*units.km for y in range(10)]
ydata2 = [(1.75*y - 1.0)*units.km for y in range(10)]
fig = plt.figure()
ax = plt.subplot(111)
ax.set_xlabel("x-label 001")
fig = plt.figure()
ax = plt.subplot(111)
ax.set_xlabel("x-label 001")
ax.plot(xdata, ydata1, color='blue', xunits="sec")
fig = plt.figure()
ax = plt.subplot(111)
ax.set_xlabel("x-label 001")
ax.plot(xdata, ydata1, color='blue', xunits="sec")
ax.set_xlabel("x-label 003")
fig = plt.figure()
ax = plt.subplot(111)
ax.plot(xdata, ydata1, color='blue', xunits="sec")
ax.plot(xdata, ydata2, color='green', xunits="hour")
ax.set_xlabel("x-label 004")
# See SF bug 2846058
# https://sourceforge.net/tracker/?func=detail&aid=2846058&group_id=80706&atid=560720
fig = plt.figure()
ax = plt.subplot(111)
ax.plot(xdata, ydata1, color='blue', xunits="sec")
ax.plot(xdata, ydata2, color='green', xunits="hour")
ax.set_xlabel("x-label 005")
ax.autoscale_view()
@image_comparison(baseline_images=["twin_axis_locators_formatters"])
def test_twin_axis_locators_formatters():
vals = np.linspace(0, 1, num=5, endpoint=True)
locs = np.sin(np.pi * vals / 2.0)
majl = plt.FixedLocator(locs)
minl = plt.FixedLocator([0.1, 0.2, 0.3])
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
ax1.plot([0.1, 100], [0, 1])
ax1.yaxis.set_major_locator(majl)
ax1.yaxis.set_minor_locator(minl)
ax1.yaxis.set_major_formatter(plt.FormatStrFormatter('%08.2lf'))
ax1.yaxis.set_minor_formatter(plt.FixedFormatter(['tricks', 'mind',
'jedi']))
ax1.xaxis.set_major_locator(plt.LinearLocator())
ax1.xaxis.set_minor_locator(plt.FixedLocator([15, 35, 55, 75]))
ax1.xaxis.set_major_formatter(plt.FormatStrFormatter('%05.2lf'))
ax1.xaxis.set_minor_formatter(plt.FixedFormatter(['c', '3', 'p', 'o']))
ax2 = ax1.twiny()
ax3 = ax1.twinx()
def test_twinx_cla():
fig, ax = plt.subplots()
ax2 = ax.twinx()
ax3 = ax2.twiny()
plt.draw()
assert not ax2.xaxis.get_visible()
assert not ax2.patch.get_visible()
ax2.cla()
ax3.cla()
assert not ax2.xaxis.get_visible()
assert not ax2.patch.get_visible()
assert ax2.yaxis.get_visible()
assert ax3.xaxis.get_visible()
assert not ax3.patch.get_visible()
assert not ax3.yaxis.get_visible()
assert ax.xaxis.get_visible()
assert ax.patch.get_visible()
assert ax.yaxis.get_visible()
@image_comparison(baseline_images=['twin_autoscale'], extensions=['png'])
def test_twinx_axis_scales():
x = np.array([0, 0.5, 1])
y = 0.5 * x
x2 = np.array([0, 1, 2])
y2 = 2 * x2
fig = plt.figure()
ax = fig.add_axes((0, 0, 1, 1), autoscalex_on=False, autoscaley_on=False)
ax.plot(x, y, color='blue', lw=10)
ax2 = plt.twinx(ax)
ax2.plot(x2, y2, 'r--', lw=5)
ax.margins(0, 0)
ax2.margins(0, 0)
def test_twin_inherit_autoscale_setting():
fig, ax = plt.subplots()
ax_x_on = ax.twinx()
ax.set_autoscalex_on(False)
ax_x_off = ax.twinx()
assert ax_x_on.get_autoscalex_on()
assert not ax_x_off.get_autoscalex_on()
ax_y_on = ax.twiny()
ax.set_autoscaley_on(False)
ax_y_off = ax.twiny()
assert ax_y_on.get_autoscaley_on()
assert not ax_y_off.get_autoscaley_on()
def test_inverted_cla():
# Github PR #5450. Setting autoscale should reset
# axes to be non-inverted.
    # plotting an image, then a 1d graph: the y-axis now points down
fig = plt.figure(0)
ax = fig.gca()
    # 1. test that a new axis is not inverted by default
assert not ax.xaxis_inverted()
assert not ax.yaxis_inverted()
img = np.random.random((100, 100))
ax.imshow(img)
    # 2. test that an image axis is inverted
assert not ax.xaxis_inverted()
assert ax.yaxis_inverted()
    # 3. test that after clearing and plotting a line, the axes are
    # not inverted
ax.cla()
x = np.linspace(0, 2*np.pi, 100)
ax.plot(x, np.cos(x))
assert not ax.xaxis_inverted()
assert not ax.yaxis_inverted()
    # 4. autoscaling should not bring the axes back to normal
ax.cla()
ax.imshow(img)
plt.autoscale()
assert not(ax.xaxis_inverted())
assert ax.yaxis_inverted()
    # 5. two shared axes: clearing the master axis should bring the shared
    # axes back to normal
ax0 = plt.subplot(211)
ax1 = plt.subplot(212, sharey=ax0)
ax0.imshow(img)
ax1.plot(x, np.cos(x))
ax0.cla()
assert not(ax1.yaxis_inverted())
ax1.cla()
    # 6. clearing the non-master axes should not touch the limits
ax0.imshow(img)
ax1.plot(x, np.cos(x))
ax1.cla()
assert ax.yaxis_inverted()
# clean up
plt.close(fig)
@check_figures_equal(extensions=["png"])
def test_minorticks_on_rcParams_both(fig_test, fig_ref):
with matplotlib.rc_context({"xtick.minor.visible": True,
"ytick.minor.visible": True}):
ax_test = fig_test.subplots()
ax_test.plot([0, 1], [0, 1])
ax_ref = fig_ref.subplots()
ax_ref.plot([0, 1], [0, 1])
ax_ref.minorticks_on()
@image_comparison(baseline_images=["autoscale_tiny_range"], remove_text=True)
def test_autoscale_tiny_range():
# github pull #904
fig, ax = plt.subplots(2, 2)
ax = ax.flatten()
for i in range(4):
y1 = 10**(-11 - i)
ax[i].plot([0, 1], [1, 1 + y1])
@pytest.mark.style('default')
def test_autoscale_tight():
fig, ax = plt.subplots(1, 1)
ax.plot([1, 2, 3, 4])
ax.autoscale(enable=True, axis='x', tight=False)
ax.autoscale(enable=True, axis='y', tight=True)
assert_allclose(ax.get_xlim(), (-0.15, 3.15))
assert_allclose(ax.get_ylim(), (1.0, 4.0))
@pytest.mark.style('default')
def test_autoscale_log_shared():
# related to github #7587
# array starts at zero to trigger _minpos handling
x = np.arange(100, dtype=float)
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
ax1.loglog(x, x)
ax2.semilogx(x, x)
ax1.autoscale(tight=True)
ax2.autoscale(tight=True)
plt.draw()
lims = (x[1], x[-1])
assert_allclose(ax1.get_xlim(), lims)
assert_allclose(ax1.get_ylim(), lims)
assert_allclose(ax2.get_xlim(), lims)
assert_allclose(ax2.get_ylim(), (x[0], x[-1]))
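    # ax2 keeps a linear y-scale (semilogx), so its y-limits include
    # x[0] == 0, whereas the log-scaled axes clip to the smallest positive
    # value x[1] instead.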
@pytest.mark.style('default')
def test_use_sticky_edges():
fig, ax = plt.subplots()
ax.imshow([[0, 1], [2, 3]], origin='lower')
assert_allclose(ax.get_xlim(), (-0.5, 1.5))
assert_allclose(ax.get_ylim(), (-0.5, 1.5))
ax.use_sticky_edges = False
ax.autoscale()
xlim = (-0.5 - 2 * ax._xmargin, 1.5 + 2 * ax._xmargin)
ylim = (-0.5 - 2 * ax._ymargin, 1.5 + 2 * ax._ymargin)
assert_allclose(ax.get_xlim(), xlim)
assert_allclose(ax.get_ylim(), ylim)
# Make sure it is reversible:
ax.use_sticky_edges = True
ax.autoscale()
assert_allclose(ax.get_xlim(), (-0.5, 1.5))
assert_allclose(ax.get_ylim(), (-0.5, 1.5))
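    # With sticky edges disabled, autoscale pads each side by
    # margin * data span; the data spans 2.0 units here, hence the
    # 2 * ax._xmargin and 2 * ax._ymargin terms above.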
@image_comparison(baseline_images=['offset_points'],
remove_text=True)
def test_basic_annotate():
# Setup some data
t = np.arange(0.0, 5.0, 0.01)
s = np.cos(2.0*np.pi * t)
# Offset Points
fig = plt.figure()
ax = fig.add_subplot(111, autoscale_on=False, xlim=(-1, 5), ylim=(-3, 5))
line, = ax.plot(t, s, lw=3, color='purple')
ax.annotate('local max', xy=(3, 1), xycoords='data',
xytext=(3, 3), textcoords='offset points')
@image_comparison(baseline_images=['arrow_simple'],
extensions=['png'], remove_text=True)
def test_arrow_simple():
# Simple image test for ax.arrow
# kwargs that take discrete values
length_includes_head = (True, False)
shape = ('full', 'left', 'right')
head_starts_at_zero = (True, False)
# Create outer product of values
kwargs = product(length_includes_head, shape, head_starts_at_zero)
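# 2 * 3 * 2 = 12 combinations, one for each subplot in the 3x4 grid below.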
fig, axs = plt.subplots(3, 4)
for i, (ax, kwarg) in enumerate(zip(axs.flatten(), kwargs)):
ax.set_xlim(-2, 2)
ax.set_ylim(-2, 2)
# Unpack kwargs
(length_includes_head, shape, head_starts_at_zero) = kwarg
theta = 2 * np.pi * i / 12
# Draw arrow
ax.arrow(0, 0, np.sin(theta), np.cos(theta),
width=theta/100,
length_includes_head=length_includes_head,
shape=shape,
head_starts_at_zero=head_starts_at_zero,
head_width=theta / 10,
head_length=theta / 10)
def test_arrow_empty():
_, ax = plt.subplots()
# Create an empty FancyArrow
ax.arrow(0, 0, 0, 0, head_length=0)
def test_annotate_default_arrow():
# Check that we can make an annotation arrow with only default properties.
fig, ax = plt.subplots()
ann = ax.annotate("foo", (0, 1), xytext=(2, 3))
assert ann.arrow_patch is None
ann = ax.annotate("foo", (0, 1), xytext=(2, 3), arrowprops={})
assert ann.arrow_patch is not None
@image_comparison(baseline_images=['polar_axes'], style='default')
def test_polar_annotations():
# you can specify the xypoint and the xytext in different
# positions and coordinate systems, and optionally turn on a
# connecting line and mark the point with a marker. Annotations
# work on polar axes too. In the example below, the xy point is
# in native coordinates (xycoords defaults to 'data'). For a
# polar axes, this is in (theta, radius) space. The text in this
# example is placed in the fractional figure coordinate system.
# Text keyword args like horizontal and vertical alignment are
# respected
# Setup some data
r = np.arange(0.0, 1.0, 0.001)
theta = 2.0 * 2.0 * np.pi * r
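# theta runs from 0 to 4*pi, so the spiral makes two full turns.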
fig = plt.figure()
ax = fig.add_subplot(111, polar=True)
line, = ax.plot(theta, r, color='#ee8d18', lw=3)
line, = ax.plot((0, 0), (0, 1), color="#0000ff", lw=1)
ind = 800
thisr, thistheta = r[ind], theta[ind]
ax.plot([thistheta], [thisr], 'o')
ax.annotate('a polar annotation',
xy=(thistheta, thisr), # theta, radius
xytext=(0.05, 0.05), # fraction, fraction
textcoords='figure fraction',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='left',
verticalalignment='baseline',
)
ax.tick_params(axis='x', tick1On=True, tick2On=True, direction='out')
@image_comparison(baseline_images=['polar_coords'], style='default',
remove_text=True)
def test_polar_coord_annotations():
# You can also use polar notation on a cartesian axes. Here the
# native coordinate system ('data') is cartesian, so you need to
# specify the xycoords and textcoords as 'polar' if you want to
# use (theta, radius)
from matplotlib.patches import Ellipse
el = Ellipse((0, 0), 10, 20, facecolor='r', alpha=0.5)
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal')
ax.add_artist(el)
el.set_clip_box(ax.bbox)
ax.annotate('the top',
xy=(np.pi/2., 10.), # theta, radius
xytext=(np.pi/3, 20.), # theta, radius
xycoords='polar',
textcoords='polar',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='left',
verticalalignment='baseline',
clip_on=True, # clip to the axes bounding box
)
ax.set_xlim(-20, 20)
ax.set_ylim(-20, 20)
@image_comparison(baseline_images=['polar_alignment'], extensions=['png'])
def test_polar_alignment():
'''
Test that changing the vertical/horizontal alignment of a polar graph
works as expected.
'''
ranges = [(0, 5), (0, 5)]
angles = np.arange(0, 360, 90)
levels = 5
fig = plt.figure()
figureSize = [0.1, 0.1, 0.8, 0.8]
horizontal = fig.add_axes(figureSize, polar=True, label='horizontal')
vertical = fig.add_axes(figureSize, polar=True, label='vertical')
axes = [horizontal, vertical]
horizontal.set_thetagrids(angles)
vertical.patch.set_visible(False)
for i in range(2):
grid = np.linspace(*ranges[i], num=levels)
gridValues = [0, 0.2, 0.4, 0.6, 0.8, 1]
axes[i].set_rgrids(gridValues, angle=angles[i],
horizontalalignment='left',
verticalalignment='top')
@image_comparison(baseline_images=['fill_units'], extensions=['png'],
savefig_kwarg={'dpi': 60})
def test_fill_units():
from datetime import datetime
import matplotlib.testing.jpl_units as units
units.register()
# generate some data
t = units.Epoch("ET", dt=datetime(2009, 4, 27))
value = 10.0 * units.deg
day = units.Duration("ET", 24.0 * 60.0 * 60.0)
fig = plt.figure()
# Top-Left
ax1 = fig.add_subplot(221)
ax1.plot([t], [value], yunits='deg', color='red')
ax1.fill([733525.0, 733525.0, 733526.0, 733526.0],
[0.0, 0.0, 90.0, 0.0], 'b')
# Top-Right
ax2 = fig.add_subplot(222)
ax2.plot([t], [value], yunits='deg', color='red')
ax2.fill([t, t, t + day, t + day],
[0.0, 0.0, 90.0, 0.0], 'b')
# Bottom-Left
ax3 = fig.add_subplot(223)
ax3.plot([t], [value], yunits='deg', color='red')
ax3.fill([733525.0, 733525.0, 733526.0, 733526.0],
[0 * units.deg, 0 * units.deg, 90 * units.deg, 0 * units.deg],
'b')
# Bottom-Right
ax4 = fig.add_subplot(224)
ax4.plot([t], [value], yunits='deg', color='red')
ax4.fill([t, t, t + day, t + day],
[0 * units.deg, 0 * units.deg, 90 * units.deg, 0 * units.deg],
facecolor="blue")
fig.autofmt_xdate()
@image_comparison(baseline_images=['single_point', 'single_point'])
def test_single_point():
# Issue #1796: don't let lines.marker affect the grid
matplotlib.rcParams['lines.marker'] = 'o'
matplotlib.rcParams['axes.grid'] = True
fig = plt.figure()
plt.subplot(211)
plt.plot([0], [0], 'o')
plt.subplot(212)
plt.plot([1], [1], 'o')
# Reuse testcase from above for a labeled data test
data = {'a': [0], 'b': [1]}
fig = plt.figure()
plt.subplot(211)
plt.plot('a', 'a', 'o', data=data)
plt.subplot(212)
plt.plot('b', 'b', 'o', data=data)
@image_comparison(baseline_images=['single_date'], extensions=['png'],
style='mpl20')
def test_single_date():
time1 = [721964.0]
data1 = [-65.54]
fig = plt.figure()
plt.subplot(211)
plt.plot_date(time1, data1, 'o', color='r')
plt.subplot(212)
plt.plot(time1, data1, 'o', color='r')
@image_comparison(baseline_images=['shaped_data'])
def test_shaped_data():
xdata = np.array([[0.53295185, 0.23052951, 0.19057629, 0.66724975,
0.96577916, 0.73136095, 0.60823287, 0.01792100,
0.29744742, 0.27164665],
[0.27980120, 0.25814229, 0.02818193, 0.12966456,
0.57446277, 0.58167607, 0.71028245, 0.69112737,
0.89923072, 0.99072476],
[0.81218578, 0.80464528, 0.76071809, 0.85616314,
0.12757994, 0.94324936, 0.73078663, 0.09658102,
0.60703967, 0.77664978],
[0.28332265, 0.81479711, 0.86985333, 0.43797066,
0.32540082, 0.43819229, 0.92230363, 0.49414252,
0.68168256, 0.05922372],
[0.10721335, 0.93904142, 0.79163075, 0.73232848,
0.90283839, 0.68408046, 0.25502302, 0.95976614,
0.59214115, 0.13663711],
[0.28087456, 0.33127607, 0.15530412, 0.76558121,
0.83389773, 0.03735974, 0.98717738, 0.71432229,
0.54881366, 0.86893953],
[0.77995937, 0.99555600, 0.29688434, 0.15646162,
0.05184800, 0.37161935, 0.12998491, 0.09377296,
0.36882507, 0.36583435],
[0.37851836, 0.05315792, 0.63144617, 0.25003433,
0.69586032, 0.11393988, 0.92362096, 0.88045438,
0.93530252, 0.68275072],
[0.86486596, 0.83236675, 0.82960664, 0.57796630,
0.25724233, 0.84841095, 0.90862812, 0.64414887,
0.35652720, 0.71026066],
[0.01383268, 0.34060930, 0.76084285, 0.70800694,
0.87634056, 0.08213693, 0.54655021, 0.98123181,
0.44080053, 0.86815815]])
y1 = np.arange(10).reshape((1, -1))
y2 = np.arange(10).reshape((-1, 1))
fig = plt.figure()
plt.subplot(411)
plt.plot(y1)
plt.subplot(412)
plt.plot(y2)
plt.subplot(413)
with pytest.raises(ValueError):
plt.plot((y1, y2))
plt.subplot(414)
plt.plot(xdata[:, 1], xdata[1, :], 'o')
def test_structured_data():
# support for structured data
pts = np.array([(1, 1), (2, 2)], dtype=[("ones", float), ("twos", float)])
# the second name should not be read as a format string (which would raise ValueError)
fig, ax = plt.subplots(2)
ax[0].plot("ones", "twos", data=pts)
ax[1].plot("ones", "twos", "r", data=pts)
@image_comparison(baseline_images=['const_xy'])
def test_const_xy():
fig = plt.figure()
plt.subplot(311)
plt.plot(np.arange(10), np.ones(10))
plt.subplot(312)
plt.plot(np.ones(10), np.arange(10))
plt.subplot(313)
plt.plot(np.ones(10), np.ones(10), 'o')
@image_comparison(baseline_images=['polar_wrap_180', 'polar_wrap_360'],
style='default')
def test_polar_wrap():
fig = plt.figure()
plt.subplot(111, polar=True)
plt.polar(np.deg2rad([179, -179]), [0.2, 0.1], "b.-")
plt.polar(np.deg2rad([179, 181]), [0.2, 0.1], "g.-")
plt.rgrids([0.05, 0.1, 0.15, 0.2, 0.25, 0.3])
assert len(fig.axes) == 1, 'More than one polar axes created.'
fig = plt.figure()
plt.subplot(111, polar=True)
plt.polar(np.deg2rad([2, -2]), [0.2, 0.1], "b.-")
plt.polar(np.deg2rad([2, 358]), [0.2, 0.1], "g.-")
plt.polar(np.deg2rad([358, 2]), [0.2, 0.1], "r.-")
plt.rgrids([0.05, 0.1, 0.15, 0.2, 0.25, 0.3])
@image_comparison(baseline_images=['polar_units', 'polar_units_2'],
style='default')
def test_polar_units():
import matplotlib.testing.jpl_units as units
units.register()
pi = np.pi
deg = units.deg
km = units.km
x1 = [pi/6.0, pi/4.0, pi/3.0, pi/2.0]
x2 = [30.0*deg, 45.0*deg, 60.0*deg, 90.0*deg]
y1 = [1.0, 2.0, 3.0, 4.0]
y2 = [4.0, 3.0, 2.0, 1.0]
fig = plt.figure()
plt.polar(x2, y1, color="blue")
# polar(x2, y1, color = "red", xunits="rad")
# polar(x2, y2, color = "green")
fig = plt.figure()
# make sure runits and thetaunits work
y1 = [y*km for y in y1]
plt.polar(x2, y1, color="blue", thetaunits="rad", runits="km")
assert isinstance(plt.gca().get_xaxis().get_major_formatter(),
units.UnitDblFormatter)
@image_comparison(baseline_images=['polar_rmin'], style='default')
def test_polar_rmin():
r = np.arange(0, 3.0, 0.01)
theta = 2*np.pi*r
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], polar=True)
ax.plot(theta, r)
ax.set_rmax(2.0)
ax.set_rmin(0.5)
@image_comparison(baseline_images=['polar_negative_rmin'], style='default')
def test_polar_negative_rmin():
r = np.arange(-3.0, 0.0, 0.01)
theta = 2*np.pi*r
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], polar=True)
ax.plot(theta, r)
ax.set_rmax(0.0)
ax.set_rmin(-3.0)
@image_comparison(baseline_images=['polar_rorigin'], style='default')
def test_polar_rorigin():
r = np.arange(0, 3.0, 0.01)
theta = 2*np.pi*r
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], polar=True)
ax.plot(theta, r)
ax.set_rmax(2.0)
ax.set_rmin(0.5)
ax.set_rorigin(0.0)
@image_comparison(baseline_images=['polar_invertedylim'], style='default',
extensions=['png'])
def test_polar_invertedylim():
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], polar=True)
ax.set_ylim(2, 0)
@image_comparison(baseline_images=['polar_invertedylim_rorigin'],
style='default', extensions=['png'])
def test_polar_invertedylim_rorigin():
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], polar=True)
ax.set_ylim(2, 0)
ax.set_rorigin(3)
@image_comparison(baseline_images=['polar_theta_position'], style='default')
def test_polar_theta_position():
r = np.arange(0, 3.0, 0.01)
theta = 2*np.pi*r
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], polar=True)
ax.plot(theta, r)
ax.set_theta_zero_location("NW", 30)
ax.set_theta_direction('clockwise')
@image_comparison(baseline_images=['polar_rlabel_position'], style='default')
def test_polar_rlabel_position():
fig = plt.figure()
ax = fig.add_subplot(111, projection='polar')
ax.set_rlabel_position(315)
ax.tick_params(rotation='auto')
@image_comparison(baseline_images=['polar_theta_wedge'], style='default')
def test_polar_theta_limits():
r = np.arange(0, 3.0, 0.01)
theta = 2*np.pi*r
theta_mins = np.arange(15.0, 361.0, 90.0)
theta_maxs = np.arange(50.0, 361.0, 90.0)
DIRECTIONS = ('out', 'in', 'inout')
fig, axes = plt.subplots(len(theta_mins), len(theta_maxs),
subplot_kw={'polar': True},
figsize=(8, 6))
for i, start in enumerate(theta_mins):
for j, end in enumerate(theta_maxs):
ax = axes[i, j]
ax.plot(theta, r)
if start < end:
ax.set_thetamin(start)
ax.set_thetamax(end)
else:
# Plot with clockwise orientation instead.
ax.set_thetamin(end)
ax.set_thetamax(start)
ax.set_theta_direction('clockwise')
ax.tick_params(tick1On=True, tick2On=True,
direction=DIRECTIONS[i % len(DIRECTIONS)],
rotation='auto')
ax.yaxis.set_tick_params(label2On=True, rotation='auto')
@check_figures_equal(extensions=["png"])
def test_polar_rlim(fig_test, fig_ref):
ax = fig_test.subplots(subplot_kw={'polar': True})
ax.set_rlim(top=10)
ax.set_rlim(bottom=.5)
ax = fig_ref.subplots(subplot_kw={'polar': True})
ax.set_rmax(10.)
ax.set_rmin(.5)
@check_figures_equal(extensions=["png"])
def test_polar_rlim_bottom(fig_test, fig_ref):
ax = fig_test.subplots(subplot_kw={'polar': True})
ax.set_rlim(bottom=[.5, 10])
ax = fig_ref.subplots(subplot_kw={'polar': True})
ax.set_rmax(10.)
ax.set_rmin(.5)
@image_comparison(baseline_images=['axvspan_epoch'])
def test_axvspan_epoch():
from datetime import datetime
import matplotlib.testing.jpl_units as units
units.register()
# generate some data
t0 = units.Epoch("ET", dt=datetime(2009, 1, 20))
tf = units.Epoch("ET", dt=datetime(2009, 1, 21))
dt = units.Duration("ET", units.day.convert("sec"))
fig = plt.figure()
plt.axvspan(t0, tf, facecolor="blue", alpha=0.25)
ax = plt.gca()
ax.set_xlim(t0 - 5.0*dt, tf + 5.0*dt)
@image_comparison(baseline_images=['axhspan_epoch'])
def test_axhspan_epoch():
from datetime import datetime
import matplotlib.testing.jpl_units as units
units.register()
# generate some data
t0 = units.Epoch("ET", dt=datetime(2009, 1, 20))
tf = units.Epoch("ET", dt=datetime(2009, 1, 21))
dt = units.Duration("ET", units.day.convert("sec"))
fig = plt.figure()
plt.axhspan(t0, tf, facecolor="blue", alpha=0.25)
ax = plt.gca()
ax.set_ylim(t0 - 5.0*dt, tf + 5.0*dt)
@image_comparison(baseline_images=['hexbin_extent', 'hexbin_extent'],
remove_text=True, extensions=['png'])
def test_hexbin_extent():
# this test exposes sf bug 2856228
fig, ax = plt.subplots()
data = (np.arange(2000) / 2000).reshape((2, 1000))
x, y = data
ax.hexbin(x, y, extent=[.1, .3, .6, .7])
# Reuse testcase from above for a labeled data test
data = {"x": x, "y": y}
fig, ax = plt.subplots()
ax.hexbin("x", "y", extent=[.1, .3, .6, .7], data=data)
@image_comparison(baseline_images=['hexbin_empty'], remove_text=True,
extensions=['png'])
def test_hexbin_empty():
# From #3886: creating hexbin from empty dataset raises ValueError
ax = plt.gca()
ax.hexbin([], [])
def test_hexbin_pickable():
# From #1973: Test that picking a hexbin collection works
class FauxMouseEvent:
def __init__(self, x, y):
self.x = x
self.y = y
fig, ax = plt.subplots()
data = (np.arange(200) / 200).reshape((2, 100))
x, y = data
hb = ax.hexbin(x, y, extent=[.1, .3, .6, .7], picker=-1)
assert hb.contains(FauxMouseEvent(400, 300))[0]
@image_comparison(baseline_images=['hexbin_log'],
extensions=['png'], style='mpl20')
def test_hexbin_log():
# Issue #1636 (and also test log scaled colorbar)
np.random.seed(19680801)
n = 100000
x = np.random.standard_normal(n)
y = 2.0 + 3.0 * x + 4.0 * np.random.standard_normal(n)
y = np.power(2, y * 0.5)
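# Exponentiating keeps every y positive, as the log-scaled hexbin requires.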
fig, ax = plt.subplots()
h = ax.hexbin(x, y, yscale='log', bins='log')
plt.colorbar(h)
def test_inverted_limits():
# Test gh:1553
# Calling invert_xaxis prior to plotting should not disable autoscaling
# while still maintaining the inverted direction
fig, ax = plt.subplots()
ax.invert_xaxis()
ax.plot([-5, -3, 2, 4], [1, 2, -3, 5])
assert ax.get_xlim() == (4, -5)
assert ax.get_ylim() == (-3, 5)
plt.close()
fig, ax = plt.subplots()
ax.invert_yaxis()
ax.plot([-5, -3, 2, 4], [1, 2, -3, 5])
assert ax.get_xlim() == (-5, 4)
assert ax.get_ylim() == (5, -3)
plt.close()
@image_comparison(baseline_images=['nonfinite_limits'])
def test_nonfinite_limits():
x = np.arange(0., np.e, 0.01)
# silence divide by zero warning from log(0)
with np.errstate(divide='ignore'):
y = np.log(x)
x[len(x)//2] = np.nan
fig, ax = plt.subplots()
ax.plot(x, y)
@image_comparison(baseline_images=['imshow', 'imshow'],
remove_text=True, style='mpl20')
def test_imshow():
# Create an NxN image
N = 100
(x, y) = np.indices((N, N))
x -= N//2
y -= N//2
r = np.sqrt(x**2+y**2-x*y)
# Display the image
fig, ax = plt.subplots()
ax.imshow(r)
# Reuse testcase from above for a labeled data test
data = {"r": r}
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow("r", data=data)
@image_comparison(baseline_images=['imshow_clip'], style='mpl20')
def test_imshow_clip():
# As originally reported by Gellule Xg <[email protected]>
# Create a NxN image
N = 100
(x, y) = np.indices((N, N))
x -= N//2
y -= N//2
r = np.sqrt(x**2+y**2-x*y)
# Create a contour plot at N/4 and extract both the clip path and transform
fig, ax = plt.subplots()
c = ax.contour(r, [N/4])
x = c.collections[0]
clipPath = x.get_paths()[0]
clipTransform = x.get_transform()
from matplotlib.transforms import TransformedPath
clip_path = TransformedPath(clipPath, clipTransform)
# Plot the image clipped by the contour
ax.imshow(r, clip_path=clip_path)
@image_comparison(baseline_images=['polycollection_joinstyle'],
remove_text=True)
def test_polycollection_joinstyle():
# Bug #2890979 reported by Matthew West
from matplotlib import collections as mcoll
fig, ax = plt.subplots()
verts = np.array([[1, 1], [1, 2], [2, 2], [2, 1]])
c = mcoll.PolyCollection([verts], linewidths=40)
ax.add_collection(c)
ax.set_xbound(0, 3)
ax.set_ybound(0, 3)
@pytest.mark.parametrize(
'x, y1, y2', [
(np.zeros((2, 2)), 3, 3),
(np.arange(0.0, 2, 0.02), np.zeros((2, 2)), 3),
(np.arange(0.0, 2, 0.02), 3, np.zeros((2, 2)))
], ids=[
'2d_x_input',
'2d_y1_input',
'2d_y2_input'
]
)
def test_fill_between_input(x, y1, y2):
fig, ax = plt.subplots()
with pytest.raises(ValueError):
ax.fill_between(x, y1, y2)
@pytest.mark.parametrize(
'y, x1, x2', [
(np.zeros((2, 2)), 3, 3),
(np.arange(0.0, 2, 0.02), np.zeros((2, 2)), 3),
(np.arange(0.0, 2, 0.02), 3, np.zeros((2, 2)))
], ids=[
'2d_y_input',
'2d_x1_input',
'2d_x2_input'
]
)
def test_fill_betweenx_input(y, x1, x2):
fig, ax = plt.subplots()
with pytest.raises(ValueError):
ax.fill_betweenx(y, x1, x2)
@image_comparison(baseline_images=['fill_between_interpolate'],
remove_text=True)
def test_fill_between_interpolate():
x = np.arange(0.0, 2, 0.02)
y1 = np.sin(2*np.pi*x)
y2 = 1.2*np.sin(4*np.pi*x)
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
ax1.plot(x, y1, x, y2, color='black')
ax1.fill_between(x, y1, y2, where=y2 >= y1, facecolor='white', hatch='/',
interpolate=True)
ax1.fill_between(x, y1, y2, where=y2 <= y1, facecolor='red',
interpolate=True)
# Test support for masked arrays.
y2 = np.ma.masked_greater(y2, 1.0)
# Test that plotting works for masked arrays with the first element masked
y2[0] = np.ma.masked
ax2.plot(x, y1, x, y2, color='black')
ax2.fill_between(x, y1, y2, where=y2 >= y1, facecolor='green',
interpolate=True)
ax2.fill_between(x, y1, y2, where=y2 <= y1, facecolor='red',
interpolate=True)
@image_comparison(baseline_images=['fill_between_interpolate_decreasing'],
style='mpl20', remove_text=True)
def test_fill_between_interpolate_decreasing():
p = np.array([724.3, 700, 655])
t = np.array([9.4, 7, 2.2])
prof = np.array([7.9, 6.6, 3.8])
fig, ax = plt.subplots(figsize=(9, 9))
ax.plot(t, p, 'tab:red')
ax.plot(prof, p, 'k')
ax.fill_betweenx(p, t, prof, where=prof < t,
facecolor='blue', interpolate=True, alpha=0.4)
ax.fill_betweenx(p, t, prof, where=prof > t,
facecolor='red', interpolate=True, alpha=0.4)
ax.set_xlim(0, 30)
ax.set_ylim(800, 600)
# test_symlog and test_symlog2 used to have baseline images in all three
# formats, but the png and svg baselines got invalidated by the removal of
# minor tick overstriking.
@image_comparison(baseline_images=['symlog'], extensions=['pdf'])
def test_symlog():
x = np.array([0, 1, 2, 4, 6, 9, 12, 24])
y = np.array([1000000, 500000, 100000, 100, 5, 0, 0, 0])
fig, ax = plt.subplots()
ax.plot(x, y)
ax.set_yscale('symlog')
ax.set_xscale('linear')
ax.set_ylim(-1, 10000000)
@image_comparison(baseline_images=['symlog2'], extensions=['pdf'],
remove_text=True)
def test_symlog2():
# Numbers from -50 to 50, with 0.001 as step
x = np.arange(-50, 50, 0.001)
fig, axs = plt.subplots(5, 1)
for ax, linthreshx in zip(axs, [20., 2., 1., 0.1, 0.01]):
ax.plot(x, x)
ax.set_xscale('symlog', linthreshx=linthreshx)
ax.grid(True)
axs[-1].set_ylim(-0.1, 0.1)
def test_pcolorargs_5205():
# Smoketest to catch issue found in gh:5205
x = [-1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5]
y = [-1.5, -1.25, -1.0, -0.75, -0.5, -0.25, 0,
0.25, 0.5, 0.75, 1.0, 1.25, 1.5]
X, Y = np.meshgrid(x, y)
Z = np.hypot(X, Y)
plt.pcolor(Z)
plt.pcolor(list(Z))
plt.pcolor(x, y, Z)
plt.pcolor(X, Y, list(Z))
@image_comparison(baseline_images=['pcolormesh'], remove_text=True)
def test_pcolormesh():
n = 12
x = np.linspace(-1.5, 1.5, n)
y = np.linspace(-1.5, 1.5, n*2)
X, Y = np.meshgrid(x, y)
Qx = np.cos(Y) - np.cos(X)
Qz = np.sin(Y) + np.sin(X)
Qx = (Qx + 1.1)
Z = np.hypot(X, Y) / 5
Z = (Z - Z.min()) / Z.ptp()
# The color array can include masked values:
Zm = ma.masked_where(np.abs(Qz) < 0.5 * np.max(Qz), Z)
fig, (ax1, ax2, ax3) = plt.subplots(1, 3)
ax1.pcolormesh(Qx, Qz, Z, lw=0.5, edgecolors='k')
ax2.pcolormesh(Qx, Qz, Z, lw=2, edgecolors=['b', 'w'])
ax3.pcolormesh(Qx, Qz, Z, shading="gouraud")
@image_comparison(baseline_images=['pcolormesh_datetime_axis'],
extensions=['png'], remove_text=False, style='mpl20')
def test_pcolormesh_datetime_axis():
fig = plt.figure()
fig.subplots_adjust(hspace=0.4, top=0.98, bottom=.15)
base = datetime.datetime(2013, 1, 1)
x = np.array([base + datetime.timedelta(days=d) for d in range(21)])
y = np.arange(21)
z1, z2 = np.meshgrid(np.arange(20), np.arange(20))
z = z1 * z2
plt.subplot(221)
plt.pcolormesh(x[:-1], y[:-1], z)
plt.subplot(222)
plt.pcolormesh(x, y, z)
x = np.repeat(x[np.newaxis], 21, axis=0)
y = np.repeat(y[:, np.newaxis], 21, axis=1)
plt.subplot(223)
plt.pcolormesh(x[:-1, :-1], y[:-1, :-1], z)
plt.subplot(224)
plt.pcolormesh(x, y, z)
for ax in fig.get_axes():
for label in ax.get_xticklabels():
label.set_ha('right')
label.set_rotation(30)
@image_comparison(baseline_images=['pcolor_datetime_axis'],
extensions=['png'], remove_text=False, style='mpl20')
def test_pcolor_datetime_axis():
fig = plt.figure()
fig.subplots_adjust(hspace=0.4, top=0.98, bottom=.15)
base = datetime.datetime(2013, 1, 1)
x = np.array([base + datetime.timedelta(days=d) for d in range(21)])
y = np.arange(21)
z1, z2 = np.meshgrid(np.arange(20), np.arange(20))
z = z1 * z2
plt.subplot(221)
plt.pcolor(x[:-1], y[:-1], z)
plt.subplot(222)
plt.pcolor(x, y, z)
x = np.repeat(x[np.newaxis], 21, axis=0)
y = np.repeat(y[:, np.newaxis], 21, axis=1)
plt.subplot(223)
plt.pcolor(x[:-1, :-1], y[:-1, :-1], z)
plt.subplot(224)
plt.pcolor(x, y, z)
for ax in fig.get_axes():
for label in ax.get_xticklabels():
label.set_ha('right')
label.set_rotation(30)
def test_pcolorargs():
n = 12
x = np.linspace(-1.5, 1.5, n)
y = np.linspace(-1.5, 1.5, n*2)
X, Y = np.meshgrid(x, y)
Z = np.hypot(X, Y) / 5
_, ax = plt.subplots()
with pytest.raises(TypeError):
ax.pcolormesh(y, x, Z)
with pytest.raises(TypeError):
ax.pcolormesh(X, Y, Z.T)
with pytest.raises(TypeError):
ax.pcolormesh(x, y, Z[:-1, :-1], shading="gouraud")
with pytest.raises(TypeError):
ax.pcolormesh(X, Y, Z[:-1, :-1], shading="gouraud")
x[0] = np.NaN
with pytest.raises(ValueError):
ax.pcolormesh(x, y, Z[:-1, :-1])
with np.errstate(invalid='ignore'):
x = np.ma.array(x, mask=(x < 0))
with pytest.raises(ValueError):
ax.pcolormesh(x, y, Z[:-1, :-1])
@image_comparison(baseline_images=['canonical'])
def test_canonical():
fig, ax = plt.subplots()
ax.plot([1, 2, 3])
@image_comparison(baseline_images=['arc_angles'], remove_text=True,
style='default', extensions=['png'])
def test_arc_angles():
from matplotlib import patches
# Ellipse parameters
w = 2
h = 1
centre = (0.2, 0.5)
scale = 2
fig, axs = plt.subplots(3, 3)
for i, ax in enumerate(axs.flat):
theta2 = i * 360 / 9
theta1 = theta2 - 45
ax.add_patch(patches.Ellipse(centre, w, h, alpha=0.3))
ax.add_patch(patches.Arc(centre, w, h, theta1=theta1, theta2=theta2))
# Straight lines intersecting start and end of arc
ax.plot([scale * np.cos(np.deg2rad(theta1)) + centre[0],
centre[0],
scale * np.cos(np.deg2rad(theta2)) + centre[0]],
[scale * np.sin(np.deg2rad(theta1)) + centre[1],
centre[1],
scale * np.sin(np.deg2rad(theta2)) + centre[1]])
ax.set_xlim(-scale, scale)
ax.set_ylim(-scale, scale)
# This looks the same, but it triggers a different code path when it
# gets large enough.
w *= 10
h *= 10
centre = (centre[0] * 10, centre[1] * 10)
scale *= 10
@image_comparison(baseline_images=['arc_ellipse'],
remove_text=True)
def test_arc_ellipse():
from matplotlib import patches
xcenter, ycenter = 0.38, 0.52
width, height = 1e-1, 3e-1
angle = -30
theta = np.deg2rad(np.arange(360))
x = width / 2. * np.cos(theta)
y = height / 2. * np.sin(theta)
rtheta = np.deg2rad(angle)
R = np.array([
[np.cos(rtheta), -np.sin(rtheta)],
[np.sin(rtheta), np.cos(rtheta)]])
x, y = np.dot(R, np.array([x, y]))
x += xcenter
y += ycenter
fig = plt.figure()
ax = fig.add_subplot(211, aspect='auto')
ax.fill(x, y, alpha=0.2, facecolor='yellow', edgecolor='yellow',
linewidth=1, zorder=1)
e1 = patches.Arc((xcenter, ycenter), width, height,
angle=angle, linewidth=2, fill=False, zorder=2)
ax.add_patch(e1)
ax = fig.add_subplot(212, aspect='equal')
ax.fill(x, y, alpha=0.2, facecolor='green', edgecolor='green', zorder=1)
e2 = patches.Arc((xcenter, ycenter), width, height,
angle=angle, linewidth=2, fill=False, zorder=2)
ax.add_patch(e2)
@image_comparison(baseline_images=['markevery'],
remove_text=True)
def test_markevery():
x = np.linspace(0, 10, 100)
y = np.sin(x) * np.sqrt(x/10 + 0.5)
# check marker only plot
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, y, 'o', label='default')
ax.plot(x, y, 'd', markevery=None, label='mark all')
ax.plot(x, y, 's', markevery=10, label='mark every 10')
ax.plot(x, y, '+', markevery=(5, 20), label='mark every 5 starting at 10')
ax.legend()
@image_comparison(baseline_images=['markevery_line'],
remove_text=True)
def test_markevery_line():
x = np.linspace(0, 10, 100)
y = np.sin(x) * np.sqrt(x/10 + 0.5)
# check line/marker combos
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, y, '-o', label='default')
ax.plot(x, y, '-d', markevery=None, label='mark all')
ax.plot(x, y, '-s', markevery=10, label='mark every 10')
ax.plot(x, y, '-+', markevery=(5, 20), label='mark every 5 starting at 10')
ax.legend()
@image_comparison(baseline_images=['markevery_linear_scales'],
remove_text=True)
def test_markevery_linear_scales():
cases = [None,
8,
(30, 8),
[16, 24, 30], [0, -1],
slice(100, 200, 3),
0.1, 0.3, 1.5,
(0.0, 0.1), (0.45, 0.1)]
cols = 3
gs = matplotlib.gridspec.GridSpec(len(cases) // cols + 1, cols)
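# 11 cases in 3 columns -> a 4-row grid (the last row is only partially filled).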
delta = 0.11
x = np.linspace(0, 10 - 2 * delta, 200) + delta
y = np.sin(x) + 1.0 + delta
for i, case in enumerate(cases):
row = (i // cols)
col = i % cols
plt.subplot(gs[row, col])
plt.title('markevery=%s' % str(case))
plt.plot(x, y, 'o', ls='-', ms=4, markevery=case)
@image_comparison(baseline_images=['markevery_linear_scales_zoomed'],
remove_text=True)
def test_markevery_linear_scales_zoomed():
cases = [None,
8,
(30, 8),
[16, 24, 30], [0, -1],
slice(100, 200, 3),
0.1, 0.3, 1.5,
(0.0, 0.1), (0.45, 0.1)]
cols = 3
gs = matplotlib.gridspec.GridSpec(len(cases) // cols + 1, cols)
delta = 0.11
x = np.linspace(0, 10 - 2 * delta, 200) + delta
y = np.sin(x) + 1.0 + delta
for i, case in enumerate(cases):
row = (i // cols)
col = i % cols
plt.subplot(gs[row, col])
plt.title('markevery=%s' % str(case))
plt.plot(x, y, 'o', ls='-', ms=4, markevery=case)
plt.xlim((6, 6.7))
plt.ylim((1.1, 1.7))
@image_comparison(baseline_images=['markevery_log_scales'],
remove_text=True)
def test_markevery_log_scales():
cases = [None,
8,
(30, 8),
[16, 24, 30], [0, -1],
slice(100, 200, 3),
0.1, 0.3, 1.5,
(0.0, 0.1), (0.45, 0.1)]
cols = 3
gs = matplotlib.gridspec.GridSpec(len(cases) // cols + 1, cols)
delta = 0.11
x = np.linspace(0, 10 - 2 * delta, 200) + delta
y = np.sin(x) + 1.0 + delta
for i, case in enumerate(cases):
row = (i // cols)
col = i % cols
plt.subplot(gs[row, col])
plt.title('markevery=%s' % str(case))
plt.xscale('log')
plt.yscale('log')
plt.plot(x, y, 'o', ls='-', ms=4, markevery=case)
@image_comparison(baseline_images=['markevery_polar'], style='default',
remove_text=True)
def test_markevery_polar():
cases = [None,
8,
(30, 8),
[16, 24, 30], [0, -1],
slice(100, 200, 3),
0.1, 0.3, 1.5,
(0.0, 0.1), (0.45, 0.1)]
cols = 3
gs = matplotlib.gridspec.GridSpec(len(cases) // cols + 1, cols)
r = np.linspace(0, 3.0, 200)
theta = 2 * np.pi * r
for i, case in enumerate(cases):
row = (i // cols)
col = i % cols
plt.subplot(gs[row, col], polar=True)
plt.title('markevery=%s' % str(case))
plt.plot(theta, r, 'o', ls='-', ms=4, markevery=case)
@image_comparison(baseline_images=['marker_edges'],
remove_text=True)
def test_marker_edges():
x = np.linspace(0, 1, 10)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, np.sin(x), 'y.', ms=30.0, mew=0, mec='r')
ax.plot(x+0.1, np.sin(x), 'y.', ms=30.0, mew=1, mec='r')
ax.plot(x+0.2, np.sin(x), 'y.', ms=30.0, mew=2, mec='b')
@image_comparison(baseline_images=['bar_tick_label_single',
'bar_tick_label_single'],
extensions=['png'])
def test_bar_tick_label_single():
# From 2516: plot bar with array of string labels for x axis
ax = plt.gca()
ax.bar(0, 1, align='edge', tick_label='0')
# Reuse testcase from above for a labeled data test
data = {"a": 0, "b": 1}
fig = plt.figure()
ax = fig.add_subplot(111)
ax.bar("a", "b", align='edge', tick_label='0', data=data)
def test_bar_ticklabel_fail():
fig, ax = plt.subplots()
ax.bar([], [])
@image_comparison(baseline_images=['bar_tick_label_multiple'],
extensions=['png'])
def test_bar_tick_label_multiple():
# From 2516: plot bar with array of string labels for x axis
ax = plt.gca()
ax.bar([1, 2.5], [1, 2], width=[0.2, 0.5], tick_label=['a', 'b'],
align='center')
@image_comparison(
baseline_images=['bar_tick_label_multiple_old_label_alignment'],
extensions=['png'])
def test_bar_tick_label_multiple_old_alignment():
# Test that the tick-label alignment is backward compatible
matplotlib.rcParams["ytick.alignment"] = "center"
ax = plt.gca()
ax.bar([1, 2.5], [1, 2], width=[0.2, 0.5], tick_label=['a', 'b'],
align='center')
def test_bar_color_none_alpha():
ax = plt.gca()
rects = ax.bar([1, 2], [2, 4], alpha=0.3, color='none', edgecolor='r')
for rect in rects:
assert rect.get_facecolor() == (0, 0, 0, 0)
assert rect.get_edgecolor() == (1, 0, 0, 0.3)
def test_bar_edgecolor_none_alpha():
ax = plt.gca()
rects = ax.bar([1, 2], [2, 4], alpha=0.3, color='r', edgecolor='none')
for rect in rects:
assert rect.get_facecolor() == (1, 0, 0, 0.3)
assert rect.get_edgecolor() == (0, 0, 0, 0)
@image_comparison(baseline_images=['barh_tick_label'],
extensions=['png'])
def test_barh_tick_label():
# From 2516: plot barh with array of string labels for y axis
ax = plt.gca()
ax.barh([1, 2.5], [1, 2], height=[0.2, 0.5], tick_label=['a', 'b'],
align='center')
def test_bar_timedelta():
"""smoketest that bar can handle width and height in delta units"""
fig, ax = plt.subplots()
ax.bar(datetime.datetime(2018, 1, 1), 1.,
width=datetime.timedelta(hours=3))
ax.bar(datetime.datetime(2018, 1, 1), 1.,
xerr=datetime.timedelta(hours=2),
width=datetime.timedelta(hours=3))
fig, ax = plt.subplots()
ax.barh(datetime.datetime(2018, 1, 1), 1,
height=datetime.timedelta(hours=3))
ax.barh(datetime.datetime(2018, 1, 1), 1,
height=datetime.timedelta(hours=3),
yerr=datetime.timedelta(hours=2))
fig, ax = plt.subplots()
ax.barh([datetime.datetime(2018, 1, 1), datetime.datetime(2018, 1, 1)],
np.array([1, 1.5]),
height=datetime.timedelta(hours=3))
ax.barh([datetime.datetime(2018, 1, 1), datetime.datetime(2018, 1, 1)],
np.array([1, 1.5]),
height=[datetime.timedelta(hours=t) for t in [1, 2]])
ax.broken_barh([(datetime.datetime(2018, 1, 1),
datetime.timedelta(hours=1))],
(10, 20))
def test_boxplot_dates_pandas(pd):
# smoke test for boxplot and dates in pandas
data = np.random.rand(5, 2)
years = pd.date_range('1/1/2000',
periods=2, freq=pd.DateOffset(years=1)).year
plt.figure()
plt.boxplot(data, positions=years)
def test_bar_pandas(pd):
# Smoke test for pandas
fig, ax = plt.subplots()
df = pd.DataFrame(
{'year': [2018, 2018, 2018],
'month': [1, 1, 1],
'day': [1, 2, 3],
'value': [1, 2, 3]})
df['date'] = pd.to_datetime(df[['year', 'month', 'day']])
monthly = df[['date', 'value']].groupby(['date']).sum()
dates = monthly.index
forecast = monthly['value']
baseline = monthly['value']
ax.bar(dates, forecast, width=10, align='center')
ax.plot(dates, baseline, color='orange', lw=4)
@image_comparison(baseline_images=['hist_log'],
remove_text=True)
def test_hist_log():
data0 = np.linspace(0, 1, 200)**3
data = np.r_[1-data0, 1+data0]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(data, fill=False, log=True)
@image_comparison(baseline_images=['hist_bar_empty'], remove_text=True,
extensions=['png'])
def test_hist_bar_empty():
# From #3886: creating hist from empty dataset raises ValueError
ax = plt.gca()
ax.hist([], histtype='bar')
@image_comparison(baseline_images=['hist_step_empty'], remove_text=True,
extensions=['png'])
def test_hist_step_empty():
# From #3886: creating hist from empty dataset raises ValueError
ax = plt.gca()
ax.hist([], histtype='step')
@image_comparison(baseline_images=['hist_steplog'], remove_text=True, tol=0.1)
def test_hist_steplog():
np.random.seed(0)
data = np.random.standard_normal(2000)
data += -2.0 - np.min(data)
data_pos = data + 2.1
data_big = data_pos + 30
weights = np.ones_like(data) * 1.e-5
ax = plt.subplot(4, 1, 1)
plt.hist(data, 100, histtype='stepfilled', log=True)
ax = plt.subplot(4, 1, 2)
plt.hist(data_pos, 100, histtype='stepfilled', log=True)
ax = plt.subplot(4, 1, 3)
plt.hist(data, 100, weights=weights, histtype='stepfilled', log=True)
ax = plt.subplot(4, 1, 4)
plt.hist(data_big, 100, histtype='stepfilled', log=True,
orientation='horizontal')
@image_comparison(baseline_images=['hist_step_filled'], remove_text=True,
extensions=['png'])
def test_hist_step_filled():
np.random.seed(0)
x = np.random.randn(1000, 3)
n_bins = 10
kwargs = [{'fill': True}, {'fill': False}, {'fill': None}, {}]*2
types = ['step']*4+['stepfilled']*4
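# Eight (kwargs, histtype) combinations, one per subplot in the 2x4 grid below.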
fig, axes = plt.subplots(nrows=2, ncols=4)
axes = axes.flatten()
for kg, _type, ax in zip(kwargs, types, axes):
ax.hist(x, n_bins, histtype=_type, stacked=True, **kg)
ax.set_title('%s/%s' % (kg, _type))
ax.set_ylim(bottom=-50)
patches = axes[0].patches
assert all(p.get_facecolor() == p.get_edgecolor() for p in patches)
@image_comparison(baseline_images=['hist_density'], extensions=['png'])
def test_hist_density():
np.random.seed(19680801)
data = np.random.standard_normal(2000)
fig, ax = plt.subplots()
ax.hist(data, density=True)
@image_comparison(baseline_images=['hist_step_log_bottom'],
remove_text=True, extensions=['png'])
def test_hist_step_log_bottom():
# check that bottom doesn't get overwritten by the 'minimum' on a
# log scale histogram (https://github.com/matplotlib/matplotlib/pull/4608)
np.random.seed(0)
data = np.random.standard_normal(2000)
fig = plt.figure()
ax = fig.add_subplot(111)
# normal hist (should clip minimum to 1/base)
ax.hist(data, bins=10, log=True, histtype='stepfilled',
alpha=0.5, color='b')
# manual bottom < 1/base (previously buggy, see #4608)
ax.hist(data, bins=10, log=True, histtype='stepfilled',
alpha=0.5, color='g', bottom=1e-2)
# manual bottom > 1/base
ax.hist(data, bins=10, log=True, histtype='stepfilled',
alpha=0.5, color='r', bottom=0.5)
# array bottom with some less than 1/base (should clip to 1/base)
ax.hist(data, bins=10, log=True, histtype='stepfilled',
alpha=0.5, color='y', bottom=np.arange(10))
ax.set_ylim(9e-3, 1e3)
def test_hist_unequal_bins_density():
# Test correct behavior of normalized histogram with unequal bins
# https://github.com/matplotlib/matplotlib/issues/9557
rng = np.random.RandomState(57483)
t = rng.randn(100)
bins = [-3, -1, -0.5, 0, 1, 5]
mpl_heights, _, _ = plt.hist(t, bins=bins, density=True)
np_heights, _ = np.histogram(t, bins=bins, density=True)
assert_allclose(mpl_heights, np_heights)
def test_hist_datetime_datasets():
data = [[datetime.datetime(2017, 1, 1), datetime.datetime(2017, 1, 1)],
[datetime.datetime(2017, 1, 1), datetime.datetime(2017, 1, 2)]]
fig, ax = plt.subplots()
ax.hist(data, stacked=True)
ax.hist(data, stacked=False)
@pytest.mark.parametrize('data, expected_number_of_hists',
[([], 1),
([[]], 1),
([[], []], 2)])
def test_hist_with_empty_input(data, expected_number_of_hists):
hists, _, _ = plt.hist(data)
hists = np.asarray(hists)
if hists.ndim == 1:
assert 1 == expected_number_of_hists
else:
assert hists.shape[0] == expected_number_of_hists
def contour_dat():
x = np.linspace(-3, 5, 150)
y = np.linspace(-3, 5, 120)
z = np.cos(x) + np.sin(y[:, np.newaxis])
return x, y, z
@image_comparison(baseline_images=['contour_hatching'],
remove_text=True, style='mpl20')
def test_contour_hatching():
x, y, z = contour_dat()
fig = plt.figure()
ax = fig.add_subplot(111)
cs = ax.contourf(x, y, z, 7, hatches=['/', '\\', '//', '-'],
cmap=plt.get_cmap('gray'),
extend='both', alpha=0.5)
@image_comparison(baseline_images=['contour_colorbar'],
style='mpl20')
def test_contour_colorbar():
x, y, z = contour_dat()
fig = plt.figure()
ax = fig.add_subplot(111)
cs = ax.contourf(x, y, z, levels=np.arange(-1.8, 1.801, 0.2),
cmap=plt.get_cmap('RdBu'),
vmin=-0.6,
vmax=0.6,
extend='both')
cs1 = ax.contour(x, y, z, levels=np.arange(-2.2, -0.599, 0.2),
colors=['y'],
linestyles='solid',
linewidths=2)
cs2 = ax.contour(x, y, z, levels=np.arange(0.6, 2.2, 0.2),
colors=['c'],
linewidths=2)
cbar = fig.colorbar(cs, ax=ax)
cbar.add_lines(cs1)
cbar.add_lines(cs2, erase=False)
@image_comparison(baseline_images=['hist2d', 'hist2d'],
remove_text=True, style='mpl20')
def test_hist2d():
np.random.seed(0)
# make it not symmetric in case we switch x and y axis
x = np.random.randn(100)*2+5
y = np.random.randn(100)-2
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist2d(x, y, bins=10, rasterized=True)
# Reuse testcase from above for a labeled data test
data = {"x": x, "y": y}
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist2d("x", "y", bins=10, data=data, rasterized=True)
@image_comparison(baseline_images=['hist2d_transpose'],
remove_text=True, style='mpl20')
def test_hist2d_transpose():
np.random.seed(0)
# make sure the output from np.histogram is transposed before
# passing to pcolorfast
x = np.array([5]*100)
y = np.random.randn(100)-2
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist2d(x, y, bins=10, rasterized=True)
def test_hist2d_density_normed():
x, y = np.random.random((2, 100))
ax = plt.figure().subplots()
for obj in [ax, plt]:
obj.hist2d(x, y, density=True)
with pytest.warns(MatplotlibDeprecationWarning):
obj.hist2d(x, y, normed=True)
with pytest.warns(MatplotlibDeprecationWarning):
obj.hist2d(x, y, density=True, normed=True)
class TestScatter(object):
@image_comparison(baseline_images=['scatter'],
style='mpl20', remove_text=True)
def test_scatter_plot(self):
data = {"x": np.array([3, 4, 2, 6]), "y": np.array([2, 5, 2, 3]),
"c": ['r', 'y', 'b', 'lime'], "s": [24, 15, 19, 29],
"c2": ['0.5', '0.6', '0.7', '0.8']}
fig, ax = plt.subplots()
ax.scatter(data["x"] - 1., data["y"] - 1., c=data["c"], s=data["s"])
ax.scatter(data["x"] + 1., data["y"] + 1., c=data["c2"], s=data["s"])
ax.scatter("x", "y", c="c", s="s", data=data)
@image_comparison(baseline_images=['scatter_marker'], remove_text=True,
extensions=['png'])
def test_scatter_marker(self):
fig, (ax0, ax1, ax2) = plt.subplots(ncols=3)
ax0.scatter([3, 4, 2, 6], [2, 5, 2, 3],
c=[(1, 0, 0), 'y', 'b', 'lime'],
s=[60, 50, 40, 30],
edgecolors=['k', 'r', 'g', 'b'],
marker='s')
ax1.scatter([3, 4, 2, 6], [2, 5, 2, 3],
c=[(1, 0, 0), 'y', 'b', 'lime'],
s=[60, 50, 40, 30],
edgecolors=['k', 'r', 'g', 'b'],
marker=mmarkers.MarkerStyle('o', fillstyle='top'))
# unit area ellipse
rx, ry = 3, 1
area = rx * ry * np.pi
theta = np.linspace(0, 2 * np.pi, 21)
verts = np.column_stack([np.cos(theta) * rx / area,
np.sin(theta) * ry / area])
ax2.scatter([3, 4, 2, 6], [2, 5, 2, 3],
c=[(1, 0, 0), 'y', 'b', 'lime'],
s=[60, 50, 40, 30],
edgecolors=['k', 'r', 'g', 'b'],
marker=verts)
@image_comparison(baseline_images=['scatter_2D'], remove_text=True,
extensions=['png'])
def test_scatter_2D(self):
x = np.arange(3)
y = np.arange(2)
x, y = np.meshgrid(x, y)
z = x + y
fig, ax = plt.subplots()
ax.scatter(x, y, c=z, s=200, edgecolors='face')
def test_scatter_color(self):
# Try to catch cases where 'c' kwarg should have been used.
with pytest.raises(ValueError):
plt.scatter([1, 2], [1, 2], color=[0.1, 0.2])
with pytest.raises(ValueError):
plt.scatter([1, 2, 3], [1, 2, 3], color=[1, 2, 3])
@check_figures_equal(extensions=["png"])
def test_scatter_invalid_color(self, fig_test, fig_ref):
ax = fig_test.subplots()
cmap = plt.get_cmap("viridis", 16)
cmap.set_bad("k", 1)
# Set a nonuniform size to prevent the last call to `scatter` (plotting
# the invalid points separately in fig_ref) from using the marker
# stamping fast path, which would result in slightly offset markers.
ax.scatter(range(4), range(4),
c=[1, np.nan, 2, np.nan], s=[1, 2, 3, 4],
cmap=cmap, plotnonfinite=True)
ax = fig_ref.subplots()
cmap = plt.get_cmap("viridis", 16)
ax.scatter([0, 2], [0, 2], c=[1, 2], s=[1, 3], cmap=cmap)
ax.scatter([1, 3], [1, 3], s=[2, 4], color="k")
@check_figures_equal(extensions=["png"])
def test_scatter_no_invalid_color(self, fig_test, fig_ref):
# With plotnonfinite=False we plot only 2 points.
ax = fig_test.subplots()
cmap = plt.get_cmap("viridis", 16)
cmap.set_bad("k", 1)
ax.scatter(range(4), range(4),
c=[1, np.nan, 2, np.nan], s=[1, 2, 3, 4],
cmap=cmap, plotnonfinite=False)
ax = fig_ref.subplots()
ax.scatter([0, 2], [0, 2], c=[1, 2], s=[1, 3], cmap=cmap)
# Parameters for *test_scatter_c*. NB: assuming that the
# scatter plot will have 4 elements. The tuple scheme is:
# (*c* parameter case, exception regexp key or None if no exception)
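# For example, ('rgb', 'shape') supplies 3 colors for 4 points and should raise the "shape" mismatch error.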
params_test_scatter_c = [
# single string:
('0.5', None),
# Single letter-sequences
("rgby", None),
("rgb", "shape"),
("rgbrgb", "shape"),
(["rgby"], "conversion"),
# Special cases
("red", None),
("none", None),
(None, None),
(["r", "g", "b", "none"], None),
# Non-valid color spec (FWIW, 'jaune' means yellow in French)
("jaune", "conversion"),
(["jaune"], "conversion"), # wrong type before wrong size
(["jaune"]*4, "conversion"),
# Value-mapping like
([0.5]*3, None), # should emit a warning for the user, though
([0.5]*4, None), # NB: no warning as matching size allows mapping
([0.5]*5, "shape"),
# list of strings:
(['0.5', '0.4', '0.6', '0.7'], None),
(['0.5', 'red', '0.6', 'C5'], None),
(['0.5', 0.5, '0.6', 'C5'], "conversion"),
# RGB values
([[1, 0, 0]], None),
([[1, 0, 0]]*3, "shape"),
([[1, 0, 0]]*4, None),
([[1, 0, 0]]*5, "shape"),
# RGBA values
([[1, 0, 0, 0.5]], None),
([[1, 0, 0, 0.5]]*3, "shape"),
([[1, 0, 0, 0.5]]*4, None),
([[1, 0, 0, 0.5]]*5, "shape"),
# Mix of valid color specs
([[1, 0, 0, 0.5]]*3 + [[1, 0, 0]], None),
([[1, 0, 0, 0.5], "red", "0.0"], "shape"),
([[1, 0, 0, 0.5], "red", "0.0", "C5"], None),
([[1, 0, 0, 0.5], "red", "0.0", "C5", [0, 1, 0]], "shape"),
# Mix of valid and non valid color specs
([[1, 0, 0, 0.5], "red", "jaune"], "conversion"),
([[1, 0, 0, 0.5], "red", "0.0", "jaune"], "conversion"),
([[1, 0, 0, 0.5], "red", "0.0", "C5", "jaune"], "conversion"),
]
@pytest.mark.parametrize('c_case, re_key', params_test_scatter_c)
def test_scatter_c(self, c_case, re_key):
def get_next_color():
return 'blue' # currently unused
from matplotlib.axes import Axes
xshape = yshape = (4,)
# Additional checking of *c* (introduced in #11383).
REGEXP = {
"shape": "^'c' argument has [0-9]+ elements", # shape mismatch
"conversion": "^'c' argument must be a mpl color", # bad vals
}
if re_key is None:
Axes._parse_scatter_color_args(
c=c_case, edgecolors="black", kwargs={},
xshape=xshape, yshape=yshape,
get_next_color_func=get_next_color)
else:
with pytest.raises(ValueError, match=REGEXP[re_key]):
Axes._parse_scatter_color_args(
c=c_case, edgecolors="black", kwargs={},
xshape=xshape, yshape=yshape,
get_next_color_func=get_next_color)
def _params(c=None, xshape=(2,), yshape=(2,), **kwargs):
edgecolors = kwargs.pop('edgecolors', None)
return (c, edgecolors, kwargs if kwargs is not None else {},
xshape, yshape)
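# _params mirrors the positional signature of Axes._parse_scatter_color_args; _result names the values checked below.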
_result = namedtuple('_result', 'c, colors')
@pytest.mark.parametrize('params, expected_result',
[(_params(),
_result(c='b', colors=np.array([[0, 0, 1, 1]]))),
(_params(c='r'),
_result(c='r', colors=np.array([[1, 0, 0, 1]]))),
(_params(c='r', colors='b'),
_result(c='r', colors=np.array([[1, 0, 0, 1]]))),
# color
(_params(color='b'),
_result(c='b', colors=np.array([[0, 0, 1, 1]]))),
(_params(color=['b', 'g']),
_result(c=['b', 'g'], colors=np.array([[0, 0, 1, 1], [0, .5, 0, 1]]))),
])
def test_parse_scatter_color_args(params, expected_result):
def get_next_color():
return 'blue' # currently unused
from matplotlib.axes import Axes
c, colors, _edgecolors = Axes._parse_scatter_color_args(
*params, get_next_color_func=get_next_color)
assert c == expected_result.c
assert_allclose(colors, expected_result.colors)
del _params
del _result
@pytest.mark.parametrize('kwargs, expected_edgecolors',
[(dict(), None),
(dict(c='b'), None),
(dict(edgecolors='r'), 'r'),
(dict(edgecolors=['r', 'g']), ['r', 'g']),
(dict(edgecolor='r'), 'r'),
(dict(edgecolors='face'), 'face'),
(dict(edgecolors='none'), 'none'),
(dict(edgecolor='r', edgecolors='g'), 'r'),
(dict(c='b', edgecolor='r', edgecolors='g'), 'r'),
(dict(color='r'), 'r'),
(dict(color='r', edgecolor='g'), 'g'),
])
def test_parse_scatter_color_args_edgecolors(kwargs, expected_edgecolors):
def get_next_color():
return 'blue' # currently unused
from matplotlib.axes import Axes
c = kwargs.pop('c', None)
edgecolors = kwargs.pop('edgecolors', None)
_, _, result_edgecolors = \
Axes._parse_scatter_color_args(c, edgecolors, kwargs,
xshape=(2,), yshape=(2,),
get_next_color_func=get_next_color)
assert result_edgecolors == expected_edgecolors
def test_as_mpl_axes_api():
# tests the _as_mpl_axes api
from matplotlib.projections.polar import PolarAxes
import matplotlib.axes as maxes
class Polar(object):
def __init__(self):
self.theta_offset = 0
def _as_mpl_axes(self):
# implement the matplotlib axes interface
return PolarAxes, {'theta_offset': self.theta_offset}
prj = Polar()
prj2 = Polar()
prj2.theta_offset = np.pi
prj3 = Polar()
# testing axes creation with plt.axes
ax = plt.axes([0, 0, 1, 1], projection=prj)
assert type(ax) == PolarAxes
ax_via_gca = plt.gca(projection=prj)
assert ax_via_gca is ax
plt.close()
# testing axes creation with gca
ax = plt.gca(projection=prj)
assert type(ax) == maxes._subplots.subplot_class_factory(PolarAxes)
ax_via_gca = plt.gca(projection=prj)
assert ax_via_gca is ax
# try getting the axes given a different polar projection
with pytest.warns(UserWarning) as rec:
ax_via_gca = plt.gca(projection=prj2)
assert len(rec) == 1
assert 'Requested projection is different' in str(rec[0].message)
assert ax_via_gca is not ax
assert ax.get_theta_offset() == 0
assert ax_via_gca.get_theta_offset() == np.pi
# try getting the axes given an == (not is) polar projection
with pytest.warns(UserWarning):
ax_via_gca = plt.gca(projection=prj3)
assert len(rec) == 1
assert 'Requested projection is different' in str(rec[0].message)
assert ax_via_gca is ax
plt.close()
# testing axes creation with subplot
ax = plt.subplot(121, projection=prj)
assert type(ax) == maxes._subplots.subplot_class_factory(PolarAxes)
plt.close()
def test_pyplot_axes():
# test focusing of Axes in other Figure
fig1, ax1 = plt.subplots()
fig2, ax2 = plt.subplots()
plt.sca(ax1)
assert ax1 is plt.gca()
assert fig1 is plt.gcf()
plt.close(fig1)
plt.close(fig2)
@image_comparison(baseline_images=['log_scales'])
def test_log_scales():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(np.log(np.linspace(0.1, 100)))
ax.set_yscale('log', basey=5.5)
ax.invert_yaxis()
ax.set_xscale('log', basex=9.0)
@image_comparison(baseline_images=['stackplot_test_image',
'stackplot_test_image'])
def test_stackplot():
fig = plt.figure()
x = np.linspace(0, 10, 10)
y1 = 1.0 * x
y2 = 2.0 * x + 1
y3 = 3.0 * x + 2
ax = fig.add_subplot(1, 1, 1)
ax.stackplot(x, y1, y2, y3)
ax.set_xlim((0, 10))
ax.set_ylim((0, 70))
# Reuse testcase from above for a labeled data test
data = {"x": x, "y1": y1, "y2": y2, "y3": y3}
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.stackplot("x", "y1", "y2", "y3", data=data)
ax.set_xlim((0, 10))
ax.set_ylim((0, 70))
@image_comparison(baseline_images=['stackplot_test_baseline'],
remove_text=True)
def test_stackplot_baseline():
np.random.seed(0)
def layers(n, m):
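# Each of the n columns is a sum of 5 random Gaussian bumps sampled at m points.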
a = np.zeros((m, n))
for i in range(n):
for j in range(5):
x = 1 / (.1 + np.random.random())
y = 2 * np.random.random() - .5
z = 10 / (.1 + np.random.random())
a[:, i] += x * np.exp(-((np.arange(m) / m - y) * z) ** 2)
return a
d = layers(3, 100)
d[50, :] = 0 # test for fixed weighted wiggle (issue #6313)
fig, axs = plt.subplots(2, 2)
axs[0, 0].stackplot(range(100), d.T, baseline='zero')
axs[0, 1].stackplot(range(100), d.T, baseline='sym')
axs[1, 0].stackplot(range(100), d.T, baseline='wiggle')
axs[1, 1].stackplot(range(100), d.T, baseline='weighted_wiggle')
def _bxp_test_helper(
stats_kwargs={}, transform_stats=lambda s: s, bxp_kwargs={}):
np.random.seed(937)
logstats = mpl.cbook.boxplot_stats(
np.random.lognormal(mean=1.25, sigma=1., size=(37, 4)), **stats_kwargs)
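# boxplot_stats returns one stats dict per column of the (37, 4) sample, so four boxes are drawn.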
fig, ax = plt.subplots()
if bxp_kwargs.get('vert', True):
ax.set_yscale('log')
else:
ax.set_xscale('log')
# Work around baseline images generated back when bxp did not respect the
# boxplot.boxprops.linewidth rcParam when patch_artist is False.
if not bxp_kwargs.get('patch_artist', False):
mpl.rcParams['boxplot.boxprops.linewidth'] = \
mpl.rcParams['lines.linewidth']
ax.bxp(transform_stats(logstats), **bxp_kwargs)
@image_comparison(baseline_images=['bxp_baseline'],
extensions=['png'],
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_baseline():
_bxp_test_helper()
@image_comparison(baseline_images=['bxp_rangewhis'],
extensions=['png'],
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_rangewhis():
_bxp_test_helper(stats_kwargs=dict(whis='range'))
@image_comparison(baseline_images=['bxp_precentilewhis'],
extensions=['png'],
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_precentilewhis():
_bxp_test_helper(stats_kwargs=dict(whis=[5, 95]))
@image_comparison(baseline_images=['bxp_with_xlabels'],
extensions=['png'],
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_with_xlabels():
def transform(stats):
for s, label in zip(stats, list('ABCD')):
s['label'] = label
return stats
_bxp_test_helper(transform_stats=transform)
@image_comparison(baseline_images=['bxp_horizontal'],
remove_text=True, extensions=['png'],
savefig_kwarg={'dpi': 40},
style='default',
tol=0.1)
def test_bxp_horizontal():
_bxp_test_helper(bxp_kwargs=dict(vert=False))
@image_comparison(baseline_images=['bxp_with_ylabels'],
extensions=['png'],
savefig_kwarg={'dpi': 40},
style='default',
tol=0.1,)
def test_bxp_with_ylabels():
def transform(stats):
for s, label in zip(stats, list('ABCD')):
s['label'] = label
return stats
_bxp_test_helper(transform_stats=transform, bxp_kwargs=dict(vert=False))
@image_comparison(baseline_images=['bxp_patchartist'],
remove_text=True, extensions=['png'],
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_patchartist():
_bxp_test_helper(bxp_kwargs=dict(patch_artist=True))
@image_comparison(baseline_images=['bxp_custompatchartist'],
remove_text=True, extensions=['png'],
savefig_kwarg={'dpi': 100},
style='default')
def test_bxp_custompatchartist():
_bxp_test_helper(bxp_kwargs=dict(
patch_artist=True,
boxprops=dict(facecolor='yellow', edgecolor='green', linestyle=':')))
@image_comparison(baseline_images=['bxp_customoutlier'],
remove_text=True, extensions=['png'],
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_customoutlier():
_bxp_test_helper(bxp_kwargs=dict(
flierprops=dict(linestyle='none', marker='d', markerfacecolor='g')))
@image_comparison(baseline_images=['bxp_withmean_custompoint'],
remove_text=True, extensions=['png'],
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_showcustommean():
_bxp_test_helper(bxp_kwargs=dict(
showmeans=True,
meanprops=dict(linestyle='none', marker='d', markerfacecolor='green'),
))
@image_comparison(baseline_images=['bxp_custombox'],
remove_text=True, extensions=['png'],
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_custombox():
_bxp_test_helper(bxp_kwargs=dict(
boxprops=dict(linestyle='--', color='b', linewidth=3)))
@image_comparison(baseline_images=['bxp_custommedian'],
remove_text=True, extensions=['png'],
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_custommedian():
_bxp_test_helper(bxp_kwargs=dict(
medianprops=dict(linestyle='--', color='b', linewidth=3)))
@image_comparison(baseline_images=['bxp_customcap'],
remove_text=True, extensions=['png'],
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_customcap():
_bxp_test_helper(bxp_kwargs=dict(
capprops=dict(linestyle='--', color='g', linewidth=3)))
@image_comparison(baseline_images=['bxp_customwhisker'],
remove_text=True, extensions=['png'],
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_customwhisker():
_bxp_test_helper(bxp_kwargs=dict(
whiskerprops=dict(linestyle='-', color='m', linewidth=3)))
@image_comparison(baseline_images=['bxp_withnotch'],
remove_text=True, extensions=['png'],
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_shownotches():
_bxp_test_helper(bxp_kwargs=dict(shownotches=True))
@image_comparison(baseline_images=['bxp_nocaps'],
remove_text=True, extensions=['png'],
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_nocaps():
_bxp_test_helper(bxp_kwargs=dict(showcaps=False))
@image_comparison(baseline_images=['bxp_nobox'],
remove_text=True, extensions=['png'],
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_nobox():
_bxp_test_helper(bxp_kwargs=dict(showbox=False))
@image_comparison(baseline_images=['bxp_no_flier_stats'],
remove_text=True, extensions=['png'],
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_no_flier_stats():
def transform(stats):
for s in stats:
s.pop('fliers', None)
return stats
_bxp_test_helper(transform_stats=transform,
bxp_kwargs=dict(showfliers=False))
@image_comparison(baseline_images=['bxp_withmean_point'],
remove_text=True, extensions=['png'],
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_showmean():
_bxp_test_helper(bxp_kwargs=dict(showmeans=True, meanline=False))
@image_comparison(baseline_images=['bxp_withmean_line'],
remove_text=True, extensions=['png'],
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_showmeanasline():
_bxp_test_helper(bxp_kwargs=dict(showmeans=True, meanline=True))
@image_comparison(baseline_images=['bxp_scalarwidth'],
remove_text=True, extensions=['png'],
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_scalarwidth():
_bxp_test_helper(bxp_kwargs=dict(widths=.25))
@image_comparison(baseline_images=['bxp_customwidths'],
remove_text=True, extensions=['png'],
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_customwidths():
_bxp_test_helper(bxp_kwargs=dict(widths=[0.10, 0.25, 0.65, 0.85]))
@image_comparison(baseline_images=['bxp_custompositions'],
remove_text=True, extensions=['png'],
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_custompositions():
_bxp_test_helper(bxp_kwargs=dict(positions=[1, 5, 6, 7]))
def test_bxp_bad_widths():
with pytest.raises(ValueError):
_bxp_test_helper(bxp_kwargs=dict(widths=[1]))
def test_bxp_bad_positions():
with pytest.raises(ValueError):
_bxp_test_helper(bxp_kwargs=dict(positions=[2, 3]))
@image_comparison(baseline_images=['boxplot', 'boxplot'],
tol=1.28,
style='default')
def test_boxplot():
# Randomness used for bootstrapping.
np.random.seed(937)
x = np.linspace(-7, 7, 140)
x = np.hstack([-25, x, 25])
fig, ax = plt.subplots()
ax.boxplot([x, x], bootstrap=10000, notch=1)
ax.set_ylim((-30, 30))
# Reuse testcase from above for a labeled data test
data = {"x": [x, x]}
fig, ax = plt.subplots()
ax.boxplot("x", bootstrap=10000, notch=1, data=data)
ax.set_ylim((-30, 30))
@image_comparison(baseline_images=['boxplot_sym2'],
remove_text=True, extensions=['png'],
style='default')
def test_boxplot_sym2():
# Randomness used for bootstrapping.
np.random.seed(937)
x = np.linspace(-7, 7, 140)
x = np.hstack([-25, x, 25])
fig, [ax1, ax2] = plt.subplots(1, 2)
ax1.boxplot([x, x], bootstrap=10000, sym='^')
ax1.set_ylim((-30, 30))
ax2.boxplot([x, x], bootstrap=10000, sym='g')
ax2.set_ylim((-30, 30))
@image_comparison(baseline_images=['boxplot_sym'],
remove_text=True, extensions=['png'],
savefig_kwarg={'dpi': 40},
style='default')
def test_boxplot_sym():
x = np.linspace(-7, 7, 140)
x = np.hstack([-25, x, 25])
fig, ax = plt.subplots()
ax.boxplot([x, x], sym='gs')
ax.set_ylim((-30, 30))
@image_comparison(
baseline_images=['boxplot_autorange_false_whiskers',
'boxplot_autorange_true_whiskers'],
extensions=['png'],
style='default'
)
def test_boxplot_autorange_whiskers():
# Randomness used for bootstrapping.
np.random.seed(937)
x = np.ones(140)
x = np.hstack([0, x, 2])
fig1, ax1 = plt.subplots()
ax1.boxplot([x, x], bootstrap=10000, notch=1)
ax1.set_ylim((-5, 5))
fig2, ax2 = plt.subplots()
ax2.boxplot([x, x], bootstrap=10000, notch=1, autorange=True)
ax2.set_ylim((-5, 5))
def _rc_test_bxp_helper(ax, rc_dict):
x = np.linspace(-7, 7, 140)
x = np.hstack([-25, x, 25])
with matplotlib.rc_context(rc_dict):
ax.boxplot([x, x])
return ax
@image_comparison(baseline_images=['boxplot_rc_parameters'],
savefig_kwarg={'dpi': 100}, remove_text=True,
tol=1, style='default')
def test_boxplot_rc_parameters():
# Randomness used for bootstrapping.
np.random.seed(937)
fig, ax = plt.subplots(3)
rc_axis0 = {
'boxplot.notch': True,
'boxplot.whiskers': [5, 95],
'boxplot.bootstrap': 10000,
'boxplot.flierprops.color': 'b',
'boxplot.flierprops.marker': 'o',
'boxplot.flierprops.markerfacecolor': 'g',
'boxplot.flierprops.markeredgecolor': 'b',
'boxplot.flierprops.markersize': 5,
'boxplot.flierprops.linestyle': '--',
'boxplot.flierprops.linewidth': 2.0,
'boxplot.boxprops.color': 'r',
'boxplot.boxprops.linewidth': 2.0,
'boxplot.boxprops.linestyle': '--',
'boxplot.capprops.color': 'c',
'boxplot.capprops.linewidth': 2.0,
'boxplot.capprops.linestyle': '--',
'boxplot.medianprops.color': 'k',
'boxplot.medianprops.linewidth': 2.0,
'boxplot.medianprops.linestyle': '--',
}
rc_axis1 = {
'boxplot.vertical': False,
'boxplot.whiskers': 'range',
'boxplot.patchartist': True,
}
rc_axis2 = {
'boxplot.whiskers': 2.0,
'boxplot.showcaps': False,
'boxplot.showbox': False,
'boxplot.showfliers': False,
'boxplot.showmeans': True,
'boxplot.meanline': True,
'boxplot.meanprops.color': 'c',
'boxplot.meanprops.linewidth': 2.0,
'boxplot.meanprops.linestyle': '--',
'boxplot.whiskerprops.color': 'r',
'boxplot.whiskerprops.linewidth': 2.0,
'boxplot.whiskerprops.linestyle': '-.',
}
dict_list = [rc_axis0, rc_axis1, rc_axis2]
for axis, rc_axis in zip(ax, dict_list):
_rc_test_bxp_helper(axis, rc_axis)
assert (matplotlib.patches.PathPatch in
[type(t) for t in ax[1].get_children()])
@image_comparison(baseline_images=['boxplot_with_CIarray'],
remove_text=True, extensions=['png'],
savefig_kwarg={'dpi': 40}, style='default')
def test_boxplot_with_CIarray():
# Randomness used for bootstrapping.
np.random.seed(937)
x = np.linspace(-7, 7, 140)
x = np.hstack([-25, x, 25])
fig = plt.figure()
ax = fig.add_subplot(111)
CIs = np.array([[-1.5, 3.], [-1., 3.5]])
# show 1 boxplot with mpl medians/conf. intervals, 1 with manual values
ax.boxplot([x, x], bootstrap=10000, usermedians=[None, 1.0],
conf_intervals=CIs, notch=1)
ax.set_ylim((-30, 30))
@image_comparison(baseline_images=['boxplot_no_inverted_whisker'],
remove_text=True, extensions=['png'],
savefig_kwarg={'dpi': 40}, style='default')
def test_boxplot_no_weird_whisker():
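    # Check that the whiskers are not drawn inverted when the y-axis
    # is log-scaled.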
x = np.array([3, 9000, 150, 88, 350, 200000, 1400, 960],
dtype=np.float64)
ax1 = plt.axes()
ax1.boxplot(x)
ax1.set_yscale('log')
ax1.yaxis.grid(False, which='minor')
ax1.xaxis.grid(False)
def test_boxplot_bad_medians_1():
x = np.linspace(-7, 7, 140)
x = np.hstack([-25, x, 25])
fig, ax = plt.subplots()
with pytest.raises(ValueError):
ax.boxplot(x, usermedians=[1, 2])
def test_boxplot_bad_medians_2():
x = np.linspace(-7, 7, 140)
x = np.hstack([-25, x, 25])
fig, ax = plt.subplots()
with pytest.raises(ValueError):
ax.boxplot([x, x], usermedians=[[1, 2], [1, 2]])
def test_boxplot_bad_ci_1():
x = np.linspace(-7, 7, 140)
x = np.hstack([-25, x, 25])
fig, ax = plt.subplots()
with pytest.raises(ValueError):
ax.boxplot([x, x], conf_intervals=[[1, 2]])
def test_boxplot_zorder():
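    # Boxes default to zorder 2; an explicit zorder kwarg should be
    # passed through to the box artists.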
x = np.arange(10)
    fig, ax = plt.subplots()
assert ax.boxplot(x)['boxes'][0].get_zorder() == 2
assert ax.boxplot(x, zorder=10)['boxes'][0].get_zorder() == 10
def test_boxplot_bad_ci_2():
x = np.linspace(-7, 7, 140)
x = np.hstack([-25, x, 25])
fig, ax = plt.subplots()
with pytest.raises(ValueError):
ax.boxplot([x, x], conf_intervals=[[1, 2], [1]])
@image_comparison(baseline_images=['boxplot_mod_artists_after_plotting'],
remove_text=True, extensions=['png'],
savefig_kwarg={'dpi': 40}, style='default')
def test_boxplot_mod_artist_after_plotting():
x = [0.15, 0.11, 0.06, 0.06, 0.12, 0.56, -0.56]
fig, ax = plt.subplots()
bp = ax.boxplot(x, sym="o")
for key in bp:
for obj in bp[key]:
obj.set_color('green')
@image_comparison(baseline_images=['violinplot_vert_baseline',
'violinplot_vert_baseline'],
extensions=['png'])
def test_vert_violinplot_baseline():
# First 9 digits of frac(sqrt(2))
np.random.seed(414213562)
data = [np.random.normal(size=100) for i in range(4)]
ax = plt.axes()
ax.violinplot(data, positions=range(4), showmeans=0, showextrema=0,
showmedians=0)
# Reuse testcase from above for a labeled data test
data = {"d": data}
fig, ax = plt.subplots()
ax = plt.axes()
ax.violinplot("d", positions=range(4), showmeans=0, showextrema=0,
showmedians=0, data=data)
@image_comparison(baseline_images=['violinplot_vert_showmeans'],
extensions=['png'])
def test_vert_violinplot_showmeans():
ax = plt.axes()
# First 9 digits of frac(sqrt(3))
np.random.seed(732050807)
data = [np.random.normal(size=100) for i in range(4)]
ax.violinplot(data, positions=range(4), showmeans=1, showextrema=0,
showmedians=0)
@image_comparison(baseline_images=['violinplot_vert_showextrema'],
extensions=['png'])
def test_vert_violinplot_showextrema():
ax = plt.axes()
# First 9 digits of frac(sqrt(5))
np.random.seed(236067977)
data = [np.random.normal(size=100) for i in range(4)]
ax.violinplot(data, positions=range(4), showmeans=0, showextrema=1,
showmedians=0)
@image_comparison(baseline_images=['violinplot_vert_showmedians'],
extensions=['png'])
def test_vert_violinplot_showmedians():
ax = plt.axes()
# First 9 digits of frac(sqrt(7))
np.random.seed(645751311)
data = [np.random.normal(size=100) for i in range(4)]
ax.violinplot(data, positions=range(4), showmeans=0, showextrema=0,
showmedians=1)
@image_comparison(baseline_images=['violinplot_vert_showall'],
extensions=['png'])
def test_vert_violinplot_showall():
ax = plt.axes()
# First 9 digits of frac(sqrt(11))
np.random.seed(316624790)
data = [np.random.normal(size=100) for i in range(4)]
ax.violinplot(data, positions=range(4), showmeans=1, showextrema=1,
showmedians=1)
@image_comparison(baseline_images=['violinplot_vert_custompoints_10'],
extensions=['png'])
def test_vert_violinplot_custompoints_10():
ax = plt.axes()
# First 9 digits of frac(sqrt(13))
np.random.seed(605551275)
data = [np.random.normal(size=100) for i in range(4)]
ax.violinplot(data, positions=range(4), showmeans=0, showextrema=0,
showmedians=0, points=10)
@image_comparison(baseline_images=['violinplot_vert_custompoints_200'],
extensions=['png'])
def test_vert_violinplot_custompoints_200():
ax = plt.axes()
# First 9 digits of frac(sqrt(17))
np.random.seed(123105625)
data = [np.random.normal(size=100) for i in range(4)]
ax.violinplot(data, positions=range(4), showmeans=0, showextrema=0,
showmedians=0, points=200)
@image_comparison(baseline_images=['violinplot_horiz_baseline'],
extensions=['png'])
def test_horiz_violinplot_baseline():
ax = plt.axes()
# First 9 digits of frac(sqrt(19))
np.random.seed(358898943)
data = [np.random.normal(size=100) for i in range(4)]
ax.violinplot(data, positions=range(4), vert=False, showmeans=0,
showextrema=0, showmedians=0)
@image_comparison(baseline_images=['violinplot_horiz_showmedians'],
extensions=['png'])
def test_horiz_violinplot_showmedians():
ax = plt.axes()
# First 9 digits of frac(sqrt(23))
np.random.seed(795831523)
data = [np.random.normal(size=100) for i in range(4)]
ax.violinplot(data, positions=range(4), vert=False, showmeans=0,
showextrema=0, showmedians=1)
@image_comparison(baseline_images=['violinplot_horiz_showmeans'],
extensions=['png'])
def test_horiz_violinplot_showmeans():
ax = plt.axes()
# First 9 digits of frac(sqrt(29))
np.random.seed(385164807)
data = [np.random.normal(size=100) for i in range(4)]
ax.violinplot(data, positions=range(4), vert=False, showmeans=1,
showextrema=0, showmedians=0)
@image_comparison(baseline_images=['violinplot_horiz_showextrema'],
extensions=['png'])
def test_horiz_violinplot_showextrema():
ax = plt.axes()
# First 9 digits of frac(sqrt(31))
np.random.seed(567764362)
data = [np.random.normal(size=100) for i in range(4)]
ax.violinplot(data, positions=range(4), vert=False, showmeans=0,
showextrema=1, showmedians=0)
@image_comparison(baseline_images=['violinplot_horiz_showall'],
extensions=['png'])
def test_horiz_violinplot_showall():
ax = plt.axes()
# First 9 digits of frac(sqrt(37))
np.random.seed(82762530)
data = [np.random.normal(size=100) for i in range(4)]
ax.violinplot(data, positions=range(4), vert=False, showmeans=1,
showextrema=1, showmedians=1)
@image_comparison(baseline_images=['violinplot_horiz_custompoints_10'],
extensions=['png'])
def test_horiz_violinplot_custompoints_10():
ax = plt.axes()
# First 9 digits of frac(sqrt(41))
np.random.seed(403124237)
data = [np.random.normal(size=100) for i in range(4)]
ax.violinplot(data, positions=range(4), vert=False, showmeans=0,
showextrema=0, showmedians=0, points=10)
@image_comparison(baseline_images=['violinplot_horiz_custompoints_200'],
extensions=['png'])
def test_horiz_violinplot_custompoints_200():
ax = plt.axes()
# First 9 digits of frac(sqrt(43))
np.random.seed(557438524)
data = [np.random.normal(size=100) for i in range(4)]
ax.violinplot(data, positions=range(4), vert=False, showmeans=0,
showextrema=0, showmedians=0, points=200)
def test_violinplot_bad_positions():
ax = plt.axes()
# First 9 digits of frac(sqrt(47))
np.random.seed(855654600)
data = [np.random.normal(size=100) for i in range(4)]
with pytest.raises(ValueError):
ax.violinplot(data, positions=range(5))
def test_violinplot_bad_widths():
ax = plt.axes()
# First 9 digits of frac(sqrt(53))
np.random.seed(280109889)
data = [np.random.normal(size=100) for i in range(4)]
with pytest.raises(ValueError):
ax.violinplot(data, positions=range(4), widths=[1, 2, 3])
def test_manage_xticks():
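    # With manage_ticks=False, boxplot must leave the pre-existing
    # x-limits untouched.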
_, ax = plt.subplots()
ax.set_xlim(0, 4)
old_xlim = ax.get_xlim()
np.random.seed(0)
y1 = np.random.normal(10, 3, 20)
y2 = np.random.normal(3, 1, 20)
ax.boxplot([y1, y2], positions=[1, 2], manage_ticks=False)
new_xlim = ax.get_xlim()
assert_array_equal(old_xlim, new_xlim)
def test_boxplot_not_single():
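    # Boxplots added one call at a time at explicit positions should
    # still autoscale the limits and ticks to cover all of them.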
fig, ax = plt.subplots()
ax.boxplot(np.random.rand(100), positions=[3])
ax.boxplot(np.random.rand(100), positions=[5])
fig.canvas.draw()
assert ax.get_xlim() == (2.5, 5.5)
assert list(ax.get_xticks()) == [3, 5]
assert [t.get_text() for t in ax.get_xticklabels()] == ["3", "5"]
def test_tick_space_size_0():
# allow font size to be zero, which affects ticks when there is
# no other text in the figure.
plt.plot([0, 1], [0, 1])
matplotlib.rcParams.update({'font.size': 0})
b = io.BytesIO()
plt.savefig(b, dpi=80, format='raw')
@image_comparison(baseline_images=['errorbar_basic', 'errorbar_mixed',
'errorbar_basic'])
def test_errorbar():
x = np.arange(0.1, 4, 0.5)
y = np.exp(-x)
yerr = 0.1 + 0.2*np.sqrt(x)
xerr = 0.1 + yerr
# First illustrate basic pyplot interface, using defaults where possible.
fig = plt.figure()
ax = fig.gca()
ax.errorbar(x, y, xerr=0.2, yerr=0.4)
ax.set_title("Simplest errorbars, 0.2 in x, 0.4 in y")
# Now switch to a more OO interface to exercise more features.
fig, axs = plt.subplots(nrows=2, ncols=2, sharex=True)
ax = axs[0, 0]
    # Try an Nx1 shaped error just to check
with pytest.warns(MatplotlibDeprecationWarning):
ax.errorbar(x, y, yerr=np.reshape(yerr, (len(y), 1)), fmt='o')
ax.set_title('Vert. symmetric')
# With 4 subplots, reduce the number of axis ticks to avoid crowding.
ax.locator_params(nbins=4)
ax = axs[0, 1]
ax.errorbar(x, y, xerr=xerr, fmt='o', alpha=0.4)
ax.set_title('Hor. symmetric w/ alpha')
ax = axs[1, 0]
ax.errorbar(x, y, yerr=[yerr, 2*yerr], xerr=[xerr, 2*xerr], fmt='--o')
ax.set_title('H, V asymmetric')
ax = axs[1, 1]
ax.set_yscale('log')
# Here we have to be careful to keep all y values positive:
ylower = np.maximum(1e-2, y - yerr)
yerr_lower = y - ylower
ax.errorbar(x, y, yerr=[yerr_lower, 2*yerr], xerr=xerr,
fmt='o', ecolor='g', capthick=2)
ax.set_title('Mixed sym., log y')
fig.suptitle('Variable errorbars')
# Reuse the first testcase from above for a labeled data test
data = {"x": x, "y": y}
fig = plt.figure()
ax = fig.gca()
ax.errorbar("x", "y", xerr=0.2, yerr=0.4, data=data)
ax.set_title("Simplest errorbars, 0.2 in x, 0.4 in y")
def test_errorbar_colorcycle():
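    # errorbar() should advance the property cycle just like plot(),
    # so a later plot() picks up the next cycle color.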
f, ax = plt.subplots()
x = np.arange(10)
y = 2*x
e1, _, _ = ax.errorbar(x, y, c=None)
e2, _, _ = ax.errorbar(x, 2*y, c=None)
ln1, = ax.plot(x, 4*y)
assert mcolors.to_rgba(e1.get_color()) == mcolors.to_rgba('C0')
assert mcolors.to_rgba(e2.get_color()) == mcolors.to_rgba('C1')
assert mcolors.to_rgba(ln1.get_color()) == mcolors.to_rgba('C2')
def test_errorbar_shape():
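    # Asymmetric errors must be passed as a (2, N) array; the
    # transposed (N, 2) shape should be rejected.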
fig = plt.figure()
ax = fig.gca()
x = np.arange(0.1, 4, 0.5)
y = np.exp(-x)
yerr1 = 0.1 + 0.2*np.sqrt(x)
yerr = np.vstack((yerr1, 2*yerr1)).T
xerr = 0.1 + yerr
with pytest.raises(ValueError):
ax.errorbar(x, y, yerr=yerr, fmt='o')
with pytest.raises(ValueError):
ax.errorbar(x, y, xerr=xerr, fmt='o')
with pytest.raises(ValueError):
ax.errorbar(x, y, yerr=yerr, xerr=xerr, fmt='o')
@image_comparison(baseline_images=['errorbar_limits'])
def test_errorbar_limits():
x = np.arange(0.5, 5.5, 0.5)
y = np.exp(-x)
xerr = 0.1
yerr = 0.2
ls = 'dotted'
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
# standard error bars
plt.errorbar(x, y, xerr=xerr, yerr=yerr, ls=ls, color='blue')
# including upper limits
uplims = np.zeros_like(x)
uplims[[1, 5, 9]] = True
plt.errorbar(x, y+0.5, xerr=xerr, yerr=yerr, uplims=uplims, ls=ls,
color='green')
# including lower limits
lolims = np.zeros_like(x)
lolims[[2, 4, 8]] = True
plt.errorbar(x, y+1.0, xerr=xerr, yerr=yerr, lolims=lolims, ls=ls,
color='red')
# including upper and lower limits
plt.errorbar(x, y+1.5, marker='o', ms=8, xerr=xerr, yerr=yerr,
lolims=lolims, uplims=uplims, ls=ls, color='magenta')
# including xlower and xupper limits
xerr = 0.2
yerr = np.full_like(x, 0.2)
yerr[[3, 6]] = 0.3
xlolims = lolims
xuplims = uplims
lolims = np.zeros_like(x)
uplims = np.zeros_like(x)
lolims[[6]] = True
uplims[[3]] = True
plt.errorbar(x, y+2.1, marker='o', ms=8, xerr=xerr, yerr=yerr,
xlolims=xlolims, xuplims=xuplims, uplims=uplims,
lolims=lolims, ls='none', mec='blue', capsize=0,
color='cyan')
ax.set_xlim((0, 5.5))
ax.set_title('Errorbar upper and lower limits')
def test_errorbar_nonefmt():
# Check that passing 'none' as a format still plots errorbars
x = np.arange(5)
y = np.arange(5)
plotline, _, barlines = plt.errorbar(x, y, xerr=1, yerr=1, fmt='none')
assert plotline is None
for errbar in barlines:
assert np.all(errbar.get_color() == mcolors.to_rgba('C0'))
@image_comparison(baseline_images=['errorbar_with_prop_cycle'],
extensions=['png'], style='mpl20', remove_text=True)
def test_errorbar_with_prop_cycle():
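    # The linestyle, marker and markerfacecolor should all come from
    # the custom prop_cycle set below.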
_cycle = cycler(ls=['--', ':'], marker=['s', 's'], mfc=['k', 'w'])
plt.rc("axes", prop_cycle=_cycle)
fig, ax = plt.subplots()
ax.errorbar(x=[2, 4, 10], y=[3, 2, 4], yerr=0.5)
ax.errorbar(x=[2, 4, 10], y=[6, 4, 2], yerr=0.5)
@image_comparison(baseline_images=['hist_stacked_stepfilled',
'hist_stacked_stepfilled'])
def test_hist_stacked_stepfilled():
# make some data
d1 = np.linspace(1, 3, 20)
d2 = np.linspace(0, 10, 50)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist((d1, d2), histtype="stepfilled", stacked=True)
# Reuse testcase from above for a labeled data test
data = {"x": (d1, d2)}
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist("x", histtype="stepfilled", stacked=True, data=data)
@image_comparison(baseline_images=['hist_offset'])
def test_hist_offset():
# make some data
d1 = np.linspace(0, 10, 50)
d2 = np.linspace(1, 3, 20)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(d1, bottom=5)
ax.hist(d2, bottom=15)
@image_comparison(baseline_images=['hist_step'], extensions=['png'],
remove_text=True)
def test_hist_step():
# make some data
d1 = np.linspace(1, 3, 20)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(d1, histtype="step")
ax.set_ylim(0, 10)
ax.set_xlim(-1, 5)
@image_comparison(baseline_images=['hist_step_horiz'], extensions=['png'])
def test_hist_step_horiz():
# make some data
d1 = np.linspace(0, 10, 50)
d2 = np.linspace(1, 3, 20)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist((d1, d2), histtype="step", orientation="horizontal")
@image_comparison(baseline_images=['hist_stacked_weights'])
def test_hist_stacked_weighted():
# make some data
d1 = np.linspace(0, 10, 50)
d2 = np.linspace(1, 3, 20)
w1 = np.linspace(0.01, 3.5, 50)
w2 = np.linspace(0.05, 2., 20)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist((d1, d2), weights=(w1, w2), histtype="stepfilled", stacked=True)
@pytest.mark.parametrize("use_line_collection", [True, False],
ids=['w/ line collection', 'w/o line collection'])
@image_comparison(baseline_images=['stem'], extensions=['png'], style='mpl20',
remove_text=True)
def test_stem(use_line_collection):
x = np.linspace(0.1, 2 * np.pi, 100)
args = (x, np.cos(x))
# Label is a single space to force a legend to be drawn, but to avoid any
# text being drawn
kwargs = dict(linefmt='C2-.', markerfmt='k+', basefmt='C1-.',
label=' ', use_line_collection=use_line_collection)
fig, ax = plt.subplots()
if use_line_collection:
ax.stem(*args, **kwargs)
else:
with pytest.warns(UserWarning):
ax.stem(*args, **kwargs)
ax.legend()
@check_figures_equal(extensions=['png'])
def test_stem_params(fig_test, fig_ref):
x = np.linspace(0, 3.14, 37)
y = np.sin(x)
ax = fig_test.subplots()
ax.stem(x, y, linefmt='grey', use_line_collection=True)
ax = fig_ref.subplots()
with pytest.warns(UserWarning):
ax.stem(x, y, linefmt='grey')
def test_stem_args():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
x = list(range(10))
y = list(range(10))
# Test the call signatures
ax.stem(y, use_line_collection=True)
ax.stem(x, y, use_line_collection=True)
ax.stem(x, y, 'r--', use_line_collection=True)
ax.stem(x, y, 'r--', basefmt='b--', use_line_collection=True)
def test_stem_dates():
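    # stem() should accept datetime values on the x-axis.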
fig, ax = plt.subplots(1, 1)
from dateutil import parser
x = parser.parse("2013-9-28 11:00:00")
y = 100
x1 = parser.parse("2013-9-28 12:00:00")
y1 = 200
ax.stem([x, x1], [y, y1], "*-", use_line_collection=True)
@image_comparison(baseline_images=['hist_stacked_stepfilled_alpha'])
def test_hist_stacked_stepfilled_alpha():
# make some data
d1 = np.linspace(1, 3, 20)
d2 = np.linspace(0, 10, 50)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist((d1, d2), histtype="stepfilled", stacked=True, alpha=0.5)
@image_comparison(baseline_images=['hist_stacked_step'])
def test_hist_stacked_step():
# make some data
d1 = np.linspace(1, 3, 20)
d2 = np.linspace(0, 10, 50)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist((d1, d2), histtype="step", stacked=True)
@image_comparison(baseline_images=['hist_stacked_normed',
'hist_stacked_normed'])
def test_hist_stacked_density():
# make some data
d1 = np.linspace(1, 3, 20)
d2 = np.linspace(0, 10, 50)
fig, ax = plt.subplots()
ax.hist((d1, d2), stacked=True, density=True)
# Also check that the old keyword works.
fig, ax = plt.subplots()
with pytest.warns(UserWarning):
ax.hist((d1, d2), stacked=True, normed=True)
@pytest.mark.parametrize('normed', [False, True])
@pytest.mark.parametrize('density', [False, True])
def test_hist_normed_density(normed, density):
# Normed and density should not be used simultaneously
d1 = np.linspace(1, 3, 20)
d2 = np.linspace(0, 10, 50)
fig, ax = plt.subplots()
# test that kwargs normed and density cannot be set both.
with pytest.raises(Exception):
ax.hist((d1, d2), stacked=True, normed=normed, density=density)
@image_comparison(baseline_images=['hist_step_bottom'], extensions=['png'],
remove_text=True)
def test_hist_step_bottom():
# make some data
d1 = np.linspace(1, 3, 20)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(d1, bottom=np.arange(10), histtype="stepfilled")
@image_comparison(baseline_images=['hist_stacked_bar'])
def test_hist_stacked_bar():
# make some data
d = [[100, 100, 100, 100, 200, 320, 450, 80, 20, 600, 310, 800],
[20, 23, 50, 11, 100, 420], [120, 120, 120, 140, 140, 150, 180],
[60, 60, 60, 60, 300, 300, 5, 5, 5, 5, 10, 300],
[555, 555, 555, 30, 30, 30, 30, 30, 100, 100, 100, 100, 30, 30],
[30, 30, 30, 30, 400, 400, 400, 400, 400, 400, 400, 400]]
colors = [(0.5759849696758961, 1.0, 0.0), (0.0, 1.0, 0.350624650815206),
(0.0, 1.0, 0.6549834156005998), (0.0, 0.6569064625276622, 1.0),
(0.28302699607823545, 0.0, 1.0), (0.6849123462299822, 0.0, 1.0)]
labels = ['green', 'orange', ' yellow', 'magenta', 'black']
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(d, bins=10, histtype='barstacked', align='mid', color=colors,
label=labels)
ax.legend(loc='upper right', bbox_to_anchor=(1.0, 1.0), ncol=1)
def test_hist_emptydata():
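    # Mixing an empty dataset with non-empty ones should not raise.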
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist([[], range(10), range(10)], histtype="step")
def test_hist_labels():
# test singleton labels OK
fig, ax = plt.subplots()
l = ax.hist([0, 1], label=0)
assert l[2][0].get_label() == '0'
l = ax.hist([0, 1], label=[0])
assert l[2][0].get_label() == '0'
l = ax.hist([0, 1], label=None)
assert l[2][0].get_label() == '_nolegend_'
l = ax.hist([0, 1], label='0')
assert l[2][0].get_label() == '0'
l = ax.hist([0, 1], label='00')
assert l[2][0].get_label() == '00'
@image_comparison(baseline_images=['transparent_markers'], remove_text=True)
def test_transparent_markers():
np.random.seed(0)
data = np.random.random(50)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(data, 'D', mfc='none', markersize=100)
@image_comparison(baseline_images=['rgba_markers'], remove_text=True)
def test_rgba_markers():
fig, axs = plt.subplots(ncols=2)
rcolors = [(1, 0, 0, 1), (1, 0, 0, 0.5)]
bcolors = [(0, 0, 1, 1), (0, 0, 1, 0.5)]
alphas = [None, 0.2]
kw = dict(ms=100, mew=20)
for i, alpha in enumerate(alphas):
for j, rcolor in enumerate(rcolors):
for k, bcolor in enumerate(bcolors):
axs[i].plot(j+1, k+1, 'o', mfc=bcolor, mec=rcolor,
alpha=alpha, **kw)
axs[i].plot(j+1, k+3, 'x', mec=rcolor, alpha=alpha, **kw)
for ax in axs:
ax.axis([-1, 4, 0, 5])
@image_comparison(baseline_images=['mollweide_grid'], remove_text=True)
def test_mollweide_grid():
# test that both horizontal and vertical gridlines appear on the Mollweide
# projection
fig = plt.figure()
ax = fig.add_subplot(111, projection='mollweide')
ax.grid()
def test_mollweide_forward_inverse_closure():
# test that the round-trip Mollweide forward->inverse transformation is an
# approximate identity
fig = plt.figure()
ax = fig.add_subplot(111, projection='mollweide')
# set up 1-degree grid in longitude, latitude
lon = np.linspace(-np.pi, np.pi, 360)
lat = np.linspace(-np.pi / 2.0, np.pi / 2.0, 180)
lon, lat = np.meshgrid(lon, lat)
ll = np.vstack((lon.flatten(), lat.flatten())).T
# perform forward transform
xy = ax.transProjection.transform(ll)
# perform inverse transform
ll2 = ax.transProjection.inverted().transform(xy)
# compare
np.testing.assert_array_almost_equal(ll, ll2, 3)
def test_mollweide_inverse_forward_closure():
# test that the round-trip Mollweide inverse->forward transformation is an
# approximate identity
fig = plt.figure()
ax = fig.add_subplot(111, projection='mollweide')
# set up grid in x, y
x = np.linspace(0, 1, 500)
x, y = np.meshgrid(x, x)
xy = np.vstack((x.flatten(), y.flatten())).T
# perform inverse transform
ll = ax.transProjection.inverted().transform(xy)
# perform forward transform
xy2 = ax.transProjection.transform(ll)
# compare
np.testing.assert_array_almost_equal(xy, xy2, 3)
@image_comparison(baseline_images=['test_alpha'], remove_text=True)
def test_alpha():
np.random.seed(0)
data = np.random.random(50)
fig = plt.figure()
ax = fig.add_subplot(111)
# alpha=.5 markers, solid line
ax.plot(data, '-D', color=[1, 0, 0], mfc=[1, 0, 0, .5],
markersize=20, lw=10)
# everything solid by kwarg
ax.plot(data + 2, '-D', color=[1, 0, 0, .5], mfc=[1, 0, 0, .5],
markersize=20, lw=10,
alpha=1)
# everything alpha=.5 by kwarg
ax.plot(data + 4, '-D', color=[1, 0, 0], mfc=[1, 0, 0],
markersize=20, lw=10,
alpha=.5)
# everything alpha=.5 by colors
ax.plot(data + 6, '-D', color=[1, 0, 0, .5], mfc=[1, 0, 0, .5],
markersize=20, lw=10)
# alpha=.5 line, solid markers
ax.plot(data + 8, '-D', color=[1, 0, 0, .5], mfc=[1, 0, 0],
markersize=20, lw=10)
@image_comparison(baseline_images=['eventplot', 'eventplot'], remove_text=True)
def test_eventplot():
'''
test that eventplot produces the correct output
'''
np.random.seed(0)
data1 = np.random.random([32, 20]).tolist()
data2 = np.random.random([6, 20]).tolist()
data = data1 + data2
num_datasets = len(data)
colors1 = [[0, 1, .7]] * len(data1)
colors2 = [[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[1, .75, 0],
[1, 0, 1],
[0, 1, 1]]
colors = colors1 + colors2
lineoffsets1 = 12 + np.arange(0, len(data1)) * .33
lineoffsets2 = [-15, -3, 1, 1.5, 6, 10]
lineoffsets = lineoffsets1.tolist() + lineoffsets2
linelengths1 = [.33] * len(data1)
linelengths2 = [5, 2, 1, 1, 3, 1.5]
linelengths = linelengths1 + linelengths2
fig = plt.figure()
axobj = fig.add_subplot(111)
colls = axobj.eventplot(data, colors=colors, lineoffsets=lineoffsets,
linelengths=linelengths)
num_collections = len(colls)
assert num_collections == num_datasets
# Reuse testcase from above for a labeled data test
data = {"pos": data, "c": colors, "lo": lineoffsets, "ll": linelengths}
fig = plt.figure()
axobj = fig.add_subplot(111)
colls = axobj.eventplot("pos", colors="c", lineoffsets="lo",
linelengths="ll", data=data)
num_collections = len(colls)
assert num_collections == num_datasets
@image_comparison(baseline_images=['test_eventplot_defaults'],
extensions=['png'], remove_text=True)
def test_eventplot_defaults():
'''
test that eventplot produces the correct output given the default params
(see bug #3728)
'''
np.random.seed(0)
data1 = np.random.random([32, 20]).tolist()
data2 = np.random.random([6, 20]).tolist()
data = data1 + data2
fig = plt.figure()
axobj = fig.add_subplot(111)
colls = axobj.eventplot(data)
@pytest.mark.parametrize(('colors'), [
('0.5',), # string color with multiple characters: not OK before #8193 fix
('tab:orange', 'tab:pink', 'tab:cyan', 'bLacK'), # case-insensitive
('red', (0, 1, 0), None, (1, 0, 1, 0.5)), # a tricky case mixing types
('rgbk',) # len('rgbk') == len(data) and each character is a valid color
])
def test_eventplot_colors(colors):
'''Test the *colors* parameter of eventplot. Inspired by the issue #8193.
'''
data = [[i] for i in range(4)] # 4 successive events of different nature
# Build the list of the expected colors
expected = [c if c is not None else 'C0' for c in colors]
# Convert the list into an array of RGBA values
# NB: ['rgbk'] is not a valid argument for to_rgba_array, while 'rgbk' is.
if len(expected) == 1:
expected = expected[0]
expected = np.broadcast_to(mcolors.to_rgba_array(expected), (len(data), 4))
fig, ax = plt.subplots()
if len(colors) == 1: # tuple with a single string (like '0.5' or 'rgbk')
colors = colors[0]
collections = ax.eventplot(data, colors=colors)
for coll, color in zip(collections, expected):
assert_allclose(coll.get_color(), color)
@image_comparison(baseline_images=['test_eventplot_problem_kwargs'],
extensions=['png'], remove_text=True)
def test_eventplot_problem_kwargs():
'''
test that 'singular' versions of LineCollection props raise an
IgnoredKeywordWarning rather than overriding the 'plural' versions (e.g.
to prevent 'color' from overriding 'colors', see issue #4297)
'''
np.random.seed(0)
data1 = np.random.random([20]).tolist()
data2 = np.random.random([10]).tolist()
data = [data1, data2]
fig = plt.figure()
axobj = fig.add_subplot(111)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
colls = axobj.eventplot(data,
colors=['r', 'b'],
color=['c', 'm'],
linewidths=[2, 1],
linewidth=[1, 2],
linestyles=['solid', 'dashed'],
linestyle=['dashdot', 'dotted'])
# check that three IgnoredKeywordWarnings were raised
assert len(w) == 3
assert all(issubclass(wi.category, IgnoredKeywordWarning) for wi in w)
def test_empty_eventplot():
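    # A single empty event sequence should still draw without error.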
fig, ax = plt.subplots(1, 1)
ax.eventplot([[]], colors=[(0.0, 0.0, 0.0, 0.0)])
plt.draw()
@pytest.mark.parametrize('data, orientation', product(
([[]], [[], [0, 1]], [[0, 1], []]),
('_empty', 'vertical', 'horizontal', None, 'none')))
def test_eventplot_orientation(data, orientation):
"""Introduced when fixing issue #6412. """
opts = {} if orientation == "_empty" else {'orientation': orientation}
fig, ax = plt.subplots(1, 1)
ax.eventplot(data, **opts)
plt.draw()
@image_comparison(baseline_images=['marker_styles'], extensions=['png'],
remove_text=True)
def test_marker_styles():
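    # Draw every registered marker style once so the baseline image
    # covers them all.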
fig = plt.figure()
ax = fig.add_subplot(111)
for y, marker in enumerate(sorted(matplotlib.markers.MarkerStyle.markers,
key=lambda x: str(type(x))+str(x))):
ax.plot((y % 2)*5 + np.arange(10)*10, np.ones(10)*10*y, linestyle='',
marker=marker, markersize=10+y/5, label=marker)
@image_comparison(baseline_images=['rc_markerfill'], extensions=['png'])
def test_markers_fillstyle_rcparams():
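    # The markers.fillstyle rcParam should apply to the markers of
    # lines created afterwards.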
fig, ax = plt.subplots()
x = np.arange(7)
for idx, (style, marker) in enumerate(
[('top', 's'), ('bottom', 'o'), ('none', '^')]):
matplotlib.rcParams['markers.fillstyle'] = style
ax.plot(x+idx, marker=marker)
@image_comparison(baseline_images=['vertex_markers'], extensions=['png'],
remove_text=True)
def test_vertex_markers():
data = list(range(10))
marker_as_tuple = ((-1, -1), (1, -1), (1, 1), (-1, 1))
marker_as_list = [(-1, -1), (1, -1), (1, 1), (-1, 1)]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(data, linestyle='', marker=marker_as_tuple, mfc='k')
ax.plot(data[::-1], linestyle='', marker=marker_as_list, mfc='b')
ax.set_xlim([-1, 10])
ax.set_ylim([-1, 10])
@image_comparison(baseline_images=['vline_hline_zorder',
'errorbar_zorder'],
tol={'aarch64': 0.02}.get(platform.machine(), 0.0))
def test_eb_line_zorder():
x = list(range(10))
# First illustrate basic pyplot interface, using defaults where possible.
fig = plt.figure()
ax = fig.gca()
ax.plot(x, lw=10, zorder=5)
ax.axhline(1, color='red', lw=10, zorder=1)
ax.axhline(5, color='green', lw=10, zorder=10)
ax.axvline(7, color='m', lw=10, zorder=7)
ax.axvline(2, color='k', lw=10, zorder=3)
ax.set_title("axvline and axhline zorder test")
# Now switch to a more OO interface to exercise more features.
fig = plt.figure()
ax = fig.gca()
x = list(range(10))
y = np.zeros(10)
yerr = list(range(10))
ax.errorbar(x, y, yerr=yerr, zorder=5, lw=5, color='r')
for j in range(10):
ax.axhline(j, lw=5, color='k', zorder=j)
ax.axhline(-j, lw=5, color='k', zorder=j)
ax.set_title("errorbar zorder test")
@image_comparison(
baseline_images=['vlines_basic', 'vlines_with_nan', 'vlines_masked'],
extensions=['png']
)
def test_vlines():
# normal
x1 = [2, 3, 4, 5, 7]
y1 = [2, -6, 3, 8, 2]
fig1, ax1 = plt.subplots()
ax1.vlines(x1, 0, y1, colors='g', linewidth=5)
# GH #7406
x2 = [2, 3, 4, 5, 6, 7]
y2 = [2, -6, 3, 8, np.nan, 2]
fig2, (ax2, ax3, ax4) = plt.subplots(nrows=3, figsize=(4, 8))
ax2.vlines(x2, 0, y2, colors='g', linewidth=5)
x3 = [2, 3, 4, 5, 6, 7]
y3 = [np.nan, 2, -6, 3, 8, 2]
ax3.vlines(x3, 0, y3, colors='r', linewidth=3, linestyle='--')
x4 = [2, 3, 4, 5, 6, 7]
y4 = [np.nan, 2, -6, 3, 8, np.nan]
ax4.vlines(x4, 0, y4, colors='k', linewidth=2)
# tweak the x-axis so we can see the lines better
for ax in [ax1, ax2, ax3, ax4]:
ax.set_xlim(0, 10)
# check that the y-lims are all automatically the same
assert ax1.get_ylim() == ax2.get_ylim()
assert ax1.get_ylim() == ax3.get_ylim()
assert ax1.get_ylim() == ax4.get_ylim()
fig3, ax5 = plt.subplots()
x5 = np.ma.masked_equal([2, 4, 6, 8, 10, 12], 8)
ymin5 = np.ma.masked_equal([0, 1, -1, 0, 2, 1], 2)
ymax5 = np.ma.masked_equal([13, 14, 15, 16, 17, 18], 18)
ax5.vlines(x5, ymin5, ymax5, colors='k', linewidth=2)
ax5.set_xlim(0, 15)
@image_comparison(
baseline_images=['hlines_basic', 'hlines_with_nan', 'hlines_masked'],
extensions=['png']
)
def test_hlines():
# normal
y1 = [2, 3, 4, 5, 7]
x1 = [2, -6, 3, 8, 2]
fig1, ax1 = plt.subplots()
ax1.hlines(y1, 0, x1, colors='g', linewidth=5)
# GH #7406
y2 = [2, 3, 4, 5, 6, 7]
x2 = [2, -6, 3, 8, np.nan, 2]
fig2, (ax2, ax3, ax4) = plt.subplots(nrows=3, figsize=(4, 8))
ax2.hlines(y2, 0, x2, colors='g', linewidth=5)
y3 = [2, 3, 4, 5, 6, 7]
x3 = [np.nan, 2, -6, 3, 8, 2]
ax3.hlines(y3, 0, x3, colors='r', linewidth=3, linestyle='--')
y4 = [2, 3, 4, 5, 6, 7]
x4 = [np.nan, 2, -6, 3, 8, np.nan]
ax4.hlines(y4, 0, x4, colors='k', linewidth=2)
# tweak the y-axis so we can see the lines better
for ax in [ax1, ax2, ax3, ax4]:
ax.set_ylim(0, 10)
# check that the x-lims are all automatically the same
assert ax1.get_xlim() == ax2.get_xlim()
assert ax1.get_xlim() == ax3.get_xlim()
assert ax1.get_xlim() == ax4.get_xlim()
fig3, ax5 = plt.subplots()
y5 = np.ma.masked_equal([2, 4, 6, 8, 10, 12], 8)
xmin5 = np.ma.masked_equal([0, 1, -1, 0, 2, 1], 2)
xmax5 = np.ma.masked_equal([13, 14, 15, 16, 17, 18], 18)
ax5.hlines(y5, xmin5, xmax5, colors='k', linewidth=2)
ax5.set_ylim(0, 15)
@image_comparison(baseline_images=['step_linestyle', 'step_linestyle'],
remove_text=True)
def test_step_linestyle():
x = y = np.arange(10)
# First illustrate basic pyplot interface, using defaults where possible.
fig, ax_lst = plt.subplots(2, 2)
ax_lst = ax_lst.flatten()
ln_styles = ['-', '--', '-.', ':']
for ax, ls in zip(ax_lst, ln_styles):
ax.step(x, y, lw=5, linestyle=ls, where='pre')
ax.step(x, y + 1, lw=5, linestyle=ls, where='mid')
ax.step(x, y + 2, lw=5, linestyle=ls, where='post')
ax.set_xlim([-1, 5])
ax.set_ylim([-1, 7])
# Reuse testcase from above for a labeled data test
data = {"X": x, "Y0": y, "Y1": y+1, "Y2": y+2}
fig, ax_lst = plt.subplots(2, 2)
ax_lst = ax_lst.flatten()
ln_styles = ['-', '--', '-.', ':']
for ax, ls in zip(ax_lst, ln_styles):
ax.step("X", "Y0", lw=5, linestyle=ls, where='pre', data=data)
ax.step("X", "Y1", lw=5, linestyle=ls, where='mid', data=data)
ax.step("X", "Y2", lw=5, linestyle=ls, where='post', data=data)
ax.set_xlim([-1, 5])
ax.set_ylim([-1, 7])
@image_comparison(baseline_images=['mixed_collection'], remove_text=True)
def test_mixed_collection():
from matplotlib import patches
from matplotlib import collections
x = list(range(10))
# First illustrate basic pyplot interface, using defaults where possible.
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
c = patches.Circle((8, 8), radius=4, facecolor='none', edgecolor='green')
# PDF can optimize this one
p1 = collections.PatchCollection([c], match_original=True)
p1.set_offsets([[0, 0], [24, 24]])
p1.set_linewidths([1, 5])
# PDF can't optimize this one, because the alpha of the edge changes
p2 = collections.PatchCollection([c], match_original=True)
p2.set_offsets([[48, 0], [-32, -16]])
p2.set_linewidths([1, 5])
p2.set_edgecolors([[0, 0, 0.1, 1.0], [0, 0, 0.1, 0.5]])
ax.patch.set_color('0.5')
ax.add_collection(p1)
ax.add_collection(p2)
ax.set_xlim(0, 16)
ax.set_ylim(0, 16)
def test_subplot_key_hash():
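    # subplot() coerces its numeric arguments to ints, so this should
    # resolve to the first slot of a 5x1 grid.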
ax = plt.subplot(np.float64(5.5), np.int64(1), np.float64(1.2))
ax.twinx()
assert ax.get_subplotspec().get_geometry() == (5, 1, 0, 0)
@image_comparison(baseline_images=['specgram_freqs',
'specgram_freqs_linear'],
remove_text=True, extensions=['png'], tol=0.07,
style='default')
def test_specgram_freqs():
'''test axes.specgram in default (psd) mode with sinusoidal stimuli'''
n = 1000
Fs = 10.
fstims1 = [Fs/4, Fs/5, Fs/11]
fstims2 = [Fs/4.7, Fs/5.6, Fs/11.9]
NFFT = int(10 * Fs / min(fstims1 + fstims2))
noverlap = int(NFFT / 2)
pad_to = int(2 ** np.ceil(np.log2(NFFT)))
x = np.arange(0, n, 1/Fs)
y1 = np.zeros(x.size)
y2 = np.zeros(x.size)
for fstim1, fstim2 in zip(fstims1, fstims2):
y1 += np.sin(fstim1 * x * np.pi * 2)
y2 += np.sin(fstim2 * x * np.pi * 2)
y = np.hstack([y1, y2])
fig1 = plt.figure()
fig2 = plt.figure()
ax11 = fig1.add_subplot(3, 1, 1)
ax12 = fig1.add_subplot(3, 1, 2)
ax13 = fig1.add_subplot(3, 1, 3)
ax21 = fig2.add_subplot(3, 1, 1)
ax22 = fig2.add_subplot(3, 1, 2)
ax23 = fig2.add_subplot(3, 1, 3)
spec11 = ax11.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='default')
spec12 = ax12.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='onesided')
spec13 = ax13.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='twosided')
spec21 = ax21.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='default',
scale='linear', norm=matplotlib.colors.LogNorm())
spec22 = ax22.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='onesided',
scale='linear', norm=matplotlib.colors.LogNorm())
spec23 = ax23.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='twosided',
scale='linear', norm=matplotlib.colors.LogNorm())
@image_comparison(baseline_images=['specgram_noise',
'specgram_noise_linear'],
remove_text=True, extensions=['png'], tol=0.01,
style='default')
def test_specgram_noise():
'''test axes.specgram in default (psd) mode with noise stimuli'''
np.random.seed(0)
n = 1000
Fs = 10.
NFFT = int(10 * Fs / 11)
noverlap = int(NFFT / 2)
pad_to = int(2 ** np.ceil(np.log2(NFFT)))
y1 = np.random.standard_normal(n)
y2 = np.random.rand(n)
y = np.hstack([y1, y2])
fig1 = plt.figure()
fig2 = plt.figure()
ax11 = fig1.add_subplot(3, 1, 1)
ax12 = fig1.add_subplot(3, 1, 2)
ax13 = fig1.add_subplot(3, 1, 3)
ax21 = fig2.add_subplot(3, 1, 1)
ax22 = fig2.add_subplot(3, 1, 2)
ax23 = fig2.add_subplot(3, 1, 3)
spec11 = ax11.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='default')
spec12 = ax12.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='onesided')
spec13 = ax13.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='twosided')
spec21 = ax21.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='default',
scale='linear', norm=matplotlib.colors.LogNorm())
spec22 = ax22.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='onesided',
scale='linear', norm=matplotlib.colors.LogNorm())
spec23 = ax23.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='twosided',
scale='linear', norm=matplotlib.colors.LogNorm())
@image_comparison(baseline_images=['specgram_magnitude_freqs',
'specgram_magnitude_freqs_linear'],
remove_text=True, extensions=['png'], tol=0.07,
style='default')
def test_specgram_magnitude_freqs():
'''test axes.specgram in magnitude mode with sinusoidal stimuli'''
n = 1000
Fs = 10.
fstims1 = [Fs/4, Fs/5, Fs/11]
fstims2 = [Fs/4.7, Fs/5.6, Fs/11.9]
NFFT = int(100 * Fs / min(fstims1 + fstims2))
noverlap = int(NFFT / 2)
pad_to = int(2 ** np.ceil(np.log2(NFFT)))
x = np.arange(0, n, 1/Fs)
y1 = np.zeros(x.size)
y2 = np.zeros(x.size)
for i, (fstim1, fstim2) in enumerate(zip(fstims1, fstims2)):
y1 += np.sin(fstim1 * x * np.pi * 2)
y2 += np.sin(fstim2 * x * np.pi * 2)
y1[-1] = y1[-1]/y1[-1]
y2[-1] = y2[-1]/y2[-1]
y = np.hstack([y1, y2])
fig1 = plt.figure()
fig2 = plt.figure()
ax11 = fig1.add_subplot(3, 1, 1)
ax12 = fig1.add_subplot(3, 1, 2)
ax13 = fig1.add_subplot(3, 1, 3)
ax21 = fig2.add_subplot(3, 1, 1)
ax22 = fig2.add_subplot(3, 1, 2)
ax23 = fig2.add_subplot(3, 1, 3)
spec11 = ax11.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='default', mode='magnitude')
spec12 = ax12.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='onesided', mode='magnitude')
spec13 = ax13.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='twosided', mode='magnitude')
spec21 = ax21.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='default', mode='magnitude',
scale='linear', norm=matplotlib.colors.LogNorm())
spec22 = ax22.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='onesided', mode='magnitude',
scale='linear', norm=matplotlib.colors.LogNorm())
spec23 = ax23.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='twosided', mode='magnitude',
scale='linear', norm=matplotlib.colors.LogNorm())
@image_comparison(baseline_images=['specgram_magnitude_noise',
'specgram_magnitude_noise_linear'],
remove_text=True, extensions=['png'],
style='default')
def test_specgram_magnitude_noise():
'''test axes.specgram in magnitude mode with noise stimuli'''
np.random.seed(0)
n = 1000
Fs = 10.
NFFT = int(10 * Fs / 11)
noverlap = int(NFFT / 2)
pad_to = int(2 ** np.ceil(np.log2(NFFT)))
y1 = np.random.standard_normal(n)
y2 = np.random.rand(n)
y = np.hstack([y1, y2])
fig1 = plt.figure()
fig2 = plt.figure()
ax11 = fig1.add_subplot(3, 1, 1)
ax12 = fig1.add_subplot(3, 1, 2)
ax13 = fig1.add_subplot(3, 1, 3)
ax21 = fig2.add_subplot(3, 1, 1)
ax22 = fig2.add_subplot(3, 1, 2)
ax23 = fig2.add_subplot(3, 1, 3)
spec11 = ax11.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='default', mode='magnitude')
spec12 = ax12.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='onesided', mode='magnitude')
spec13 = ax13.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='twosided', mode='magnitude')
spec21 = ax21.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='default', mode='magnitude',
scale='linear', norm=matplotlib.colors.LogNorm())
spec22 = ax22.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='onesided', mode='magnitude',
scale='linear', norm=matplotlib.colors.LogNorm())
spec23 = ax23.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='twosided', mode='magnitude',
scale='linear', norm=matplotlib.colors.LogNorm())
@image_comparison(baseline_images=['specgram_angle_freqs'],
remove_text=True, extensions=['png'], tol=0.007,
style='default')
def test_specgram_angle_freqs():
'''test axes.specgram in angle mode with sinusoidal stimuli'''
n = 1000
Fs = 10.
fstims1 = [Fs/4, Fs/5, Fs/11]
fstims2 = [Fs/4.7, Fs/5.6, Fs/11.9]
NFFT = int(10 * Fs / min(fstims1 + fstims2))
noverlap = int(NFFT / 2)
pad_to = int(2 ** np.ceil(np.log2(NFFT)))
x = np.arange(0, n, 1/Fs)
y1 = np.zeros(x.size)
y2 = np.zeros(x.size)
for i, (fstim1, fstim2) in enumerate(zip(fstims1, fstims2)):
y1 += np.sin(fstim1 * x * np.pi * 2)
y2 += np.sin(fstim2 * x * np.pi * 2)
y1[-1] = y1[-1]/y1[-1]
y2[-1] = y2[-1]/y2[-1]
y = np.hstack([y1, y2])
fig1 = plt.figure()
ax11 = fig1.add_subplot(3, 1, 1)
ax12 = fig1.add_subplot(3, 1, 2)
ax13 = fig1.add_subplot(3, 1, 3)
spec11 = ax11.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='default', mode='angle')
spec12 = ax12.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='onesided', mode='angle')
spec13 = ax13.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='twosided', mode='angle')
with pytest.raises(ValueError):
ax11.specgram(y, NFFT=NFFT, Fs=Fs,
noverlap=noverlap, pad_to=pad_to, sides='default',
mode='phase', scale='dB')
with pytest.raises(ValueError):
ax12.specgram(y, NFFT=NFFT, Fs=Fs,
noverlap=noverlap, pad_to=pad_to, sides='onesided',
mode='phase', scale='dB')
with pytest.raises(ValueError):
ax13.specgram(y, NFFT=NFFT, Fs=Fs,
noverlap=noverlap, pad_to=pad_to, sides='twosided',
mode='phase', scale='dB')
@image_comparison(baseline_images=['specgram_angle_noise'],
remove_text=True, extensions=['png'],
style='default')
def test_specgram_noise_angle():
'''test axes.specgram in angle mode with noise stimuli'''
np.random.seed(0)
n = 1000
Fs = 10.
NFFT = int(10 * Fs / 11)
noverlap = int(NFFT / 2)
pad_to = int(2 ** np.ceil(np.log2(NFFT)))
y1 = np.random.standard_normal(n)
y2 = np.random.rand(n)
y = np.hstack([y1, y2])
fig1 = plt.figure()
ax11 = fig1.add_subplot(3, 1, 1)
ax12 = fig1.add_subplot(3, 1, 2)
ax13 = fig1.add_subplot(3, 1, 3)
spec11 = ax11.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='default', mode='angle')
spec12 = ax12.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='onesided', mode='angle')
spec13 = ax13.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='twosided', mode='angle')
with pytest.raises(ValueError):
ax11.specgram(y, NFFT=NFFT, Fs=Fs,
noverlap=noverlap, pad_to=pad_to, sides='default',
mode='phase', scale='dB')
with pytest.raises(ValueError):
ax12.specgram(y, NFFT=NFFT, Fs=Fs,
noverlap=noverlap, pad_to=pad_to, sides='onesided',
mode='phase', scale='dB')
with pytest.raises(ValueError):
ax13.specgram(y, NFFT=NFFT, Fs=Fs,
noverlap=noverlap, pad_to=pad_to, sides='twosided',
mode='phase', scale='dB')
@image_comparison(baseline_images=['specgram_phase_freqs'],
remove_text=True, extensions=['png'],
style='default')
def test_specgram_freqs_phase():
'''test axes.specgram in phase mode with sinusoidal stimuli'''
n = 1000
Fs = 10.
fstims1 = [Fs/4, Fs/5, Fs/11]
fstims2 = [Fs/4.7, Fs/5.6, Fs/11.9]
NFFT = int(10 * Fs / min(fstims1 + fstims2))
noverlap = int(NFFT / 2)
pad_to = int(2 ** np.ceil(np.log2(NFFT)))
x = np.arange(0, n, 1/Fs)
y1 = np.zeros(x.size)
y2 = np.zeros(x.size)
for i, (fstim1, fstim2) in enumerate(zip(fstims1, fstims2)):
y1 += np.sin(fstim1 * x * np.pi * 2)
y2 += np.sin(fstim2 * x * np.pi * 2)
y1[-1] = y1[-1]/y1[-1]
y2[-1] = y2[-1]/y2[-1]
y = np.hstack([y1, y2])
fig1 = plt.figure()
ax11 = fig1.add_subplot(3, 1, 1)
ax12 = fig1.add_subplot(3, 1, 2)
ax13 = fig1.add_subplot(3, 1, 3)
spec11 = ax11.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='default', mode='phase')
spec12 = ax12.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='onesided', mode='phase')
spec13 = ax13.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='twosided', mode='phase')
with pytest.raises(ValueError):
ax11.specgram(y, NFFT=NFFT, Fs=Fs,
noverlap=noverlap, pad_to=pad_to, sides='default',
mode='phase', scale='dB')
with pytest.raises(ValueError):
ax12.specgram(y, NFFT=NFFT, Fs=Fs,
noverlap=noverlap, pad_to=pad_to, sides='onesided',
mode='phase', scale='dB')
with pytest.raises(ValueError):
ax13.specgram(y, NFFT=NFFT, Fs=Fs,
noverlap=noverlap, pad_to=pad_to, sides='twosided',
mode='phase', scale='dB')
@image_comparison(baseline_images=['specgram_phase_noise'],
remove_text=True, extensions=['png'],
style='default')
def test_specgram_noise_phase():
'''test axes.specgram in phase mode with noise stimuli'''
np.random.seed(0)
n = 1000
Fs = 10.
NFFT = int(10 * Fs / 11)
noverlap = int(NFFT / 2)
pad_to = int(2 ** np.ceil(np.log2(NFFT)))
y1 = np.random.standard_normal(n)
y2 = np.random.rand(n)
y = np.hstack([y1, y2])
fig1 = plt.figure()
ax11 = fig1.add_subplot(3, 1, 1)
ax12 = fig1.add_subplot(3, 1, 2)
ax13 = fig1.add_subplot(3, 1, 3)
spec11 = ax11.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='default',
mode='phase', )
spec12 = ax12.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='onesided',
mode='phase', )
spec13 = ax13.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='twosided',
mode='phase', )
with pytest.raises(ValueError):
ax11.specgram(y, NFFT=NFFT, Fs=Fs,
noverlap=noverlap, pad_to=pad_to, sides='default',
mode='phase', scale='dB')
with pytest.raises(ValueError):
ax12.specgram(y, NFFT=NFFT, Fs=Fs,
noverlap=noverlap, pad_to=pad_to, sides='onesided',
mode='phase', scale='dB')
with pytest.raises(ValueError):
ax13.specgram(y, NFFT=NFFT, Fs=Fs,
noverlap=noverlap, pad_to=pad_to, sides='twosided',
mode='phase', scale='dB')
@image_comparison(baseline_images=['psd_freqs'], remove_text=True,
extensions=['png'])
def test_psd_freqs():
'''test axes.psd with sinusoidal stimuli'''
n = 10000
Fs = 100.
fstims1 = [Fs/4, Fs/5, Fs/11]
fstims2 = [Fs/4.7, Fs/5.6, Fs/11.9]
NFFT = int(1000 * Fs / min(fstims1 + fstims2))
noverlap = int(NFFT / 2)
pad_to = int(2 ** np.ceil(np.log2(NFFT)))
x = np.arange(0, n, 1/Fs)
y1 = np.zeros(x.size)
y2 = np.zeros(x.size)
for fstim1, fstim2 in zip(fstims1, fstims2):
y1 += np.sin(fstim1 * x * np.pi * 2)
y2 += np.sin(fstim2 * x * np.pi * 2)
y = np.hstack([y1, y2])
fig = plt.figure()
ax1 = fig.add_subplot(3, 1, 1)
ax2 = fig.add_subplot(3, 1, 2)
ax3 = fig.add_subplot(3, 1, 3)
psd1, freqs1 = ax1.psd(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='default')
psd2, freqs2 = ax2.psd(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='onesided',
return_line=False)
psd3, freqs3, line3 = ax3.psd(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='twosided',
return_line=True)
ax1.set_xlabel('')
ax2.set_xlabel('')
ax3.set_xlabel('')
ax1.set_ylabel('')
ax2.set_ylabel('')
ax3.set_ylabel('')
@image_comparison(baseline_images=['psd_noise'], remove_text=True,
extensions=['png'])
def test_psd_noise():
'''test axes.psd with noise stimuli'''
np.random.seed(0)
n = 10000
Fs = 100.
NFFT = int(1000 * Fs / 11)
noverlap = int(NFFT / 2)
pad_to = int(2 ** np.ceil(np.log2(NFFT)))
y1 = np.random.standard_normal(n)
y2 = np.random.rand(n)
y = np.hstack([y1, y2])
fig = plt.figure()
ax1 = fig.add_subplot(3, 1, 1)
ax2 = fig.add_subplot(3, 1, 2)
ax3 = fig.add_subplot(3, 1, 3)
psd1, freqs1 = ax1.psd(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='default')
psd2, freqs2 = ax2.psd(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='onesided',
return_line=False)
psd3, freqs3, line3 = ax3.psd(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='twosided',
return_line=True)
ax1.set_xlabel('')
ax2.set_xlabel('')
ax3.set_xlabel('')
ax1.set_ylabel('')
ax2.set_ylabel('')
ax3.set_ylabel('')
@image_comparison(baseline_images=['csd_freqs'], remove_text=True,
extensions=['png'], tol=0.002)
def test_csd_freqs():
'''test axes.csd with sinusoidal stimuli'''
n = 10000
Fs = 100.
fstims1 = [Fs/4, Fs/5, Fs/11]
fstims2 = [Fs/4.7, Fs/5.6, Fs/11.9]
NFFT = int(1000 * Fs / min(fstims1 + fstims2))
noverlap = int(NFFT / 2)
pad_to = int(2 ** np.ceil(np.log2(NFFT)))
x = np.arange(0, n, 1/Fs)
y1 = np.zeros(x.size)
y2 = np.zeros(x.size)
for fstim1, fstim2 in zip(fstims1, fstims2):
y1 += np.sin(fstim1 * x * np.pi * 2)
y2 += np.sin(fstim2 * x * np.pi * 2)
fig = plt.figure()
ax1 = fig.add_subplot(3, 1, 1)
ax2 = fig.add_subplot(3, 1, 2)
ax3 = fig.add_subplot(3, 1, 3)
csd1, freqs1 = ax1.csd(y1, y2, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='default')
csd2, freqs2 = ax2.csd(y1, y2, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='onesided',
return_line=False)
csd3, freqs3, line3 = ax3.csd(y1, y2, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='twosided',
return_line=True)
ax1.set_xlabel('')
ax2.set_xlabel('')
ax3.set_xlabel('')
ax1.set_ylabel('')
ax2.set_ylabel('')
ax3.set_ylabel('')
@image_comparison(baseline_images=['csd_noise'], remove_text=True,
extensions=['png'])
def test_csd_noise():
'''test axes.csd with noise stimuli'''
np.random.seed(0)
n = 10000
Fs = 100.
NFFT = int(1000 * Fs / 11)
noverlap = int(NFFT / 2)
pad_to = int(2 ** np.ceil(np.log2(NFFT)))
y1 = np.random.standard_normal(n)
y2 = np.random.rand(n)
fig = plt.figure()
ax1 = fig.add_subplot(3, 1, 1)
ax2 = fig.add_subplot(3, 1, 2)
ax3 = fig.add_subplot(3, 1, 3)
csd1, freqs1 = ax1.csd(y1, y2, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='default')
csd2, freqs2 = ax2.csd(y1, y2, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='onesided',
return_line=False)
csd3, freqs3, line3 = ax3.csd(y1, y2, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides='twosided',
return_line=True)
ax1.set_xlabel('')
ax2.set_xlabel('')
ax3.set_xlabel('')
ax1.set_ylabel('')
ax2.set_ylabel('')
ax3.set_ylabel('')
@image_comparison(baseline_images=['magnitude_spectrum_freqs_linear',
'magnitude_spectrum_freqs_dB'],
remove_text=True,
extensions=['png'])
def test_magnitude_spectrum_freqs():
'''test axes.magnitude_spectrum with sinusoidal stimuli'''
n = 10000
Fs = 100.
fstims1 = [Fs/4, Fs/5, Fs/11]
NFFT = int(1000 * Fs / min(fstims1))
pad_to = int(2 ** np.ceil(np.log2(NFFT)))
x = np.arange(0, n, 1/Fs)
y = np.zeros(x.size)
for i, fstim1 in enumerate(fstims1):
y += np.sin(fstim1 * x * np.pi * 2) * 10**i
fig1 = plt.figure()
fig2 = plt.figure()
ax11 = fig1.add_subplot(3, 1, 1)
ax12 = fig1.add_subplot(3, 1, 2)
ax13 = fig1.add_subplot(3, 1, 3)
ax21 = fig2.add_subplot(3, 1, 1)
ax22 = fig2.add_subplot(3, 1, 2)
ax23 = fig2.add_subplot(3, 1, 3)
spec11, freqs11, line11 = ax11.magnitude_spectrum(y, Fs=Fs, pad_to=pad_to,
sides='default')
spec12, freqs12, line12 = ax12.magnitude_spectrum(y, Fs=Fs, pad_to=pad_to,
sides='onesided')
spec13, freqs13, line13 = ax13.magnitude_spectrum(y, Fs=Fs, pad_to=pad_to,
sides='twosided')
spec21, freqs21, line21 = ax21.magnitude_spectrum(y, Fs=Fs, pad_to=pad_to,
sides='default',
scale='dB')
spec22, freqs22, line22 = ax22.magnitude_spectrum(y, Fs=Fs, pad_to=pad_to,
sides='onesided',
scale='dB')
spec23, freqs23, line23 = ax23.magnitude_spectrum(y, Fs=Fs, pad_to=pad_to,
sides='twosided',
scale='dB')
ax11.set_xlabel('')
ax12.set_xlabel('')
ax13.set_xlabel('')
ax11.set_ylabel('')
ax12.set_ylabel('')
ax13.set_ylabel('')
ax21.set_xlabel('')
ax22.set_xlabel('')
ax23.set_xlabel('')
ax21.set_ylabel('')
ax22.set_ylabel('')
ax23.set_ylabel('')
@image_comparison(baseline_images=['magnitude_spectrum_noise_linear',
'magnitude_spectrum_noise_dB'],
remove_text=True,
extensions=['png'])
def test_magnitude_spectrum_noise():
'''test axes.magnitude_spectrum with noise stimuli'''
np.random.seed(0)
n = 10000
Fs = 100.
NFFT = int(1000 * Fs / 11)
pad_to = int(2 ** np.ceil(np.log2(NFFT)))
y1 = np.random.standard_normal(n)
y2 = np.random.rand(n)
y = np.hstack([y1, y2]) - .5
fig1 = plt.figure()
fig2 = plt.figure()
ax11 = fig1.add_subplot(3, 1, 1)
ax12 = fig1.add_subplot(3, 1, 2)
ax13 = fig1.add_subplot(3, 1, 3)
ax21 = fig2.add_subplot(3, 1, 1)
ax22 = fig2.add_subplot(3, 1, 2)
ax23 = fig2.add_subplot(3, 1, 3)
spec11, freqs11, line11 = ax11.magnitude_spectrum(y, Fs=Fs, pad_to=pad_to,
sides='default')
spec12, freqs12, line12 = ax12.magnitude_spectrum(y, Fs=Fs, pad_to=pad_to,
sides='onesided')
spec13, freqs13, line13 = ax13.magnitude_spectrum(y, Fs=Fs, pad_to=pad_to,
sides='twosided')
spec21, freqs21, line21 = ax21.magnitude_spectrum(y, Fs=Fs, pad_to=pad_to,
sides='default',
scale='dB')
spec22, freqs22, line22 = ax22.magnitude_spectrum(y, Fs=Fs, pad_to=pad_to,
sides='onesided',
scale='dB')
spec23, freqs23, line23 = ax23.magnitude_spectrum(y, Fs=Fs, pad_to=pad_to,
sides='twosided',
scale='dB')
ax11.set_xlabel('')
ax12.set_xlabel('')
ax13.set_xlabel('')
ax11.set_ylabel('')
ax12.set_ylabel('')
ax13.set_ylabel('')
ax21.set_xlabel('')
ax22.set_xlabel('')
ax23.set_xlabel('')
ax21.set_ylabel('')
ax22.set_ylabel('')
ax23.set_ylabel('')
@image_comparison(baseline_images=['angle_spectrum_freqs'],
remove_text=True,
extensions=['png'])
def test_angle_spectrum_freqs():
'''test axes.angle_spectrum with sinusoidal stimuli'''
n = 10000
Fs = 100.
fstims1 = [Fs/4, Fs/5, Fs/11]
NFFT = int(1000 * Fs / min(fstims1))
pad_to = int(2 ** np.ceil(np.log2(NFFT)))
x = np.arange(0, n, 1/Fs)
y = np.zeros(x.size)
for i, fstim1 in enumerate(fstims1):
y += np.sin(fstim1 * x * np.pi * 2) * 10**i
fig = plt.figure()
ax1 = fig.add_subplot(3, 1, 1)
ax2 = fig.add_subplot(3, 1, 2)
ax3 = fig.add_subplot(3, 1, 3)
spec1, freqs1, line1 = ax1.angle_spectrum(y, Fs=Fs, pad_to=pad_to,
sides='default')
spec2, freqs2, line2 = ax2.angle_spectrum(y, Fs=Fs, pad_to=pad_to,
sides='onesided')
spec3, freqs3, line3 = ax3.angle_spectrum(y, Fs=Fs, pad_to=pad_to,
sides='twosided')
ax1.set_xlabel('')
ax2.set_xlabel('')
ax3.set_xlabel('')
ax1.set_ylabel('')
ax2.set_ylabel('')
ax3.set_ylabel('')
@image_comparison(baseline_images=['angle_spectrum_noise'],
remove_text=True,
extensions=['png'])
def test_angle_spectrum_noise():
'''test axes.angle_spectrum with noise stimuli'''
np.random.seed(0)
n = 10000
Fs = 100.
NFFT = int(1000 * Fs / 11)
pad_to = int(2 ** np.ceil(np.log2(NFFT)))
y1 = np.random.standard_normal(n)
y2 = np.random.rand(n)
y = np.hstack([y1, y2]) - .5
fig = plt.figure()
ax1 = fig.add_subplot(3, 1, 1)
ax2 = fig.add_subplot(3, 1, 2)
ax3 = fig.add_subplot(3, 1, 3)
spec1, freqs1, line1 = ax1.angle_spectrum(y, Fs=Fs, pad_to=pad_to,
sides='default')
spec2, freqs2, line2 = ax2.angle_spectrum(y, Fs=Fs, pad_to=pad_to,
sides='onesided')
spec3, freqs3, line3 = ax3.angle_spectrum(y, Fs=Fs, pad_to=pad_to,
sides='twosided')
ax1.set_xlabel('')
ax2.set_xlabel('')
ax3.set_xlabel('')
ax1.set_ylabel('')
ax2.set_ylabel('')
ax3.set_ylabel('')
@image_comparison(baseline_images=['phase_spectrum_freqs'],
remove_text=True,
extensions=['png'])
def test_phase_spectrum_freqs():
'''test axes.phase_spectrum with sinusoidal stimuli'''
n = 10000
Fs = 100.
fstims1 = [Fs/4, Fs/5, Fs/11]
NFFT = int(1000 * Fs / min(fstims1))
pad_to = int(2 ** np.ceil(np.log2(NFFT)))
x = np.arange(0, n, 1/Fs)
y = np.zeros(x.size)
for i, fstim1 in enumerate(fstims1):
y += np.sin(fstim1 * x * np.pi * 2) * 10**i
fig = plt.figure()
ax1 = fig.add_subplot(3, 1, 1)
ax2 = fig.add_subplot(3, 1, 2)
ax3 = fig.add_subplot(3, 1, 3)
spec1, freqs1, line1 = ax1.phase_spectrum(y, Fs=Fs, pad_to=pad_to,
sides='default')
spec2, freqs2, line2 = ax2.phase_spectrum(y, Fs=Fs, pad_to=pad_to,
sides='onesided')
spec3, freqs3, line3 = ax3.phase_spectrum(y, Fs=Fs, pad_to=pad_to,
sides='twosided')
ax1.set_xlabel('')
ax2.set_xlabel('')
ax3.set_xlabel('')
ax1.set_ylabel('')
ax2.set_ylabel('')
ax3.set_ylabel('')
@image_comparison(baseline_images=['phase_spectrum_noise'],
remove_text=True,
extensions=['png'])
def test_phase_spectrum_noise():
'''test axes.phase_spectrum with noise stimuli'''
np.random.seed(0)
n = 10000
Fs = 100.
NFFT = int(1000 * Fs / 11)
pad_to = int(2 ** np.ceil(np.log2(NFFT)))
y1 = np.random.standard_normal(n)
y2 = np.random.rand(n)
y = np.hstack([y1, y2]) - .5
fig = plt.figure()
ax1 = fig.add_subplot(3, 1, 1)
ax2 = fig.add_subplot(3, 1, 2)
ax3 = fig.add_subplot(3, 1, 3)
spec1, freqs1, line1 = ax1.phase_spectrum(y, Fs=Fs, pad_to=pad_to,
sides='default')
spec2, freqs2, line2 = ax2.phase_spectrum(y, Fs=Fs, pad_to=pad_to,
sides='onesided')
spec3, freqs3, line3 = ax3.phase_spectrum(y, Fs=Fs, pad_to=pad_to,
sides='twosided')
ax1.set_xlabel('')
ax2.set_xlabel('')
ax3.set_xlabel('')
ax1.set_ylabel('')
ax2.set_ylabel('')
ax3.set_ylabel('')
@image_comparison(baseline_images=['twin_spines'], remove_text=True,
extensions=['png'])
def test_twin_spines():
def make_patch_spines_invisible(ax):
ax.set_frame_on(True)
ax.patch.set_visible(False)
for sp in ax.spines.values():
sp.set_visible(False)
fig = plt.figure(figsize=(4, 3))
fig.subplots_adjust(right=0.75)
host = fig.add_subplot(111)
par1 = host.twinx()
par2 = host.twinx()
# Offset the right spine of par2. The ticks and label have already been
# placed on the right by twinx above.
par2.spines["right"].set_position(("axes", 1.2))
# Having been created by twinx, par2 has its frame off, so the line of
# its detached spine is invisible. First, activate the frame but make
# the patch and spines invisible.
make_patch_spines_invisible(par2)
# Second, show the right spine.
par2.spines["right"].set_visible(True)
p1, = host.plot([0, 1, 2], [0, 1, 2], "b-")
p2, = par1.plot([0, 1, 2], [0, 3, 2], "r-")
p3, = par2.plot([0, 1, 2], [50, 30, 15], "g-")
host.set_xlim(0, 2)
host.set_ylim(0, 2)
par1.set_ylim(0, 4)
par2.set_ylim(1, 65)
host.yaxis.label.set_color(p1.get_color())
par1.yaxis.label.set_color(p2.get_color())
par2.yaxis.label.set_color(p3.get_color())
tkw = dict(size=4, width=1.5)
host.tick_params(axis='y', colors=p1.get_color(), **tkw)
par1.tick_params(axis='y', colors=p2.get_color(), **tkw)
par2.tick_params(axis='y', colors=p3.get_color(), **tkw)
host.tick_params(axis='x', **tkw)
@image_comparison(baseline_images=['twin_spines_on_top', 'twin_spines_on_top'],
extensions=['png'], remove_text=True)
def test_twin_spines_on_top():
matplotlib.rcParams['axes.linewidth'] = 48.0
matplotlib.rcParams['lines.linewidth'] = 48.0
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
data = np.array([[1000, 1100, 1200, 1250],
[310, 301, 360, 400]])
ax2 = ax1.twinx()
ax1.plot(data[0], data[1]/1E3, color='#BEAED4')
ax1.fill_between(data[0], data[1]/1E3, color='#BEAED4', alpha=.8)
ax2.plot(data[0], data[1]/1E3, color='#7FC97F')
ax2.fill_between(data[0], data[1]/1E3, color='#7FC97F', alpha=.5)
# Reuse testcase from above for a labeled data test
data = {"i": data[0], "j": data[1]/1E3}
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
ax2 = ax1.twinx()
ax1.plot("i", "j", color='#BEAED4', data=data)
ax1.fill_between("i", "j", color='#BEAED4', alpha=.8, data=data)
ax2.plot("i", "j", color='#7FC97F', data=data)
ax2.fill_between("i", "j", color='#7FC97F', alpha=.5, data=data)
def test_rcparam_grid_minor():
    orig_grid = matplotlib.rcParams['axes.grid']
    orig_which = matplotlib.rcParams['axes.grid.which']
    matplotlib.rcParams['axes.grid'] = True
    values = (
        ('both', (True, True)),
        ('major', (True, False)),
        ('minor', (False, True))
    )
    for which, result in values:
        matplotlib.rcParams['axes.grid.which'] = which
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        assert (ax.xaxis._gridOnMajor, ax.xaxis._gridOnMinor) == result
    matplotlib.rcParams['axes.grid'] = orig_grid
    matplotlib.rcParams['axes.grid.which'] = orig_which
def test_vline_limit():
fig = plt.figure()
ax = fig.gca()
ax.axvline(0.5)
ax.plot([-0.1, 0, 0.2, 0.1])
    assert_allclose(ax.get_ylim(), (-.1, .2))
def test_empty_shared_subplots():
# empty plots with shared axes inherit limits from populated plots
fig, axes = plt.subplots(nrows=1, ncols=2, sharex=True, sharey=True)
axes[0].plot([1, 2, 3], [2, 4, 6])
x0, x1 = axes[1].get_xlim()
y0, y1 = axes[1].get_ylim()
assert x0 <= 1
assert x1 >= 3
assert y0 <= 2
assert y1 >= 6
def test_shared_with_aspect_1():
# allow sharing one axis
for adjustable in ['box', 'datalim']:
fig, axes = plt.subplots(nrows=2, sharex=True)
axes[0].set_aspect(2, adjustable=adjustable, share=True)
assert axes[1].get_aspect() == 2
assert axes[1].get_adjustable() == adjustable
fig, axes = plt.subplots(nrows=2, sharex=True)
axes[0].set_aspect(2, adjustable=adjustable)
assert axes[1].get_aspect() == 'auto'
def test_shared_with_aspect_2():
# Share 2 axes only with 'box':
fig, axes = plt.subplots(nrows=2, sharex=True, sharey=True)
axes[0].set_aspect(2, share=True)
axes[0].plot([1, 2], [3, 4])
axes[1].plot([3, 4], [1, 2])
plt.draw() # Trigger apply_aspect().
assert axes[0].get_xlim() == axes[1].get_xlim()
assert axes[0].get_ylim() == axes[1].get_ylim()
def test_shared_with_aspect_3():
# Different aspect ratios:
for adjustable in ['box', 'datalim']:
fig, axes = plt.subplots(nrows=2, sharey=True)
axes[0].set_aspect(2, adjustable=adjustable)
axes[1].set_aspect(0.5, adjustable=adjustable)
axes[0].plot([1, 2], [3, 4])
axes[1].plot([3, 4], [1, 2])
plt.draw() # Trigger apply_aspect().
assert axes[0].get_xlim() != axes[1].get_xlim()
assert axes[0].get_ylim() == axes[1].get_ylim()
fig_aspect = fig.bbox_inches.height / fig.bbox_inches.width
for ax in axes:
p = ax.get_position()
box_aspect = p.height / p.width
lim_aspect = ax.viewLim.height / ax.viewLim.width
expected = fig_aspect * box_aspect / lim_aspect
assert round(expected, 4) == round(ax.get_aspect(), 4)
@pytest.mark.parametrize('twin', ('x', 'y'))
def test_twin_with_aspect(twin):
fig, ax = plt.subplots()
# test twinx or twiny
ax_twin = getattr(ax, 'twin{}'.format(twin))()
ax.set_aspect(5)
ax_twin.set_aspect(2)
assert_array_equal(ax.bbox.extents,
ax_twin.bbox.extents)
def test_relim_visible_only():
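    # relim(visible_only=True) should ignore invisible artists, so the data
    # limits fall back to those of the still-visible line.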
x1 = (0., 10.)
y1 = (0., 10.)
x2 = (-10., 20.)
y2 = (-10., 30.)
fig = matplotlib.figure.Figure()
ax = fig.add_subplot(111)
ax.plot(x1, y1)
assert ax.get_xlim() == x1
assert ax.get_ylim() == y1
l = ax.plot(x2, y2)
assert ax.get_xlim() == x2
assert ax.get_ylim() == y2
l[0].set_visible(False)
assert ax.get_xlim() == x2
assert ax.get_ylim() == y2
ax.relim(visible_only=True)
ax.autoscale_view()
assert ax.get_xlim() == x1
assert ax.get_ylim() == y1
def test_text_labelsize():
"""
tests for issue #1172
"""
fig = plt.figure()
ax = fig.gca()
ax.tick_params(labelsize='large')
ax.tick_params(direction='out')
@image_comparison(baseline_images=['pie_default'], extensions=['png'])
def test_pie_default():
# The slices will be ordered and plotted counter-clockwise.
labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
sizes = [15, 30, 45, 10]
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
explode = (0, 0.1, 0, 0) # only "explode" the 2nd slice (i.e. 'Hogs')
fig1, ax1 = plt.subplots(figsize=(8, 6))
pie1 = ax1.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90)
@image_comparison(baseline_images=['pie_linewidth_0', 'pie_linewidth_0',
'pie_linewidth_0'],
extensions=['png'])
def test_pie_linewidth_0():
# The slices will be ordered and plotted counter-clockwise.
labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
sizes = [15, 30, 45, 10]
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
explode = (0, 0.1, 0, 0) # only "explode" the 2nd slice (i.e. 'Hogs')
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90,
wedgeprops={'linewidth': 0})
# Set aspect ratio to be equal so that pie is drawn as a circle.
plt.axis('equal')
# Reuse testcase from above for a labeled data test
data = {"l": labels, "s": sizes, "c": colors, "ex": explode}
fig = plt.figure()
ax = fig.gca()
ax.pie("s", explode="ex", labels="l", colors="c",
autopct='%1.1f%%', shadow=True, startangle=90,
wedgeprops={'linewidth': 0}, data=data)
ax.axis('equal')
# And again to test the pyplot functions which should also be able to be
# called with a data kwarg
plt.figure()
plt.pie("s", explode="ex", labels="l", colors="c",
autopct='%1.1f%%', shadow=True, startangle=90,
wedgeprops={'linewidth': 0}, data=data)
plt.axis('equal')
@image_comparison(baseline_images=['pie_center_radius'], extensions=['png'])
def test_pie_center_radius():
# The slices will be ordered and plotted counter-clockwise.
labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
sizes = [15, 30, 45, 10]
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
explode = (0, 0.1, 0, 0) # only "explode" the 2nd slice (i.e. 'Hogs')
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90,
wedgeprops={'linewidth': 0}, center=(1, 2), radius=1.5)
plt.annotate("Center point", xy=(1, 2), xytext=(1, 1.5),
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"))
# Set aspect ratio to be equal so that pie is drawn as a circle.
plt.axis('equal')
@image_comparison(baseline_images=['pie_linewidth_2'], extensions=['png'])
def test_pie_linewidth_2():
# The slices will be ordered and plotted counter-clockwise.
labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
sizes = [15, 30, 45, 10]
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
explode = (0, 0.1, 0, 0) # only "explode" the 2nd slice (i.e. 'Hogs')
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90,
wedgeprops={'linewidth': 2})
# Set aspect ratio to be equal so that pie is drawn as a circle.
plt.axis('equal')
@image_comparison(baseline_images=['pie_ccw_true'], extensions=['png'])
def test_pie_ccw_true():
# The slices will be ordered and plotted counter-clockwise.
labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
sizes = [15, 30, 45, 10]
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
explode = (0, 0.1, 0, 0) # only "explode" the 2nd slice (i.e. 'Hogs')
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90,
counterclock=True)
# Set aspect ratio to be equal so that pie is drawn as a circle.
plt.axis('equal')
@image_comparison(baseline_images=['pie_frame_grid'], extensions=['png'])
def test_pie_frame_grid():
# The slices will be ordered and plotted counter-clockwise.
labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
sizes = [15, 30, 45, 10]
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
# only "explode" the 2nd slice (i.e. 'Hogs')
explode = (0, 0.1, 0, 0)
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90,
wedgeprops={'linewidth': 0},
frame=True, center=(2, 2))
plt.pie(sizes[::-1], explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90,
wedgeprops={'linewidth': 0},
frame=True, center=(5, 2))
plt.pie(sizes, explode=explode[::-1], labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90,
wedgeprops={'linewidth': 0},
frame=True, center=(3, 5))
# Set aspect ratio to be equal so that pie is drawn as a circle.
plt.axis('equal')
@image_comparison(baseline_images=['pie_rotatelabels_true'],
extensions=['png'])
def test_pie_rotatelabels_true():
# The slices will be ordered and plotted counter-clockwise.
labels = 'Hogwarts', 'Frogs', 'Dogs', 'Logs'
sizes = [15, 30, 45, 10]
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
explode = (0, 0.1, 0, 0) # only "explode" the 2nd slice (i.e. 'Hogs')
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90,
rotatelabels=True)
# Set aspect ratio to be equal so that pie is drawn as a circle.
plt.axis('equal')
@image_comparison(baseline_images=['pie_no_label'], extensions=['png'])
def test_pie_nolabel_but_legend():
labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
sizes = [15, 30, 45, 10]
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
explode = (0, 0.1, 0, 0) # only "explode" the 2nd slice (i.e. 'Hogs')
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90, labeldistance=None,
rotatelabels=True)
plt.axis('equal')
plt.ylim(-1.2, 1.2)
plt.legend()
def test_pie_textprops():
data = [23, 34, 45]
labels = ["Long name 1", "Long name 2", "Long name 3"]
textprops = dict(horizontalalignment="center",
verticalalignment="top",
rotation=90,
rotation_mode="anchor",
size=12, color="red")
_, texts, autopct = plt.gca().pie(data, labels=labels, autopct='%.2f',
textprops=textprops)
    for text_group in [texts, autopct]:
        for tx in text_group:
assert tx.get_ha() == textprops["horizontalalignment"]
assert tx.get_va() == textprops["verticalalignment"]
assert tx.get_rotation() == textprops["rotation"]
assert tx.get_rotation_mode() == textprops["rotation_mode"]
assert tx.get_size() == textprops["size"]
assert tx.get_color() == textprops["color"]
@image_comparison(baseline_images=['set_get_ticklabels'], extensions=['png'])
def test_set_get_ticklabels():
# test issue 2246
fig, ax = plt.subplots(2)
ha = ['normal', 'set_x/yticklabels']
ax[0].plot(np.arange(10))
ax[0].set_title(ha[0])
ax[1].plot(np.arange(10))
ax[1].set_title(ha[1])
    # Set tick labels on the first plot in the normal way.
ax[0].set_xticklabels(('a', 'b', 'c', 'd'))
ax[0].set_yticklabels(('11', '12', '13', '14'))
    # Set tick labels on the second plot by passing the return value of
    # get_ticklabels(); both plots should end up with the same labels.
ax[1].set_xticklabels(ax[0].get_xticklabels())
ax[1].set_yticklabels(ax[0].get_yticklabels())
@image_comparison(
baseline_images=['retain_tick_visibility'],
extensions=['png'],
)
def test_retain_tick_visibility():
fig, ax = plt.subplots()
plt.plot([0, 1, 2], [0, -1, 4])
plt.setp(ax.get_yticklabels(), visible=False)
ax.tick_params(axis="y", which="both", length=0)
def test_tick_label_update():
# test issue 9397
fig, ax = plt.subplots()
# Set up a dummy formatter
def formatter_func(x, pos):
return "unit value" if x == 1 else ""
ax.xaxis.set_major_formatter(plt.FuncFormatter(formatter_func))
# Force some of the x-axis ticks to be outside of the drawn range
ax.set_xticks([-1, 0, 1, 2, 3])
ax.set_xlim(-0.5, 2.5)
ax.figure.canvas.draw()
tick_texts = [tick.get_text() for tick in ax.xaxis.get_ticklabels()]
assert tick_texts == ["", "", "unit value", "", ""]
@image_comparison(baseline_images=['o_marker_path_snap'], extensions=['png'],
savefig_kwarg={'dpi': 72})
def test_o_marker_path_snap():
fig, ax = plt.subplots()
ax.margins(.1)
for ms in range(1, 15):
ax.plot([1, 2, ], np.ones(2) + ms, 'o', ms=ms)
for ms in np.linspace(1, 10, 25):
ax.plot([3, 4, ], np.ones(2) + ms, 'o', ms=ms)
def test_margins():
# test all ways margins can be called
data = [1, 10]
xmin = 0.0
xmax = len(data) - 1.0
ymin = min(data)
ymax = max(data)
fig1, ax1 = plt.subplots(1, 1)
ax1.plot(data)
ax1.margins(1)
assert ax1.margins() == (1, 1)
assert ax1.get_xlim() == (xmin - (xmax - xmin) * 1,
xmax + (xmax - xmin) * 1)
assert ax1.get_ylim() == (ymin - (ymax - ymin) * 1,
ymax + (ymax - ymin) * 1)
fig2, ax2 = plt.subplots(1, 1)
ax2.plot(data)
ax2.margins(0.5, 2)
assert ax2.margins() == (0.5, 2)
assert ax2.get_xlim() == (xmin - (xmax - xmin) * 0.5,
xmax + (xmax - xmin) * 0.5)
assert ax2.get_ylim() == (ymin - (ymax - ymin) * 2,
ymax + (ymax - ymin) * 2)
fig3, ax3 = plt.subplots(1, 1)
ax3.plot(data)
ax3.margins(x=-0.2, y=0.5)
assert ax3.margins() == (-0.2, 0.5)
assert ax3.get_xlim() == (xmin - (xmax - xmin) * -0.2,
xmax + (xmax - xmin) * -0.2)
assert ax3.get_ylim() == (ymin - (ymax - ymin) * 0.5,
ymax + (ymax - ymin) * 0.5)
def test_length_one_hist():
fig, ax = plt.subplots()
ax.hist(1)
ax.hist([1])
def test_pathological_hexbin():
# issue #2863
out = io.BytesIO()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
mylist = [10] * 100
fig, ax = plt.subplots(1, 1)
ax.hexbin(mylist, mylist)
fig.savefig(out)
assert len(w) == 0
def test_color_None():
# issue 3855
fig, ax = plt.subplots()
ax.plot([1, 2], [1, 2], color=None)
def test_color_alias():
# issues 4157 and 4162
fig, ax = plt.subplots()
line = ax.plot([0, 1], c='lime')[0]
assert 'lime' == line.get_color()
def test_numerical_hist_label():
fig, ax = plt.subplots()
ax.hist([range(15)] * 5, label=range(5))
ax.legend()
def test_unicode_hist_label():
fig, ax = plt.subplots()
a = (b'\xe5\xbe\x88\xe6\xbc\x82\xe4\xba\xae, ' +
b'r\xc3\xb6m\xc3\xa4n ch\xc3\xa4r\xc3\xa1ct\xc3\xa8rs')
b = b'\xd7\xa9\xd7\x9c\xd7\x95\xd7\x9d'
labels = [a.decode('utf-8'),
'hi aardvark',
b.decode('utf-8'),
]
ax.hist([range(15)] * 3, label=labels)
ax.legend()
def test_move_offsetlabel():
data = np.random.random(10) * 1e-22
fig, ax = plt.subplots()
ax.plot(data)
ax.yaxis.tick_right()
assert (1, 0.5) == ax.yaxis.offsetText.get_position()
@image_comparison(baseline_images=['rc_spines'], extensions=['png'],
savefig_kwarg={'dpi': 40})
def test_rc_spines():
rc_dict = {
'axes.spines.left': False,
'axes.spines.right': False,
'axes.spines.top': False,
'axes.spines.bottom': False}
with matplotlib.rc_context(rc_dict):
fig, ax = plt.subplots()
@image_comparison(baseline_images=['rc_grid'], extensions=['png'],
savefig_kwarg={'dpi': 40})
def test_rc_grid():
fig = plt.figure()
rc_dict0 = {
'axes.grid': True,
'axes.grid.axis': 'both'
}
rc_dict1 = {
'axes.grid': True,
'axes.grid.axis': 'x'
}
rc_dict2 = {
'axes.grid': True,
'axes.grid.axis': 'y'
}
dict_list = [rc_dict0, rc_dict1, rc_dict2]
i = 1
for rc_dict in dict_list:
with matplotlib.rc_context(rc_dict):
fig.add_subplot(3, 1, i)
i += 1
def test_rc_tick():
d = {'xtick.bottom': False, 'xtick.top': True,
'ytick.left': True, 'ytick.right': False}
with plt.rc_context(rc=d):
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
xax = ax1.xaxis
yax = ax1.yaxis
# tick1On bottom/left
assert not xax._major_tick_kw['tick1On']
assert xax._major_tick_kw['tick2On']
assert not xax._minor_tick_kw['tick1On']
assert xax._minor_tick_kw['tick2On']
assert yax._major_tick_kw['tick1On']
assert not yax._major_tick_kw['tick2On']
assert yax._minor_tick_kw['tick1On']
assert not yax._minor_tick_kw['tick2On']
def test_rc_major_minor_tick():
d = {'xtick.top': True, 'ytick.right': True, # Enable all ticks
'xtick.bottom': True, 'ytick.left': True,
# Selectively disable
'xtick.minor.bottom': False, 'xtick.major.bottom': False,
'ytick.major.left': False, 'ytick.minor.left': False}
with plt.rc_context(rc=d):
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
xax = ax1.xaxis
yax = ax1.yaxis
# tick1On bottom/left
assert not xax._major_tick_kw['tick1On']
assert xax._major_tick_kw['tick2On']
assert not xax._minor_tick_kw['tick1On']
assert xax._minor_tick_kw['tick2On']
assert not yax._major_tick_kw['tick1On']
assert yax._major_tick_kw['tick2On']
assert not yax._minor_tick_kw['tick1On']
assert yax._minor_tick_kw['tick2On']
def test_square_plot():
x = np.arange(4)
y = np.array([1., 3., 5., 7.])
fig, ax = plt.subplots()
ax.plot(x, y, 'mo')
ax.axis('square')
xlim, ylim = ax.get_xlim(), ax.get_ylim()
assert np.diff(xlim) == np.diff(ylim)
assert ax.get_aspect() == 'equal'
assert_array_almost_equal(
ax.get_position(original=True).extents,
np.array((0.125, 0.1, 0.9, 0.9)))
assert_array_almost_equal(
ax.get_position(original=False).extents,
np.array((0.2125, 0.1, 0.8125, 0.9)))
def test_no_None():
fig, ax = plt.subplots()
with pytest.raises(ValueError):
plt.plot(None)
with pytest.raises(ValueError):
plt.plot(None, None)
@pytest.mark.parametrize(
"xy, cls", [
((), mpl.image.AxesImage), # (0, N)
(((3, 7), (2, 6)), mpl.image.AxesImage), # (xmin, xmax)
((range(5), range(4)), mpl.image.AxesImage), # regular grid
(([1, 2, 4, 8, 16], [0, 1, 2, 3]), # irregular grid
mpl.image.PcolorImage),
((np.random.random((4, 5)), np.random.random((4, 5))), # 2D coords
mpl.collections.QuadMesh),
]
)
def test_pcolorfast_colormapped(xy, cls):
fig, ax = plt.subplots()
data = np.arange(12).reshape((3, 4))
assert type(ax.pcolorfast(*xy, data)) == cls
def test_pcolor_fast_RGB():
fig, ax = plt.subplots(1, 1)
np.random.seed(19680801)
C = np.random.rand(10, 10, 3) # RGB image [0,1]
    x = np.arange(11, dtype=float)
    y = np.arange(11, dtype=float)
xv, yv = np.meshgrid(x, y)
with pytest.raises(ValueError):
ax.pcolorfast(xv, yv, C)
def test_shared_scale():
fig, axs = plt.subplots(2, 2, sharex=True, sharey=True)
axs[0, 0].set_xscale("log")
axs[0, 0].set_yscale("log")
for ax in axs.flat:
assert ax.get_yscale() == 'log'
assert ax.get_xscale() == 'log'
axs[1, 1].set_xscale("linear")
axs[1, 1].set_yscale("linear")
for ax in axs.flat:
assert ax.get_yscale() == 'linear'
assert ax.get_xscale() == 'linear'
def test_violin_point_mass():
"""Violin plot should handle point mass pdf gracefully."""
plt.violinplot(np.array([0, 0]))
def generate_errorbar_inputs():
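    # Build a shotgun list of keyword-argument dicts for errorbar(): fixed
    # x/y data crossed (via cycler) with the accepted error shapes -- scalar,
    # length-N, 2xN, Nx1 and None -- plus empty-data combinations.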
base_xy = cycler('x', [np.arange(5)]) + cycler('y', [np.ones(5)])
err_cycler = cycler('err', [1,
[1, 1, 1, 1, 1],
[[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1]],
[[1]] * 5,
np.ones(5),
np.ones((2, 5)),
np.ones((5, 1)),
None
])
xerr_cy = cycler('xerr', err_cycler)
yerr_cy = cycler('yerr', err_cycler)
empty = ((cycler('x', [[]]) + cycler('y', [[]])) *
cycler('xerr', [[], None]) * cycler('yerr', [[], None]))
xerr_only = base_xy * xerr_cy
yerr_only = base_xy * yerr_cy
both_err = base_xy * yerr_cy * xerr_cy
return [*xerr_only, *yerr_only, *both_err, *empty]
@pytest.mark.parametrize('kwargs', generate_errorbar_inputs())
def test_errorbar_inputs_shotgun(kwargs):
# (n, 1)-shaped error deprecation already tested by test_errorbar.
with mpl.cbook._suppress_matplotlib_deprecation_warning():
ax = plt.gca()
eb = ax.errorbar(**kwargs)
eb.remove()
@image_comparison(baseline_images=["dash_offset"], remove_text=True)
def test_dash_offset():
fig, ax = plt.subplots()
x = np.linspace(0, 10)
y = np.ones_like(x)
for j in range(0, 100, 2):
ax.plot(x, j*y, ls=(j, (10, 10)), lw=5, color='k')
def test_title_pad():
# check that title padding puts the title in the right
# place...
fig, ax = plt.subplots()
ax.set_title('aardvark', pad=30.)
m = ax.titleOffsetTrans.get_matrix()
assert m[1, -1] == (30. / 72. * fig.dpi)
ax.set_title('aardvark', pad=0.)
m = ax.titleOffsetTrans.get_matrix()
assert m[1, -1] == 0.
# check that it is reverted...
ax.set_title('aardvark', pad=None)
m = ax.titleOffsetTrans.get_matrix()
assert m[1, -1] == (matplotlib.rcParams['axes.titlepad'] / 72. * fig.dpi)
def test_title_location_roundtrip():
fig, ax = plt.subplots()
# set default title location
plt.rcParams['axes.titlelocation'] = 'center'
ax.set_title('aardvark')
ax.set_title('left', loc='left')
ax.set_title('right', loc='right')
assert 'left' == ax.get_title(loc='left')
assert 'right' == ax.get_title(loc='right')
assert 'aardvark' == ax.get_title(loc='center')
with pytest.raises(ValueError):
ax.get_title(loc='foo')
with pytest.raises(ValueError):
ax.set_title('fail', loc='foo')
@image_comparison(baseline_images=["loglog"], remove_text=True,
extensions=['png'], tol=0.02)
def test_loglog():
fig, ax = plt.subplots()
x = np.arange(1, 11)
ax.loglog(x, x**3, lw=5)
ax.tick_params(length=25, width=2)
ax.tick_params(length=15, width=2, which='minor')
@image_comparison(baseline_images=["test_loglog_nonpos"],
remove_text=True, extensions=['png'], style='mpl20')
def test_loglog_nonpos():
fig, ax = plt.subplots(3, 3)
x = np.arange(1, 11)
y = x**3
y[7] = -3.
x[4] = -10
for nn, mcx in enumerate(['mask', 'clip', '']):
for mm, mcy in enumerate(['mask', 'clip', '']):
kws = {}
if mcx:
kws['nonposx'] = mcx
if mcy:
kws['nonposy'] = mcy
ax[mm, nn].loglog(x, y**3, lw=2, **kws)
@pytest.mark.style('default')
def test_axes_margins():
fig, ax = plt.subplots()
ax.plot([0, 1, 2, 3])
assert ax.get_ybound()[0] != 0
fig, ax = plt.subplots()
ax.bar([0, 1, 2, 3], [1, 1, 1, 1])
assert ax.get_ybound()[0] == 0
fig, ax = plt.subplots()
ax.barh([0, 1, 2, 3], [1, 1, 1, 1])
assert ax.get_xbound()[0] == 0
fig, ax = plt.subplots()
ax.pcolor(np.zeros((10, 10)))
assert ax.get_xbound() == (0, 10)
assert ax.get_ybound() == (0, 10)
fig, ax = plt.subplots()
ax.pcolorfast(np.zeros((10, 10)))
assert ax.get_xbound() == (0, 10)
assert ax.get_ybound() == (0, 10)
fig, ax = plt.subplots()
ax.hist(np.arange(10))
assert ax.get_ybound()[0] == 0
fig, ax = plt.subplots()
ax.imshow(np.zeros((10, 10)))
assert ax.get_xbound() == (-0.5, 9.5)
assert ax.get_ybound() == (-0.5, 9.5)
@pytest.fixture(params=['x', 'y'])
def shared_axis_remover(request):
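    # Fixture returning a helper that adds a twin axes sharing the axis under
    # test, removes it again, and checks that the surviving axis' major
    # locator still produces ticks covering newly set limits.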
def _helper_x(ax):
ax2 = ax.twinx()
ax2.remove()
ax.set_xlim(0, 15)
r = ax.xaxis.get_major_locator()()
assert r[-1] > 14
def _helper_y(ax):
ax2 = ax.twiny()
ax2.remove()
ax.set_ylim(0, 15)
r = ax.yaxis.get_major_locator()()
assert r[-1] > 14
return {"x": _helper_x, "y": _helper_y}[request.param]
@pytest.fixture(params=['gca', 'subplots', 'subplots_shared', 'add_axes'])
def shared_axes_generator(request):
# test all of the ways to get fig/ax sets
if request.param == 'gca':
fig = plt.figure()
ax = fig.gca()
elif request.param == 'subplots':
fig, ax = plt.subplots()
elif request.param == 'subplots_shared':
fig, ax_lst = plt.subplots(2, 2, sharex='all', sharey='all')
ax = ax_lst[0][0]
elif request.param == 'add_axes':
fig = plt.figure()
ax = fig.add_axes([.1, .1, .8, .8])
return fig, ax
def test_remove_shared_axes(shared_axes_generator, shared_axis_remover):
# test all of the ways to get fig/ax sets
fig, ax = shared_axes_generator
shared_axis_remover(ax)
def test_remove_shared_axes_relim():
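    # Changing limits on an axes after it has been removed from a shared
    # group must not affect the axes it used to share with.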
fig, ax_lst = plt.subplots(2, 2, sharex='all', sharey='all')
ax = ax_lst[0][0]
orig_xlim = ax_lst[0][1].get_xlim()
ax.remove()
ax.set_xlim(0, 5)
assert_array_equal(ax_lst[0][1].get_xlim(), orig_xlim)
def test_shared_axes_autoscale():
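    # Explicitly setting limits before plotting turns autoscaling off, and an
    # axes created with sharex/sharey afterwards should inherit both the
    # limits and the disabled autoscale state.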
l = np.arange(-80, 90, 40)
t = np.random.random_sample((l.size, l.size))
ax1 = plt.subplot(211)
ax1.set_xlim(-1000, 1000)
ax1.set_ylim(-1000, 1000)
ax1.contour(l, l, t)
ax2 = plt.subplot(212, sharex=ax1, sharey=ax1)
ax2.contour(l, l, t)
assert not ax1.get_autoscalex_on() and not ax2.get_autoscalex_on()
assert not ax1.get_autoscaley_on() and not ax2.get_autoscaley_on()
assert ax1.get_xlim() == ax2.get_xlim() == (-1000, 1000)
assert ax1.get_ylim() == ax2.get_ylim() == (-1000, 1000)
def test_adjust_numtick_aspect():
fig, ax = plt.subplots()
ax.yaxis.get_major_locator().set_params(nbins='auto')
ax.set_xlim(0, 1000)
ax.set_aspect('equal')
fig.canvas.draw()
assert len(ax.yaxis.get_major_locator()()) == 2
ax.set_ylim(0, 1000)
fig.canvas.draw()
assert len(ax.yaxis.get_major_locator()()) > 2
@image_comparison(baseline_images=["auto_numticks"], style='default',
extensions=['png'])
def test_auto_numticks():
# Make tiny, empty subplots, verify that there are only 3 ticks.
fig, axes = plt.subplots(4, 4)
@image_comparison(baseline_images=["auto_numticks_log"], style='default',
extensions=['png'])
def test_auto_numticks_log():
# Verify that there are not too many ticks with a large log range.
fig, ax = plt.subplots()
matplotlib.rcParams['axes.autolimit_mode'] = 'round_numbers'
ax.loglog([1e-20, 1e5], [1e-16, 10])
def test_broken_barh_empty():
fig, ax = plt.subplots()
ax.broken_barh([], (.1, .5))
def test_broken_barh_timedelta():
    """Check that a (datetime, timedelta) pair works as the (x, dx) range
    for broken_barh."""
fig, ax = plt.subplots()
pp = ax.broken_barh([(datetime.datetime(2018, 11, 9, 0, 0, 0),
datetime.timedelta(hours=1))], [1, 2])
assert pp.get_paths()[0].vertices[0, 0] == 737007.0
assert pp.get_paths()[0].vertices[2, 0] == 737007.0 + 1 / 24
def test_pandas_pcolormesh(pd):
time = pd.date_range('2000-01-01', periods=10)
depth = np.arange(20)
data = np.random.rand(20, 10)
fig, ax = plt.subplots()
ax.pcolormesh(time, depth, data)
def test_pandas_indexing_dates(pd):
dates = np.arange('2005-02', '2005-03', dtype='datetime64[D]')
values = np.sin(np.array(range(len(dates))))
df = pd.DataFrame({'dates': dates, 'values': values})
ax = plt.gca()
without_zero_index = df[np.array(df.index) % 2 == 1].copy()
ax.plot('dates', 'values', data=without_zero_index)
def test_pandas_errorbar_indexing(pd):
df = pd.DataFrame(np.random.uniform(size=(5, 4)),
columns=['x', 'y', 'xe', 'ye'],
index=[1, 2, 3, 4, 5])
fig, ax = plt.subplots()
ax.errorbar('x', 'y', xerr='xe', yerr='ye', data=df)
def test_pandas_indexing_hist(pd):
ser_1 = pd.Series(data=[1, 2, 2, 3, 3, 4, 4, 4, 4, 5])
ser_2 = ser_1.iloc[1:]
fig, axes = plt.subplots()
axes.hist(ser_2)
def test_pandas_bar_align_center(pd):
# Tests fix for issue 8767
df = pd.DataFrame({'a': range(2), 'b': range(2)})
fig, ax = plt.subplots(1)
ax.bar(df.loc[df['a'] == 1, 'b'],
df.loc[df['a'] == 1, 'b'],
align='center')
fig.canvas.draw()
def test_axis_set_tick_params_labelsize_labelcolor():
# Tests fix for issue 4346
axis_1 = plt.subplot()
axis_1.yaxis.set_tick_params(labelsize=30, labelcolor='red',
direction='out')
# Expected values after setting the ticks
assert axis_1.yaxis.majorTicks[0]._size == 4.0
assert axis_1.yaxis.majorTicks[0]._color == 'k'
assert axis_1.yaxis.majorTicks[0]._labelsize == 30.0
assert axis_1.yaxis.majorTicks[0]._labelcolor == 'red'
def test_axes_tick_params_gridlines():
# Now treating grid params like other Tick params
ax = plt.subplot()
ax.tick_params(grid_color='b', grid_linewidth=5, grid_alpha=0.5,
grid_linestyle='dashdot')
for axis in ax.xaxis, ax.yaxis:
assert axis.majorTicks[0]._grid_color == 'b'
assert axis.majorTicks[0]._grid_linewidth == 5
assert axis.majorTicks[0]._grid_alpha == 0.5
assert axis.majorTicks[0]._grid_linestyle == 'dashdot'
def test_axes_tick_params_ylabelside():
# Tests fix for issue 10267
ax = plt.subplot()
ax.tick_params(labelleft=False, labelright=True,
which='major')
ax.tick_params(labelleft=False, labelright=True,
which='minor')
# expects left false, right true
assert ax.yaxis.majorTicks[0].label1.get_visible() is False
assert ax.yaxis.majorTicks[0].label2.get_visible() is True
assert ax.yaxis.minorTicks[0].label1.get_visible() is False
assert ax.yaxis.minorTicks[0].label2.get_visible() is True
def test_axes_tick_params_xlabelside():
# Tests fix for issue 10267
ax = plt.subplot()
ax.tick_params(labeltop=True, labelbottom=False,
which='major')
ax.tick_params(labeltop=True, labelbottom=False,
which='minor')
# expects top True, bottom False
# label1.get_visible() mapped to labelbottom
# label2.get_visible() mapped to labeltop
assert ax.xaxis.majorTicks[0].label1.get_visible() is False
assert ax.xaxis.majorTicks[0].label2.get_visible() is True
assert ax.xaxis.minorTicks[0].label1.get_visible() is False
assert ax.xaxis.minorTicks[0].label2.get_visible() is True
def test_none_kwargs():
fig, ax = plt.subplots()
ln, = ax.plot(range(32), linestyle=None)
assert ln.get_linestyle() == '-'
def test_ls_ds_conflict():
# Passing the drawstyle with the linestyle is deprecated since 3.1.
# We still need to test this until it's removed from the code.
# But we don't want to see the deprecation warning in the test.
with matplotlib.cbook._suppress_matplotlib_deprecation_warning(), \
pytest.raises(ValueError):
plt.plot(range(32), linestyle='steps-pre:', drawstyle='steps-post')
def test_bar_uint8():
xs = [0, 1, 2, 3]
b = plt.bar(np.array(xs, dtype=np.uint8), [2, 3, 4, 5], align="edge")
for (patch, x) in zip(b.patches, xs):
assert patch.xy[0] == x
@image_comparison(baseline_images=['date_timezone_x'], extensions=['png'])
def test_date_timezone_x():
# Tests issue 5575
time_index = [datetime.datetime(2016, 2, 22, hour=x,
tzinfo=dutz.gettz('Canada/Eastern'))
for x in range(3)]
# Same Timezone
fig = plt.figure(figsize=(20, 12))
plt.subplot(2, 1, 1)
plt.plot_date(time_index, [3] * 3, tz='Canada/Eastern')
# Different Timezone
plt.subplot(2, 1, 2)
plt.plot_date(time_index, [3] * 3, tz='UTC')
@image_comparison(baseline_images=['date_timezone_y'],
extensions=['png'])
def test_date_timezone_y():
# Tests issue 5575
time_index = [datetime.datetime(2016, 2, 22, hour=x,
tzinfo=dutz.gettz('Canada/Eastern'))
for x in range(3)]
# Same Timezone
fig = plt.figure(figsize=(20, 12))
plt.subplot(2, 1, 1)
plt.plot_date([3] * 3,
time_index, tz='Canada/Eastern', xdate=False, ydate=True)
# Different Timezone
plt.subplot(2, 1, 2)
plt.plot_date([3] * 3, time_index, tz='UTC', xdate=False, ydate=True)
@image_comparison(baseline_images=['date_timezone_x_and_y'],
extensions=['png'])
def test_date_timezone_x_and_y():
# Tests issue 5575
UTC = datetime.timezone.utc
time_index = [datetime.datetime(2016, 2, 22, hour=x, tzinfo=UTC)
for x in range(3)]
# Same Timezone
fig = plt.figure(figsize=(20, 12))
plt.subplot(2, 1, 1)
plt.plot_date(time_index, time_index, tz='UTC', ydate=True)
# Different Timezone
plt.subplot(2, 1, 2)
plt.plot_date(time_index, time_index, tz='US/Eastern', ydate=True)
@image_comparison(baseline_images=['axisbelow'],
extensions=['png'], remove_text=True)
def test_axisbelow():
# Test 'line' setting added in 6287.
# Show only grids, not frame or ticks, to make this test
# independent of future change to drawing order of those elements.
fig, axs = plt.subplots(ncols=3, sharex=True, sharey=True)
settings = (False, 'line', True)
for ax, setting in zip(axs, settings):
ax.plot((0, 10), (0, 10), lw=10, color='m')
circ = mpatches.Circle((3, 3), color='r')
ax.add_patch(circ)
ax.grid(color='c', linestyle='-', linewidth=3)
ax.tick_params(top=False, bottom=False,
left=False, right=False)
for spine in ax.spines.values():
spine.set_visible(False)
ax.set_axisbelow(setting)
@image_comparison(baseline_images=['titletwiny'], style='mpl20',
extensions=['png'])
def test_titletwiny():
# Test that title is put above xlabel if xlabel at top
fig, ax = plt.subplots()
fig.subplots_adjust(top=0.8)
ax2 = ax.twiny()
ax.set_xlabel('Xlabel')
ax2.set_xlabel('Xlabel2')
ax.set_title('Title')
def test_titlesetpos():
# Test that title stays put if we set it manually
fig, ax = plt.subplots()
fig.subplots_adjust(top=0.8)
ax2 = ax.twiny()
ax.set_xlabel('Xlabel')
ax2.set_xlabel('Xlabel2')
ax.set_title('Title')
pos = (0.5, 1.11)
ax.title.set_position(pos)
renderer = fig.canvas.get_renderer()
ax._update_title_position(renderer)
assert ax.title.get_position() == pos
def test_title_xticks_top():
# Test that title moves if xticks on top of axes.
fig, ax = plt.subplots()
ax.xaxis.set_ticks_position('top')
ax.set_title('xlabel top')
fig.canvas.draw()
assert ax.title.get_position()[1] > 1.04
def test_title_xticks_top_both():
# Test that title moves if xticks on top of axes.
fig, ax = plt.subplots()
ax.tick_params(axis="x", bottom=True, top=True,
labelbottom=True, labeltop=True)
ax.set_title('xlabel top')
fig.canvas.draw()
assert ax.title.get_position()[1] > 1.04
def test_offset_label_color():
# Tests issue 6440
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot([1.01e9, 1.02e9, 1.03e9])
ax.yaxis.set_tick_params(labelcolor='red')
assert ax.yaxis.get_offset_text().get_color() == 'red'
def test_large_offset():
fig, ax = plt.subplots()
ax.plot((1 + np.array([0, 1.e-12])) * 1.e27)
fig.canvas.draw()
def test_barb_units():
fig, ax = plt.subplots()
dates = [datetime.datetime(2017, 7, 15, 18, i) for i in range(0, 60, 10)]
y = np.linspace(0, 5, len(dates))
u = v = np.linspace(0, 50, len(dates))
ax.barbs(dates, y, u, v)
def test_quiver_units():
fig, ax = plt.subplots()
dates = [datetime.datetime(2017, 7, 15, 18, i) for i in range(0, 60, 10)]
y = np.linspace(0, 5, len(dates))
u = v = np.linspace(0, 50, len(dates))
ax.quiver(dates, y, u, v)
def test_bar_color_cycle():
to_rgb = mcolors.to_rgb
fig, ax = plt.subplots()
for j in range(5):
ln, = ax.plot(range(3))
brs = ax.bar(range(3), range(3))
for br in brs:
assert to_rgb(ln.get_color()) == to_rgb(br.get_facecolor())
def test_tick_param_label_rotation():
    fig, (ax, ax2) = plt.subplots(1, 2)
ax.plot([0, 1], [0, 1])
ax2.plot([0, 1], [0, 1])
ax.xaxis.set_tick_params(which='both', rotation=75)
ax.yaxis.set_tick_params(which='both', rotation=90)
for text in ax.get_xticklabels(which='both'):
assert text.get_rotation() == 75
for text in ax.get_yticklabels(which='both'):
assert text.get_rotation() == 90
ax2.tick_params(axis='x', labelrotation=53)
ax2.tick_params(axis='y', rotation=35)
for text in ax2.get_xticklabels(which='major'):
assert text.get_rotation() == 53
for text in ax2.get_yticklabels(which='major'):
assert text.get_rotation() == 35
@pytest.mark.style('default')
def test_fillbetween_cycle():
fig, ax = plt.subplots()
for j in range(3):
cc = ax.fill_between(range(3), range(3))
target = mcolors.to_rgba('C{}'.format(j))
assert tuple(cc.get_facecolors().squeeze()) == tuple(target)
for j in range(3, 6):
cc = ax.fill_betweenx(range(3), range(3))
target = mcolors.to_rgba('C{}'.format(j))
assert tuple(cc.get_facecolors().squeeze()) == tuple(target)
target = mcolors.to_rgba('k')
for al in ['facecolor', 'facecolors', 'color']:
cc = ax.fill_between(range(3), range(3), **{al: 'k'})
assert tuple(cc.get_facecolors().squeeze()) == tuple(target)
edge_target = mcolors.to_rgba('k')
for j, el in enumerate(['edgecolor', 'edgecolors'], start=6):
cc = ax.fill_between(range(3), range(3), **{el: 'k'})
face_target = mcolors.to_rgba('C{}'.format(j))
assert tuple(cc.get_facecolors().squeeze()) == tuple(face_target)
assert tuple(cc.get_edgecolors().squeeze()) == tuple(edge_target)
def test_log_margins():
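    # On a log-scaled axis, margins should be applied in transformed (log)
    # space: the data endpoints sit a fixed fraction inside the transformed
    # view limits.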
plt.rcParams['axes.autolimit_mode'] = 'data'
fig, ax = plt.subplots()
margin = 0.05
ax.set_xmargin(margin)
ax.semilogx([10, 100], [10, 100])
xlim0, xlim1 = ax.get_xlim()
transform = ax.xaxis.get_transform()
xlim0t, xlim1t = transform.transform([xlim0, xlim1])
x0t, x1t = transform.transform([10, 100])
delta = (x1t - x0t) * margin
assert_allclose([xlim0t + delta, xlim1t - delta], [x0t, x1t])
def test_color_length_mismatch():
N = 5
x, y = np.arange(N), np.arange(N)
colors = np.arange(N+1)
fig, ax = plt.subplots()
with pytest.raises(ValueError):
ax.scatter(x, y, c=colors)
c_rgb = (0.5, 0.5, 0.5)
ax.scatter(x, y, c=c_rgb)
ax.scatter(x, y, c=[c_rgb] * N)
def test_eventplot_legend():
plt.eventplot([1.0], label='Label')
plt.legend()
def test_bar_broadcast_args():
fig, ax = plt.subplots()
# Check that a bar chart with a single height for all bars works.
ax.bar(range(4), 1)
# Check that a horizontal chart with one width works.
ax.bar(0, 1, bottom=range(4), width=1, orientation='horizontal')
# Check that edgecolor gets broadcast.
rect1, rect2 = ax.bar([0, 1], [0, 1], edgecolor=(.1, .2, .3, .4))
assert rect1.get_edgecolor() == rect2.get_edgecolor() == (.1, .2, .3, .4)
def test_invalid_axis_limits():
plt.plot([0, 1], [0, 1])
with pytest.raises(ValueError):
plt.xlim(np.nan)
with pytest.raises(ValueError):
plt.xlim(np.inf)
with pytest.raises(ValueError):
plt.ylim(np.nan)
with pytest.raises(ValueError):
plt.ylim(np.inf)
# Test all 4 combinations of logs/symlogs for minorticks_on()
@pytest.mark.parametrize('xscale', ['symlog', 'log'])
@pytest.mark.parametrize('yscale', ['symlog', 'log'])
def test_minorticks_on(xscale, yscale):
ax = plt.subplot(111)
ax.plot([1, 2, 3, 4])
ax.set_xscale(xscale)
ax.set_yscale(yscale)
ax.minorticks_on()
def test_twinx_knows_limits():
fig, ax = plt.subplots()
ax.axvspan(1, 2)
xtwin = ax.twinx()
xtwin.plot([0, 0.5], [1, 2])
# control axis
fig2, ax2 = plt.subplots()
ax2.axvspan(1, 2)
ax2.plot([0, 0.5], [1, 2])
assert_array_equal(xtwin.viewLim.intervalx, ax2.viewLim.intervalx)
def test_zero_linewidth():
# Check that setting a zero linewidth doesn't error
plt.plot([0, 1], [0, 1], ls='--', lw=0)
def test_polar_gridlines():
fig = plt.figure()
ax = fig.add_subplot(111, polar=True)
# make all major grid lines lighter, only x grid lines set in 2.1.0
ax.grid(alpha=0.2)
# hide y tick labels, no effect in 2.1.0
plt.setp(ax.yaxis.get_ticklabels(), visible=False)
fig.canvas.draw()
assert ax.xaxis.majorTicks[0].gridline.get_alpha() == .2
assert ax.yaxis.majorTicks[0].gridline.get_alpha() == .2
def test_empty_errorbar_legend():
fig, ax = plt.subplots()
ax.errorbar([], [], xerr=[], label='empty y')
ax.errorbar([], [], yerr=[], label='empty x')
ax.legend()
def test_plot_columns_cycle_deprecation():
with pytest.warns(MatplotlibDeprecationWarning):
plt.plot(np.zeros((2, 2)), np.zeros((2, 3)))
# PDF and SVG tests fail using Travis's old versions of gs and inkscape.
@check_figures_equal(extensions=["png"])
def test_markerfacecolor_none_alpha(fig_test, fig_ref):
fig_test.subplots().plot(0, "o", mfc="none", alpha=.5)
fig_ref.subplots().plot(0, "o", mfc="w", alpha=.5)
def test_tick_padding_tightbbox():
"Test that tick padding gets turned off if axis is off"
plt.rcParams["xtick.direction"] = "out"
plt.rcParams["ytick.direction"] = "out"
fig, ax = plt.subplots()
bb = ax.get_tightbbox(fig.canvas.get_renderer())
ax.axis('off')
bb2 = ax.get_tightbbox(fig.canvas.get_renderer())
assert bb.x0 < bb2.x0
assert bb.y0 < bb2.y0
def test_zoom_inset():
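    # Check that indicate_inset_zoom() draws a rectangle matching the inset
    # limits on the parent axes and that the inset axes ends up at the
    # expected figure position.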
dx, dy = 0.05, 0.05
# generate 2 2d grids for the x & y bounds
y, x = np.mgrid[slice(1, 5 + dy, dy),
slice(1, 5 + dx, dx)]
z = np.sin(x)**10 + np.cos(10 + y*x) * np.cos(x)
fig, ax = plt.subplots()
ax.pcolormesh(x, y, z)
ax.set_aspect(1.)
ax.apply_aspect()
# we need to apply_aspect to make the drawing below work.
    # Make the inset axes; the rectangle is given in axes coordinates.
axin1 = ax.inset_axes([0.7, 0.7, 0.35, 0.35])
# redraw the data in the inset axes...
axin1.pcolormesh(x, y, z)
axin1.set_xlim([1.5, 2.15])
axin1.set_ylim([2, 2.5])
axin1.set_aspect(ax.get_aspect())
rec, connectors = ax.indicate_inset_zoom(axin1)
fig.canvas.draw()
xx = np.array([[1.5, 2.],
[2.15, 2.5]])
    assert np.all(rec.get_bbox().get_points() == xx)
xx = np.array([[0.6325, 0.692308],
[0.8425, 0.907692]])
np.testing.assert_allclose(axin1.get_position().get_points(),
xx, rtol=1e-4)
def test_set_position():
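    # set_position(..., which=...) selects whether the 'original' and/or the
    # 'active' position is updated; with a fixed aspect ratio the active box
    # ends up narrower than the requested width, which the checks below
    # exercise for each 'which' mode.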
fig, ax = plt.subplots()
ax.set_aspect(3.)
ax.set_position([0.1, 0.1, 0.4, 0.4], which='both')
assert np.allclose(ax.get_position().width, 0.1)
ax.set_aspect(2.)
ax.set_position([0.1, 0.1, 0.4, 0.4], which='original')
assert np.allclose(ax.get_position().width, 0.15)
ax.set_aspect(3.)
ax.set_position([0.1, 0.1, 0.4, 0.4], which='active')
assert np.allclose(ax.get_position().width, 0.1)
def test_spines_properbbox_after_zoom():
fig, ax = plt.subplots()
bb = ax.spines['bottom'].get_window_extent(fig.canvas.get_renderer())
# this is what zoom calls:
ax._set_view_from_bbox((320, 320, 500, 500), 'in',
None, False, False)
bb2 = ax.spines['bottom'].get_window_extent(fig.canvas.get_renderer())
np.testing.assert_allclose(bb.get_points(), bb2.get_points(), rtol=1e-6)
def test_cartopy_backcompat():
import matplotlib
import matplotlib.axes
import matplotlib.axes._subplots
class Dummy(matplotlib.axes.Axes):
...
class DummySubplot(matplotlib.axes.SubplotBase, Dummy):
_axes_class = Dummy
matplotlib.axes._subplots._subplot_classes[Dummy] = DummySubplot
FactoryDummySubplot = matplotlib.axes.subplot_class_factory(Dummy)
assert DummySubplot is FactoryDummySubplot
def test_gettightbbox_ignoreNaN():
fig, ax = plt.subplots()
remove_ticks_and_titles(fig)
    ax.text(np.nan, 1, 'Boo')
renderer = fig.canvas.get_renderer()
np.testing.assert_allclose(ax.get_tightbbox(renderer).width, 496)
def test_scatter_series_non_zero_index(pd):
# create non-zero index
ids = range(10, 18)
x = pd.Series(np.random.uniform(size=8), index=ids)
y = pd.Series(np.random.uniform(size=8), index=ids)
c = pd.Series([1, 1, 1, 1, 1, 0, 0, 0], index=ids)
plt.scatter(x, y, c)
def test_scatter_empty_data():
# making sure this does not raise an exception
plt.scatter([], [])
plt.scatter([], [], s=[], c=[])
@image_comparison(baseline_images=['annotate_across_transforms'],
style='mpl20', extensions=['png'], remove_text=True)
def test_annotate_across_transforms():
x = np.linspace(0, 10, 200)
y = np.exp(-x) * np.sin(x)
fig, ax = plt.subplots(figsize=(3.39, 3))
ax.plot(x, y)
axins = ax.inset_axes([0.4, 0.5, 0.3, 0.3])
axins.set_aspect(0.2)
axins.xaxis.set_visible(False)
axins.yaxis.set_visible(False)
ax.annotate("", xy=(x[150], y[150]), xycoords=ax.transData,
xytext=(1, 0), textcoords=axins.transAxes,
arrowprops=dict(arrowstyle="->"))
def test_deprecated_uppercase_colors():
# Remove after end of deprecation period.
fig, ax = plt.subplots()
with pytest.warns(MatplotlibDeprecationWarning):
ax.plot([1, 2], color="B")
fig.canvas.draw()
@image_comparison(baseline_images=['secondary_xy'], style='mpl20',
extensions=['png'])
def test_secondary_xy():
fig, axs = plt.subplots(1, 2, figsize=(10, 5), constrained_layout=True)
def invert(x):
with np.errstate(divide='ignore'):
return 1 / x
for nn, ax in enumerate(axs):
ax.plot(np.arange(2, 11), np.arange(2, 11))
if nn == 0:
secax = ax.secondary_xaxis
else:
secax = ax.secondary_yaxis
axsec = secax(0.2, functions=(invert, invert))
axsec = secax(0.4, functions=(lambda x: 2 * x, lambda x: x / 2))
axsec = secax(0.6, functions=(lambda x: x**2, lambda x: x**(1/2)))
axsec = secax(0.8)
def test_secondary_fail():
fig, ax = plt.subplots()
ax.plot(np.arange(2, 11), np.arange(2, 11))
with pytest.raises(ValueError):
axsec = ax.secondary_xaxis(0.2, functions=(lambda x: 1 / x))
with pytest.raises(ValueError):
axsec = ax.secondary_xaxis('right')
with pytest.raises(ValueError):
axsec = ax.secondary_yaxis('bottom')
def test_secondary_resize():
fig, ax = plt.subplots(figsize=(10, 5))
ax.plot(np.arange(2, 11), np.arange(2, 11))
def invert(x):
with np.errstate(divide='ignore'):
return 1 / x
axsec = ax.secondary_xaxis('top', functions=(invert, invert))
fig.canvas.draw()
fig.set_size_inches((7, 4))
assert_allclose(ax.get_position().extents, [0.125, 0.1, 0.9, 0.9])
def color_boxes(fig, axs):
    """
    Helper for the tests below that check the extents of various axes
    elements.  Draws colored rectangles around the axis, spine, axes and
    tight bounding boxes and returns them as (bbaxis, bbspines, bbax, bbtb).
    """
fig.canvas.draw()
renderer = fig.canvas.get_renderer()
bbaxis = []
for nn, axx in enumerate([axs.xaxis, axs.yaxis]):
bb = axx.get_tightbbox(renderer)
if bb:
axisr = plt.Rectangle((bb.x0, bb.y0), width=bb.width,
height=bb.height, linewidth=0.7, edgecolor='y',
facecolor="none", transform=None, zorder=3)
fig.add_artist(axisr)
bbaxis += [bb]
bbspines = []
for nn, a in enumerate(['bottom', 'top', 'left', 'right']):
bb = axs.spines[a].get_window_extent(renderer)
spiner = plt.Rectangle((bb.x0, bb.y0), width=bb.width,
height=bb.height, linewidth=0.7,
edgecolor="green", facecolor="none",
transform=None, zorder=3)
fig.add_artist(spiner)
bbspines += [bb]
bb = axs.get_window_extent()
rect2 = plt.Rectangle((bb.x0, bb.y0), width=bb.width, height=bb.height,
linewidth=1.5, edgecolor="magenta",
facecolor="none", transform=None, zorder=2)
fig.add_artist(rect2)
bbax = bb
bb2 = axs.get_tightbbox(renderer)
rect2 = plt.Rectangle((bb2.x0, bb2.y0), width=bb2.width,
height=bb2.height, linewidth=3, edgecolor="red",
facecolor="none", transform=None, zorder=1)
fig.add_artist(rect2)
bbtb = bb2
return bbaxis, bbspines, bbax, bbtb
def test_normal_axes():
with rc_context({'_internal.classic_mode': False}):
fig, ax = plt.subplots(dpi=200, figsize=(6, 6))
fig.canvas.draw()
plt.close(fig)
bbaxis, bbspines, bbax, bbtb = color_boxes(fig, ax)
# test the axis bboxes
target = [
[123.375, 75.88888888888886, 983.25, 33.0],
[85.51388888888889, 99.99999999999997, 53.375, 993.0]
]
for nn, b in enumerate(bbaxis):
targetbb = mtransforms.Bbox.from_bounds(*target[nn])
assert_array_almost_equal(b.bounds, targetbb.bounds, decimal=2)
target = [
[150.0, 119.999, 930.0, 11.111],
[150.0, 1080.0, 930.0, 0.0],
[150.0, 119.9999, 11.111, 960.0],
[1068.8888, 119.9999, 11.111, 960.0]
]
for nn, b in enumerate(bbspines):
targetbb = mtransforms.Bbox.from_bounds(*target[nn])
assert_array_almost_equal(b.bounds, targetbb.bounds, decimal=2)
target = [150.0, 119.99999999999997, 930.0, 960.0]
targetbb = mtransforms.Bbox.from_bounds(*target)
assert_array_almost_equal(bbax.bounds, targetbb.bounds, decimal=2)
target = [85.5138, 75.88888, 1021.11, 1017.11]
targetbb = mtransforms.Bbox.from_bounds(*target)
assert_array_almost_equal(bbtb.bounds, targetbb.bounds, decimal=2)
# test that get_position roundtrips to get_window_extent
axbb = ax.get_position().transformed(fig.transFigure).bounds
assert_array_almost_equal(axbb, ax.get_window_extent().bounds, decimal=2)
def test_nodecorator():
with rc_context({'_internal.classic_mode': False}):
fig, ax = plt.subplots(dpi=200, figsize=(6, 6))
fig.canvas.draw()
ax.set(xticklabels=[], yticklabels=[])
bbaxis, bbspines, bbax, bbtb = color_boxes(fig, ax)
# test the axis bboxes
target = [
None,
None
]
for nn, b in enumerate(bbaxis):
assert b is None
target = [
[150.0, 119.999, 930.0, 11.111],
[150.0, 1080.0, 930.0, 0.0],
[150.0, 119.9999, 11.111, 960.0],
[1068.8888, 119.9999, 11.111, 960.0]
]
for nn, b in enumerate(bbspines):
targetbb = mtransforms.Bbox.from_bounds(*target[nn])
assert_allclose(b.bounds, targetbb.bounds, atol=1e-2)
target = [150.0, 119.99999999999997, 930.0, 960.0]
targetbb = mtransforms.Bbox.from_bounds(*target)
assert_allclose(bbax.bounds, targetbb.bounds, atol=1e-2)
target = [150., 120., 930., 960.]
targetbb = mtransforms.Bbox.from_bounds(*target)
assert_allclose(bbtb.bounds, targetbb.bounds, atol=1e-2)
def test_displaced_spine():
with rc_context({'_internal.classic_mode': False}):
fig, ax = plt.subplots(dpi=200, figsize=(6, 6))
ax.set(xticklabels=[], yticklabels=[])
ax.spines['bottom'].set_position(('axes', -0.1))
fig.canvas.draw()
bbaxis, bbspines, bbax, bbtb = color_boxes(fig, ax)
target = [
[150., 24., 930., 11.111111],
[150.0, 1080.0, 930.0, 0.0],
[150.0, 119.9999, 11.111, 960.0],
[1068.8888, 119.9999, 11.111, 960.0]
]
        for nn, b in enumerate(bbspines):
            targetbb = mtransforms.Bbox.from_bounds(*target[nn])
            assert_allclose(b.bounds, targetbb.bounds, atol=1e-2)
target = [150.0, 119.99999999999997, 930.0, 960.0]
targetbb = mtransforms.Bbox.from_bounds(*target)
assert_allclose(bbax.bounds, targetbb.bounds, atol=1e-2)
target = [150., 24., 930., 1056.]
targetbb = mtransforms.Bbox.from_bounds(*target)
assert_allclose(bbtb.bounds, targetbb.bounds, atol=1e-2)
def test_tickdirs():
"""
Switch the tickdirs and make sure the bboxes switch with them
"""
targets = [[[150.0, 120.0, 930.0, 11.1111],
[150.0, 120.0, 11.111, 960.0]],
[[150.0, 108.8889, 930.0, 11.111111111111114],
[138.889, 120, 11.111, 960.0]],
[[150.0, 114.44444444444441, 930.0, 11.111111111111114],
[144.44444444444446, 119.999, 11.111, 960.0]]]
for dnum, dirs in enumerate(['in', 'out', 'inout']):
with rc_context({'_internal.classic_mode': False}):
fig, ax = plt.subplots(dpi=200, figsize=(6, 6))
ax.tick_params(direction=dirs)
fig.canvas.draw()
bbaxis, bbspines, bbax, bbtb = color_boxes(fig, ax)
for nn, num in enumerate([0, 2]):
targetbb = mtransforms.Bbox.from_bounds(*targets[dnum][nn])
assert_allclose(bbspines[num].bounds, targetbb.bounds,
atol=1e-2)
def test_minor_accountedfor():
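    # Long minor ticks should be included in the spine bounding boxes
    # reported by color_boxes(); compare against a figure with only major
    # ticks first.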
with rc_context({'_internal.classic_mode': False}):
fig, ax = plt.subplots(dpi=200, figsize=(6, 6))
fig.canvas.draw()
ax.tick_params(which='both', direction='out')
bbaxis, bbspines, bbax, bbtb = color_boxes(fig, ax)
bbaxis, bbspines, bbax, bbtb = color_boxes(fig, ax)
targets = [[150.0, 108.88888888888886, 930.0, 11.111111111111114],
[138.8889, 119.9999, 11.1111, 960.0]]
for n in range(2):
targetbb = mtransforms.Bbox.from_bounds(*targets[n])
assert_allclose(bbspines[n * 2].bounds, targetbb.bounds,
atol=1e-2)
fig, ax = plt.subplots(dpi=200, figsize=(6, 6))
fig.canvas.draw()
ax.tick_params(which='both', direction='out')
ax.minorticks_on()
ax.tick_params(axis='both', which='minor', length=30)
fig.canvas.draw()
bbaxis, bbspines, bbax, bbtb = color_boxes(fig, ax)
targets = [[150.0, 36.66666666666663, 930.0, 83.33333333333334],
[66.6667, 120.0, 83.3333, 960.0]]
for n in range(2):
targetbb = mtransforms.Bbox.from_bounds(*targets[n])
assert_allclose(bbspines[n * 2].bounds, targetbb.bounds,
atol=1e-2)
def test_get_tightbbox_polar():
fig, ax = plt.subplots(subplot_kw={'projection': 'polar'})
fig.canvas.draw()
bb = ax.get_tightbbox(fig.canvas.get_renderer())
assert_allclose(bb.extents,
[107.7778, 29.2778, 539.7847, 450.7222], rtol=1e-03)
@check_figures_equal(extensions=["png"])
def test_axis_bool_arguments(fig_test, fig_ref):
# Test if False and "off" give the same
fig_test.add_subplot(211).axis(False)
fig_ref.add_subplot(211).axis("off")
# Test if True after False gives the same as "on"
ax = fig_test.add_subplot(212)
ax.axis(False)
ax.axis(True)
fig_ref.add_subplot(212).axis("on")
def test_datetime_masked():
# make sure that all-masked data falls back to the viewlim
# set in convert.axisinfo....
x = np.array([datetime.datetime(2017, 1, n) for n in range(1, 6)])
y = np.array([1, 2, 3, 4, 5])
m = np.ma.masked_greater(y, 0)
fig, ax = plt.subplots()
ax.plot(x, m)
# these are the default viewlim
assert ax.get_xlim() == (730120.0, 733773.0)
def test_hist_auto_bins():
_, bins, _ = plt.hist([[1, 2, 3], [3, 4, 5, 6]], bins='auto')
assert bins[0] <= 1
assert bins[-1] >= 6
def test_hist_nan_data():
fig, (ax1, ax2) = plt.subplots(2)
data = [1, 2, 3]
nan_data = data + [np.nan]
bins, edges, _ = ax1.hist(data)
with np.errstate(invalid='ignore'):
nanbins, nanedges, _ = ax2.hist(nan_data)
assert np.allclose(bins, nanbins)
assert np.allclose(edges, nanedges)
import io
import os
from pathlib import Path
import sys
import tempfile
import warnings
import numpy as np
import pytest
from matplotlib import dviread, pyplot as plt, checkdep_usetex, rcParams
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.testing.compare import compare_images
from matplotlib.testing.decorators import image_comparison
from matplotlib.testing.determinism import (_determinism_source_date_epoch,
_determinism_check)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
needs_usetex = pytest.mark.skipif(
not checkdep_usetex(True),
reason="This test needs a TeX installation")
@image_comparison(baseline_images=['pdf_use14corefonts'],
extensions=['pdf'])
def test_use14corefonts():
rcParams['pdf.use14corefonts'] = True
rcParams['font.family'] = 'sans-serif'
rcParams['font.size'] = 8
rcParams['font.sans-serif'] = ['Helvetica']
rcParams['pdf.compression'] = 0
text = '''A three-line text positioned just above a blue line
and containing some French characters and the euro symbol:
"Merci pépé pour les 10 €"'''
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_title('Test PDF backend with option use14corefonts=True')
ax.text(0.5, 0.5, text, horizontalalignment='center',
verticalalignment='bottom',
fontsize=14)
ax.axhline(0.5, linewidth=0.5)
def test_type42():
rcParams['pdf.fonttype'] = 42
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, 2, 3])
fig.savefig(io.BytesIO())
def test_multipage_pagecount():
with PdfPages(io.BytesIO()) as pdf:
assert pdf.get_pagecount() == 0
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, 2, 3])
fig.savefig(pdf, format="pdf")
assert pdf.get_pagecount() == 1
pdf.savefig()
assert pdf.get_pagecount() == 2
def test_multipage_properfinalize():
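    # The multipage PDF must be finalized exactly once (a single 'startxref'
    # marker) and should stay small even with many pages.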
pdfio = io.BytesIO()
with PdfPages(pdfio) as pdf:
for i in range(10):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title('This is a long title')
fig.savefig(pdf, format="pdf")
pdfio.seek(0)
assert sum(b'startxref' in line for line in pdfio) == 1
assert sys.getsizeof(pdfio) < 40000
def test_multipage_keep_empty():
from matplotlib.backends.backend_pdf import PdfPages
from tempfile import NamedTemporaryFile
# test empty pdf files
# test that an empty pdf is left behind with keep_empty=True (default)
with NamedTemporaryFile(delete=False) as tmp:
with PdfPages(tmp) as pdf:
filename = pdf._file.fh.name
assert os.path.exists(filename)
os.remove(filename)
    # test that an empty pdf is removed afterwards with keep_empty=False
with PdfPages(filename, keep_empty=False) as pdf:
pass
assert not os.path.exists(filename)
# test pdf files with content, they should never be deleted
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, 2, 3])
# test that a non-empty pdf is left behind with keep_empty=True (default)
with NamedTemporaryFile(delete=False) as tmp:
with PdfPages(tmp) as pdf:
filename = pdf._file.fh.name
pdf.savefig()
assert os.path.exists(filename)
os.remove(filename)
# test that a non-empty pdf is left behind with keep_empty=False
with NamedTemporaryFile(delete=False) as tmp:
with PdfPages(tmp, keep_empty=False) as pdf:
filename = pdf._file.fh.name
pdf.savefig()
assert os.path.exists(filename)
os.remove(filename)
def test_composite_image():
# Test that figures can be saved with and without combining multiple images
# (on a single set of axes) into a single composite image.
X, Y = np.meshgrid(np.arange(-5, 5, 1), np.arange(-5, 5, 1))
Z = np.sin(Y ** 2)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim(0, 3)
ax.imshow(Z, extent=[0, 1, 0, 1])
ax.imshow(Z[::-1], extent=[2, 3, 0, 1])
plt.rcParams['image.composite_image'] = True
with PdfPages(io.BytesIO()) as pdf:
fig.savefig(pdf, format="pdf")
assert len(pdf._file._images) == 1
plt.rcParams['image.composite_image'] = False
with PdfPages(io.BytesIO()) as pdf:
fig.savefig(pdf, format="pdf")
assert len(pdf._file._images) == 2
def test_pdfpages_fspath():
with PdfPages(Path(os.devnull)) as pdf:
pdf.savefig(plt.figure())
def test_source_date_epoch():
"""Test SOURCE_DATE_EPOCH support for PDF output"""
_determinism_source_date_epoch("pdf", b"/CreationDate (D:20000101000000Z)")
def test_determinism_plain():
"""Test for reproducible PDF output: simple figure"""
_determinism_check('', format="pdf")
def test_determinism_images():
"""Test for reproducible PDF output: figure with different images"""
_determinism_check('i', format="pdf")
def test_determinism_hatches():
"""Test for reproducible PDF output: figure with different hatches"""
_determinism_check('h', format="pdf")
def test_determinism_markers():
"""Test for reproducible PDF output: figure with different markers"""
_determinism_check('m', format="pdf")
def test_determinism_all():
"""Test for reproducible PDF output"""
_determinism_check(format="pdf")
@image_comparison(baseline_images=['hatching_legend'],
extensions=['pdf'])
def test_hatching_legend():
"""Test for correct hatching on patches in legend"""
fig = plt.figure(figsize=(1, 2))
a = plt.Rectangle([0, 0], 0, 0, facecolor="green", hatch="XXXX")
b = plt.Rectangle([0, 0], 0, 0, facecolor="blue", hatch="XXXX")
fig.legend([a, b, a, b], ["", "", "", ""])
@image_comparison(baseline_images=['grayscale_alpha'],
extensions=['pdf'])
def test_grayscale_alpha():
"""Masking images with NaN did not work for grayscale images"""
x, y = np.ogrid[-2:2:.1, -2:2:.1]
dd = np.exp(-(x**2 + y**2))
dd[dd < .1] = np.nan
fig, ax = plt.subplots()
ax.imshow(dd, interpolation='none', cmap='gray_r')
ax.set_xticks([])
ax.set_yticks([])
# This test tends to hit a TeX cache lock on AppVeyor.
@pytest.mark.flaky(reruns=3)
@needs_usetex
def test_missing_psfont(monkeypatch):
"""An error is raised if a TeX font lacks a Type-1 equivalent"""
def psfont(*args, **kwargs):
return dviread.PsFont(texname='texfont', psname='Some Font',
effects=None, encoding=None, filename=None)
monkeypatch.setattr(dviread.PsfontsMap, '__getitem__', psfont)
rcParams['text.usetex'] = True
fig, ax = plt.subplots()
ax.text(0.5, 0.5, 'hello')
with tempfile.TemporaryFile() as tmpfile, pytest.raises(ValueError):
fig.savefig(tmpfile, format='pdf')
@pytest.mark.style('default')
def test_pdf_savefig_when_color_is_none(tmpdir):
fig, ax = plt.subplots()
plt.axis('off')
ax.plot(np.sin(np.linspace(-5, 5, 100)), 'v', c='none')
actual_image = tmpdir.join('figure.pdf')
expected_image = tmpdir.join('figure.eps')
fig.savefig(str(actual_image), format='pdf')
fig.savefig(str(expected_image), format='eps')
result = compare_images(str(actual_image), str(expected_image), 0)
assert result is None
@needs_usetex
def test_failing_latex(tmpdir):
"""Test failing latex subprocess call"""
path = str(tmpdir.join("tmpoutput.pdf"))
rcParams['text.usetex'] = True
# This fails with "Double subscript"
plt.xlabel("$22_2_2$")
with pytest.raises(RuntimeError):
plt.savefig(path)
def test_empty_rasterized():
    # Check that figures containing only empty rasterized artists save to PDF without error
fig, ax = plt.subplots()
ax.plot([], [], rasterized=True)
fig.savefig(io.BytesIO(), format="pdf")
|
dd418546d406a01c4ce0cb31b15502a9f6608747a5106ea84350bc346cc1f000
|
import re
import numpy as np
import pytest
from matplotlib import _preprocess_data
# Notes on testing the plotting functions itself
# * the individual decorated plotting functions are tested in 'test_axes.py'
# * that pyplot functions accept a data kwarg is only tested in
# test_axes.test_pie_linewidth_0
# this gets used in multiple tests, so define it here
@_preprocess_data(replace_names=["x", "y"], label_namer="y")
def plot_func(ax, x, y, ls="x", label=None, w="xyz"):
return ("x: %s, y: %s, ls: %s, w: %s, label: %s" % (
list(x), list(y), ls, w, label))
all_funcs = [plot_func]
all_func_ids = ['plot_func']
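# Illustrative sketch (not a test; the helper name is ours): with a ``data``
# mapping, string arguments naming keys of ``data`` are replaced by the
# mapped values, and the label is taken from the argument named by
# ``label_namer`` ("y" here), exactly as the tests below assert.
def _plot_func_demo():
    data = {"a": [1, 2], "b": [8, 9]}
    # returns "x: [1, 2], y: [8, 9], ls: x, w: xyz, label: b"
    return plot_func(None, "a", "b", data=data)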
def test_compiletime_checks():
"""test decorator invocations -> no replacements"""
def func(ax, x, y): pass
def func_args(ax, x, y, *args): pass
def func_kwargs(ax, x, y, **kwargs): pass
def func_no_ax_args(*args, **kwargs): pass
# this is ok
_preprocess_data(replace_names=["x", "y"])(func)
_preprocess_data(replace_names=["x", "y"])(func_kwargs)
# this has "enough" information to do all the replaces
_preprocess_data(replace_names=["x", "y"])(func_args)
# no positional_parameter_names but needed due to replaces
with pytest.raises(AssertionError):
# z is unknown
_preprocess_data(replace_names=["x", "y", "z"])(func_args)
# no replacements at all -> all ok...
_preprocess_data(replace_names=[], label_namer=None)(func)
_preprocess_data(replace_names=[], label_namer=None)(func_args)
_preprocess_data(replace_names=[], label_namer=None)(func_kwargs)
_preprocess_data(replace_names=[], label_namer=None)(func_no_ax_args)
# label namer is unknown
with pytest.raises(AssertionError):
_preprocess_data(label_namer="z")(func)
with pytest.raises(AssertionError):
_preprocess_data(label_namer="z")(func_args)
@pytest.mark.parametrize('func', all_funcs, ids=all_func_ids)
def test_function_call_without_data(func):
"""test without data -> no replacements"""
assert (func(None, "x", "y") ==
"x: ['x'], y: ['y'], ls: x, w: xyz, label: None")
assert (func(None, x="x", y="y") ==
"x: ['x'], y: ['y'], ls: x, w: xyz, label: None")
assert (func(None, "x", "y", label="") ==
"x: ['x'], y: ['y'], ls: x, w: xyz, label: ")
assert (func(None, "x", "y", label="text") ==
"x: ['x'], y: ['y'], ls: x, w: xyz, label: text")
assert (func(None, x="x", y="y", label="") ==
"x: ['x'], y: ['y'], ls: x, w: xyz, label: ")
assert (func(None, x="x", y="y", label="text") ==
"x: ['x'], y: ['y'], ls: x, w: xyz, label: text")
@pytest.mark.parametrize('func', all_funcs, ids=all_func_ids)
def test_function_call_with_dict_data(func):
"""Test with dict data -> label comes from the value of 'x' parameter """
data = {"a": [1, 2], "b": [8, 9], "w": "NOT"}
assert (func(None, "a", "b", data=data) ==
"x: [1, 2], y: [8, 9], ls: x, w: xyz, label: b")
assert (func(None, x="a", y="b", data=data) ==
"x: [1, 2], y: [8, 9], ls: x, w: xyz, label: b")
assert (func(None, "a", "b", label="", data=data) ==
"x: [1, 2], y: [8, 9], ls: x, w: xyz, label: ")
assert (func(None, "a", "b", label="text", data=data) ==
"x: [1, 2], y: [8, 9], ls: x, w: xyz, label: text")
assert (func(None, x="a", y="b", label="", data=data) ==
"x: [1, 2], y: [8, 9], ls: x, w: xyz, label: ")
assert (func(None, x="a", y="b", label="text", data=data) ==
"x: [1, 2], y: [8, 9], ls: x, w: xyz, label: text")
@pytest.mark.parametrize('func', all_funcs, ids=all_func_ids)
def test_function_call_with_dict_data_not_in_data(func):
"test for the case that one var is not in data -> half replaces, half kept"
data = {"a": [1, 2], "w": "NOT"}
assert (func(None, "a", "b", data=data) ==
"x: [1, 2], y: ['b'], ls: x, w: xyz, label: b")
assert (func(None, x="a", y="b", data=data) ==
"x: [1, 2], y: ['b'], ls: x, w: xyz, label: b")
assert (func(None, "a", "b", label="", data=data) ==
"x: [1, 2], y: ['b'], ls: x, w: xyz, label: ")
assert (func(None, "a", "b", label="text", data=data) ==
"x: [1, 2], y: ['b'], ls: x, w: xyz, label: text")
assert (func(None, x="a", y="b", label="", data=data) ==
"x: [1, 2], y: ['b'], ls: x, w: xyz, label: ")
assert (func(None, x="a", y="b", label="text", data=data) ==
"x: [1, 2], y: ['b'], ls: x, w: xyz, label: text")
@pytest.mark.parametrize('func', all_funcs, ids=all_func_ids)
def test_function_call_with_pandas_data(func, pd):
"""test with pandas dataframe -> label comes from data["col"].name """
data = pd.DataFrame({"a": np.array([1, 2], dtype=np.int32),
"b": np.array([8, 9], dtype=np.int32),
"w": ["NOT", "NOT"]})
assert (func(None, "a", "b", data=data) ==
"x: [1, 2], y: [8, 9], ls: x, w: xyz, label: b")
assert (func(None, x="a", y="b", data=data) ==
"x: [1, 2], y: [8, 9], ls: x, w: xyz, label: b")
assert (func(None, "a", "b", label="", data=data) ==
"x: [1, 2], y: [8, 9], ls: x, w: xyz, label: ")
assert (func(None, "a", "b", label="text", data=data) ==
"x: [1, 2], y: [8, 9], ls: x, w: xyz, label: text")
assert (func(None, x="a", y="b", label="", data=data) ==
"x: [1, 2], y: [8, 9], ls: x, w: xyz, label: ")
assert (func(None, x="a", y="b", label="text", data=data) ==
"x: [1, 2], y: [8, 9], ls: x, w: xyz, label: text")
def test_function_call_replace_all():
"""Test without a "replace_names" argument, all vars should be replaced"""
data = {"a": [1, 2], "b": [8, 9], "x": "xyz"}
@_preprocess_data(label_namer="y")
def func_replace_all(ax, x, y, ls="x", label=None, w="NOT"):
return "x: %s, y: %s, ls: %s, w: %s, label: %s" % (
list(x), list(y), ls, w, label)
assert (func_replace_all(None, "a", "b", w="x", data=data) ==
"x: [1, 2], y: [8, 9], ls: x, w: xyz, label: b")
assert (func_replace_all(None, x="a", y="b", w="x", data=data) ==
"x: [1, 2], y: [8, 9], ls: x, w: xyz, label: b")
assert (func_replace_all(None, "a", "b", w="x", label="", data=data) ==
"x: [1, 2], y: [8, 9], ls: x, w: xyz, label: ")
assert (
func_replace_all(None, "a", "b", w="x", label="text", data=data) ==
"x: [1, 2], y: [8, 9], ls: x, w: xyz, label: text")
assert (
func_replace_all(None, x="a", y="b", w="x", label="", data=data) ==
"x: [1, 2], y: [8, 9], ls: x, w: xyz, label: ")
assert (
func_replace_all(None, x="a", y="b", w="x", label="text", data=data) ==
"x: [1, 2], y: [8, 9], ls: x, w: xyz, label: text")
def test_no_label_replacements():
"""Test with "label_namer=None" -> no label replacement at all"""
@_preprocess_data(replace_names=["x", "y"], label_namer=None)
def func_no_label(ax, x, y, ls="x", label=None, w="xyz"):
return "x: %s, y: %s, ls: %s, w: %s, label: %s" % (
list(x), list(y), ls, w, label)
data = {"a": [1, 2], "b": [8, 9], "w": "NOT"}
assert (func_no_label(None, "a", "b", data=data) ==
"x: [1, 2], y: [8, 9], ls: x, w: xyz, label: None")
assert (func_no_label(None, x="a", y="b", data=data) ==
"x: [1, 2], y: [8, 9], ls: x, w: xyz, label: None")
assert (func_no_label(None, "a", "b", label="", data=data) ==
"x: [1, 2], y: [8, 9], ls: x, w: xyz, label: ")
assert (func_no_label(None, "a", "b", label="text", data=data) ==
"x: [1, 2], y: [8, 9], ls: x, w: xyz, label: text")
def test_more_args_than_pos_parameter():
@_preprocess_data(replace_names=["x", "y"], label_namer="y")
def func(ax, x, y, z=1):
pass
data = {"a": [1, 2], "b": [8, 9], "w": "NOT"}
with pytest.raises(TypeError):
func(None, "a", "b", "z", "z", data=data)
def test_docstring_addition():
@_preprocess_data()
def funcy(ax, *args, **kwargs):
"""Funcy does nothing"""
pass
assert re.search(r".*All positional and all keyword arguments\.",
funcy.__doc__)
assert not re.search(r".*All positional arguments\.",
funcy.__doc__)
assert not re.search(r".*All arguments with the following names: .*",
funcy.__doc__)
@_preprocess_data(replace_names=[])
def funcy(ax, x, y, z, bar=None):
"""Funcy does nothing"""
pass
assert not re.search(r".*All positional arguments\.",
funcy.__doc__)
assert not re.search(r".*All positional and all keyword arguments\.",
funcy.__doc__)
assert not re.search(r".*All arguments with the following names: .*",
funcy.__doc__)
@_preprocess_data(replace_names=["bar"])
def funcy(ax, x, y, z, bar=None):
"""Funcy does nothing"""
pass
assert not re.search(r".*All positional arguments\.",
funcy.__doc__)
assert re.search(r".*All arguments with the following names: 'bar'\.",
funcy.__doc__)
assert not re.search(r".*All positional and all keyword arguments\.",
funcy.__doc__)
@_preprocess_data(replace_names=["x", "bar"])
def funcy(ax, x, y, z, bar=None):
"""Funcy does nothing"""
pass
# lists can print in any order, so test for both x,bar and bar,x
assert re.search(r".*All arguments with the following names: '.*', '.*'\.",
funcy.__doc__)
assert re.search(r".*'x'.*", funcy.__doc__)
assert re.search(r".*'bar'.*", funcy.__doc__)
assert not re.search(r".*All positional and all keyword arguments\.",
funcy.__doc__)
assert not re.search(r".*All positional arguments\.",
funcy.__doc__)
|
3eeec1fe6b905b464af286e17a583234616b35d92b936056cbdd9fe6752b48b5
|
import datetime
import tempfile
from unittest.mock import Mock
import dateutil.tz
import dateutil.rrule
import numpy as np
import pytest
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
from matplotlib.cbook import MatplotlibDeprecationWarning
import matplotlib.dates as mdates
import matplotlib.ticker as mticker
from matplotlib import rc_context
def __has_pytz():
try:
import pytz
return True
except ImportError:
return False
def test_date_numpyx():
# test that numpy dates work properly...
base = datetime.datetime(2017, 1, 1)
time = [base + datetime.timedelta(days=x) for x in range(0, 3)]
timenp = np.array(time, dtype='datetime64[ns]')
data = np.array([0., 2., 1.])
fig = plt.figure(figsize=(10, 2))
ax = fig.add_subplot(1, 1, 1)
h, = ax.plot(time, data)
hnp, = ax.plot(timenp, data)
assert np.array_equal(h.get_xdata(orig=False), hnp.get_xdata(orig=False))
fig = plt.figure(figsize=(10, 2))
ax = fig.add_subplot(1, 1, 1)
h, = ax.plot(data, time)
hnp, = ax.plot(data, timenp)
assert np.array_equal(h.get_ydata(orig=False), hnp.get_ydata(orig=False))
@pytest.mark.parametrize('t0', [datetime.datetime(2017, 1, 1, 0, 1, 1),
[datetime.datetime(2017, 1, 1, 0, 1, 1),
datetime.datetime(2017, 1, 1, 1, 1, 1)],
[[datetime.datetime(2017, 1, 1, 0, 1, 1),
datetime.datetime(2017, 1, 1, 1, 1, 1)],
[datetime.datetime(2017, 1, 1, 2, 1, 1),
datetime.datetime(2017, 1, 1, 3, 1, 1)]]])
@pytest.mark.parametrize('dtype', ['datetime64[s]',
'datetime64[us]',
'datetime64[ms]',
'datetime64[ns]'])
def test_date_date2num_numpy(t0, dtype):
time = mdates.date2num(t0)
tnp = np.array(t0, dtype=dtype)
nptime = mdates.date2num(tnp)
assert np.array_equal(time, nptime)
@pytest.mark.parametrize('dtype', ['datetime64[s]',
'datetime64[us]',
'datetime64[ms]',
'datetime64[ns]'])
def test_date2num_NaT(dtype):
t0 = datetime.datetime(2017, 1, 1, 0, 1, 1)
tmpl = [mdates.date2num(t0), np.nan]
tnp = np.array([t0, 'NaT'], dtype=dtype)
nptime = mdates.date2num(tnp)
np.testing.assert_array_equal(tmpl, nptime)
@pytest.mark.parametrize('units', ['s', 'ms', 'us', 'ns'])
def test_date2num_NaT_scalar(units):
tmpl = mdates.date2num(np.datetime64('NaT', units))
assert np.isnan(tmpl)
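# Illustrative sketch (not a test; the helper name is ours): date2num maps
# datetimes to matplotlib's float day numbers and num2date maps them back as
# timezone-aware datetimes; the NaT cases above simply map to NaN instead of
# raising.
def _date2num_roundtrip_demo():
    d = datetime.datetime(2017, 1, 1, 12, 0, tzinfo=mdates.UTC)
    num = mdates.date2num(d)      # float day number
    back = mdates.num2date(num)   # timezone-aware datetime in UTC
    return num, back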
@image_comparison(baseline_images=['date_empty'], extensions=['png'])
def test_date_empty():
# make sure mpl does the right thing when told to plot dates even
# if no date data has been presented, cf
# http://sourceforge.net/tracker/?func=detail&aid=2850075&group_id=80706&atid=560720
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.xaxis_date()
@image_comparison(baseline_images=['date_axhspan'], extensions=['png'])
def test_date_axhspan():
# test ax hspan with date inputs
t0 = datetime.datetime(2009, 1, 20)
tf = datetime.datetime(2009, 1, 21)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.axhspan(t0, tf, facecolor="blue", alpha=0.25)
ax.set_ylim(t0 - datetime.timedelta(days=5),
tf + datetime.timedelta(days=5))
fig.subplots_adjust(left=0.25)
@image_comparison(baseline_images=['date_axvspan'], extensions=['png'])
def test_date_axvspan():
    # test axvspan with date inputs
t0 = datetime.datetime(2000, 1, 20)
tf = datetime.datetime(2010, 1, 21)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.axvspan(t0, tf, facecolor="blue", alpha=0.25)
ax.set_xlim(t0 - datetime.timedelta(days=720),
tf + datetime.timedelta(days=720))
fig.autofmt_xdate()
@image_comparison(baseline_images=['date_axhline'],
extensions=['png'])
def test_date_axhline():
# test ax hline with date inputs
t0 = datetime.datetime(2009, 1, 20)
tf = datetime.datetime(2009, 1, 31)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.axhline(t0, color="blue", lw=3)
ax.set_ylim(t0 - datetime.timedelta(days=5),
tf + datetime.timedelta(days=5))
fig.subplots_adjust(left=0.25)
@image_comparison(baseline_images=['date_axvline'],
extensions=['png'])
def test_date_axvline():
    # test axvline with date inputs
t0 = datetime.datetime(2000, 1, 20)
tf = datetime.datetime(2000, 1, 21)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.axvline(t0, color="red", lw=3)
ax.set_xlim(t0 - datetime.timedelta(days=5),
tf + datetime.timedelta(days=5))
fig.autofmt_xdate()
def test_too_many_date_ticks():
# Attempt to test SF 2715172, see
# https://sourceforge.net/tracker/?func=detail&aid=2715172&group_id=80706&atid=560720
    # setting equal datetimes triggers an expander call in
# transforms.nonsingular which results in too many ticks in the
# DayLocator. This should trigger a Locator.MAXTICKS RuntimeError
t0 = datetime.datetime(2000, 1, 20)
tf = datetime.datetime(2000, 1, 20)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
with pytest.warns(UserWarning) as rec:
ax.set_xlim((t0, tf), auto=True)
assert len(rec) == 1
assert \
'Attempting to set identical left == right' in str(rec[0].message)
ax.plot([], [])
ax.xaxis.set_major_locator(mdates.DayLocator())
with pytest.raises(RuntimeError):
fig.savefig('junk.png')
@image_comparison(baseline_images=['RRuleLocator_bounds'], extensions=['png'])
def test_RRuleLocator():
import matplotlib.testing.jpl_units as units
units.register()
# This will cause the RRuleLocator to go out of bounds when it tries
# to add padding to the limits, so we make sure it caps at the correct
# boundary values.
t0 = datetime.datetime(1000, 1, 1)
tf = datetime.datetime(6000, 1, 1)
fig = plt.figure()
ax = plt.subplot(111)
ax.set_autoscale_on(True)
ax.plot([t0, tf], [0.0, 1.0], marker='o')
rrule = mdates.rrulewrapper(dateutil.rrule.YEARLY, interval=500)
locator = mdates.RRuleLocator(rrule)
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(mdates.AutoDateFormatter(locator))
ax.autoscale_view()
fig.autofmt_xdate()
def test_RRuleLocator_dayrange():
loc = mdates.DayLocator()
x1 = datetime.datetime(year=1, month=1, day=1, tzinfo=mdates.UTC)
y1 = datetime.datetime(year=1, month=1, day=16, tzinfo=mdates.UTC)
loc.tick_values(x1, y1)
# On success, no overflow error shall be thrown
@image_comparison(baseline_images=['DateFormatter_fractionalSeconds'],
extensions=['png'])
def test_DateFormatter():
import matplotlib.testing.jpl_units as units
units.register()
    # Let's make sure that DateFormatter will allow us to have tick marks
# at intervals of fractional seconds.
t0 = datetime.datetime(2001, 1, 1, 0, 0, 0)
tf = datetime.datetime(2001, 1, 1, 0, 0, 1)
fig = plt.figure()
ax = plt.subplot(111)
ax.set_autoscale_on(True)
ax.plot([t0, tf], [0.0, 1.0], marker='o')
# rrule = mpldates.rrulewrapper( dateutil.rrule.YEARLY, interval=500 )
# locator = mpldates.RRuleLocator( rrule )
# ax.xaxis.set_major_locator( locator )
# ax.xaxis.set_major_formatter( mpldates.AutoDateFormatter(locator) )
ax.autoscale_view()
fig.autofmt_xdate()
def test_locator_set_formatter():
"""
    Test that setting only the locator updates the AutoDateFormatter to use
    the new locator.
"""
plt.rcParams["date.autoformatter.minute"] = "%d %H:%M"
t = [datetime.datetime(2018, 9, 30, 8, 0),
datetime.datetime(2018, 9, 30, 8, 59),
datetime.datetime(2018, 9, 30, 10, 30)]
x = [2, 3, 1]
fig, ax = plt.subplots()
ax.plot(t, x)
ax.xaxis.set_major_locator(mdates.MinuteLocator((0, 30)))
fig.canvas.draw()
ticklabels = [tl.get_text() for tl in ax.get_xticklabels()]
expected = ['30 08:00', '30 08:30', '30 09:00',
'30 09:30', '30 10:00', '30 10:30']
assert ticklabels == expected
ax.xaxis.set_major_locator(mticker.NullLocator())
ax.xaxis.set_minor_locator(mdates.MinuteLocator((5, 55)))
decoy_loc = mdates.MinuteLocator((12, 27))
ax.xaxis.set_minor_formatter(mdates.AutoDateFormatter(decoy_loc))
ax.xaxis.set_minor_locator(mdates.MinuteLocator((15, 45)))
fig.canvas.draw()
ticklabels = [tl.get_text() for tl in ax.get_xticklabels(which="minor")]
expected = ['30 08:15', '30 08:45', '30 09:15', '30 09:45', '30 10:15']
assert ticklabels == expected
def test_date_formatter_strftime():
"""
    Tests that DateFormatter matches datetime.strftime and checks
    microsecond handling for years before 1900 (bug #3179) as well as
    a few related pre-1900 issues.
"""
def test_strftime_fields(dt):
"""For datetime object dt, check DateFormatter fields"""
# Note: the last couple of %%s are to check multiple %s are handled
# properly; %% should get replaced by %.
formatter = mdates.DateFormatter("%w %d %m %y %Y %H %I %M %S %%%f %%x")
# Compute date fields without using datetime.strftime,
# since datetime.strftime does not work before year 1900
formatted_date_str = (
"{weekday} {day:02d} {month:02d} {year:02d} {full_year:04d} "
"{hour24:02d} {hour12:02d} {minute:02d} {second:02d} "
"%{microsecond:06d} %x"
.format(
weekday=str((dt.weekday() + 1) % 7),
day=dt.day,
month=dt.month,
year=dt.year % 100,
full_year=dt.year,
hour24=dt.hour,
hour12=((dt.hour-1) % 12) + 1,
minute=dt.minute,
second=dt.second,
microsecond=dt.microsecond))
with pytest.warns(MatplotlibDeprecationWarning):
assert formatter.strftime(dt) == formatted_date_str
try:
# Test strftime("%x") with the current locale.
import locale # Might not exist on some platforms, such as Windows
locale_formatter = mdates.DateFormatter("%x")
locale_d_fmt = locale.nl_langinfo(locale.D_FMT)
expanded_formatter = mdates.DateFormatter(locale_d_fmt)
with pytest.warns(MatplotlibDeprecationWarning):
assert locale_formatter.strftime(dt) == \
expanded_formatter.strftime(dt)
except (ImportError, AttributeError):
pass
for year in range(1, 3000, 71):
        # Iterate through a sparse sample of years
test_strftime_fields(datetime.datetime(year, 1, 1))
test_strftime_fields(datetime.datetime(year, 2, 3, 4, 5, 6, 12345))
def test_date_formatter_callable():
scale = -11
locator = Mock(_get_unit=Mock(return_value=scale))
callable_formatting_function = (lambda dates, _:
[dt.strftime('%d-%m//%Y') for dt in dates])
formatter = mdates.AutoDateFormatter(locator)
formatter.scaled[-10] = callable_formatting_function
assert formatter([datetime.datetime(2014, 12, 25)]) == ['25-12//2014']
def test_drange():
"""
    This test checks that drange works as expected and that the known
    rounding errors are handled.
"""
start = datetime.datetime(2011, 1, 1, tzinfo=mdates.UTC)
end = datetime.datetime(2011, 1, 2, tzinfo=mdates.UTC)
delta = datetime.timedelta(hours=1)
# We expect 24 values in drange(start, end, delta), because drange returns
    # dates from a half-open interval [start, end)
assert len(mdates.drange(start, end, delta)) == 24
# if end is a little bit later, we expect the range to contain one element
# more
end = end + datetime.timedelta(microseconds=1)
assert len(mdates.drange(start, end, delta)) == 25
# reset end
end = datetime.datetime(2011, 1, 2, tzinfo=mdates.UTC)
    # and test drange with "complicated" floats:
    # 4 hours = 1/6 day, which is a "dangerous" float
delta = datetime.timedelta(hours=4)
daterange = mdates.drange(start, end, delta)
assert len(daterange) == 6
assert mdates.num2date(daterange[-1]) == (end - delta)
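# Illustrative sketch (not a test; the helper name is ours): drange covers
# the half-open interval [start, end), so one day sampled at one-hour steps
# yields 24 values, the first of which is exactly ``start``.
def _drange_demo():
    start = datetime.datetime(2011, 1, 1, tzinfo=mdates.UTC)
    end = datetime.datetime(2011, 1, 2, tzinfo=mdates.UTC)
    step = datetime.timedelta(hours=1)
    values = mdates.drange(start, end, step)
    return len(values), mdates.num2date(values[0])  # -> (24, datetime equal to start)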
def test_empty_date_with_year_formatter():
# exposes sf bug 2861426:
# https://sourceforge.net/tracker/?func=detail&aid=2861426&group_id=80706&atid=560720
    # update: I no longer believe this is a bug, as I commented on
    # the tracker. The question now is: what to do with this test
import matplotlib.dates as dates
fig = plt.figure()
ax = fig.add_subplot(111)
yearFmt = dates.DateFormatter('%Y')
ax.xaxis.set_major_formatter(yearFmt)
with tempfile.TemporaryFile() as fh:
with pytest.raises(ValueError):
fig.savefig(fh)
def test_auto_date_locator():
def _create_auto_date_locator(date1, date2):
locator = mdates.AutoDateLocator(interval_multiples=False)
locator.create_dummy_axis()
locator.set_view_interval(mdates.date2num(date1),
mdates.date2num(date2))
return locator
d1 = datetime.datetime(1990, 1, 1)
results = ([datetime.timedelta(weeks=52 * 200),
['1990-01-01 00:00:00+00:00', '2010-01-01 00:00:00+00:00',
'2030-01-01 00:00:00+00:00', '2050-01-01 00:00:00+00:00',
'2070-01-01 00:00:00+00:00', '2090-01-01 00:00:00+00:00',
'2110-01-01 00:00:00+00:00', '2130-01-01 00:00:00+00:00',
'2150-01-01 00:00:00+00:00', '2170-01-01 00:00:00+00:00']
],
[datetime.timedelta(weeks=52),
['1990-01-01 00:00:00+00:00', '1990-02-01 00:00:00+00:00',
'1990-03-01 00:00:00+00:00', '1990-04-01 00:00:00+00:00',
'1990-05-01 00:00:00+00:00', '1990-06-01 00:00:00+00:00',
'1990-07-01 00:00:00+00:00', '1990-08-01 00:00:00+00:00',
'1990-09-01 00:00:00+00:00', '1990-10-01 00:00:00+00:00',
'1990-11-01 00:00:00+00:00', '1990-12-01 00:00:00+00:00']
],
[datetime.timedelta(days=141),
['1990-01-05 00:00:00+00:00', '1990-01-26 00:00:00+00:00',
'1990-02-16 00:00:00+00:00', '1990-03-09 00:00:00+00:00',
'1990-03-30 00:00:00+00:00', '1990-04-20 00:00:00+00:00',
'1990-05-11 00:00:00+00:00']
],
[datetime.timedelta(days=40),
['1990-01-03 00:00:00+00:00', '1990-01-10 00:00:00+00:00',
'1990-01-17 00:00:00+00:00', '1990-01-24 00:00:00+00:00',
'1990-01-31 00:00:00+00:00', '1990-02-07 00:00:00+00:00']
],
[datetime.timedelta(hours=40),
['1990-01-01 00:00:00+00:00', '1990-01-01 04:00:00+00:00',
'1990-01-01 08:00:00+00:00', '1990-01-01 12:00:00+00:00',
'1990-01-01 16:00:00+00:00', '1990-01-01 20:00:00+00:00',
'1990-01-02 00:00:00+00:00', '1990-01-02 04:00:00+00:00',
'1990-01-02 08:00:00+00:00', '1990-01-02 12:00:00+00:00',
'1990-01-02 16:00:00+00:00']
],
[datetime.timedelta(minutes=20),
['1990-01-01 00:00:00+00:00', '1990-01-01 00:05:00+00:00',
'1990-01-01 00:10:00+00:00', '1990-01-01 00:15:00+00:00',
'1990-01-01 00:20:00+00:00']
],
[datetime.timedelta(seconds=40),
['1990-01-01 00:00:00+00:00', '1990-01-01 00:00:05+00:00',
'1990-01-01 00:00:10+00:00', '1990-01-01 00:00:15+00:00',
'1990-01-01 00:00:20+00:00', '1990-01-01 00:00:25+00:00',
'1990-01-01 00:00:30+00:00', '1990-01-01 00:00:35+00:00',
'1990-01-01 00:00:40+00:00']
],
[datetime.timedelta(microseconds=1500),
['1989-12-31 23:59:59.999500+00:00',
'1990-01-01 00:00:00+00:00',
'1990-01-01 00:00:00.000500+00:00',
'1990-01-01 00:00:00.001000+00:00',
'1990-01-01 00:00:00.001500+00:00']
],
)
for t_delta, expected in results:
d2 = d1 + t_delta
locator = _create_auto_date_locator(d1, d2)
assert list(map(str, mdates.num2date(locator()))) == expected
def test_auto_date_locator_intmult():
def _create_auto_date_locator(date1, date2):
locator = mdates.AutoDateLocator(interval_multiples=True)
locator.create_dummy_axis()
locator.set_view_interval(mdates.date2num(date1),
mdates.date2num(date2))
return locator
results = ([datetime.timedelta(weeks=52 * 200),
['1980-01-01 00:00:00+00:00', '2000-01-01 00:00:00+00:00',
'2020-01-01 00:00:00+00:00', '2040-01-01 00:00:00+00:00',
'2060-01-01 00:00:00+00:00', '2080-01-01 00:00:00+00:00',
'2100-01-01 00:00:00+00:00', '2120-01-01 00:00:00+00:00',
'2140-01-01 00:00:00+00:00', '2160-01-01 00:00:00+00:00',
'2180-01-01 00:00:00+00:00', '2200-01-01 00:00:00+00:00']
],
[datetime.timedelta(weeks=52),
['1997-01-01 00:00:00+00:00', '1997-02-01 00:00:00+00:00',
'1997-03-01 00:00:00+00:00', '1997-04-01 00:00:00+00:00',
'1997-05-01 00:00:00+00:00', '1997-06-01 00:00:00+00:00',
'1997-07-01 00:00:00+00:00', '1997-08-01 00:00:00+00:00',
'1997-09-01 00:00:00+00:00', '1997-10-01 00:00:00+00:00',
'1997-11-01 00:00:00+00:00', '1997-12-01 00:00:00+00:00']
],
[datetime.timedelta(days=141),
['1997-01-01 00:00:00+00:00', '1997-01-22 00:00:00+00:00',
'1997-02-01 00:00:00+00:00', '1997-02-22 00:00:00+00:00',
'1997-03-01 00:00:00+00:00', '1997-03-22 00:00:00+00:00',
'1997-04-01 00:00:00+00:00', '1997-04-22 00:00:00+00:00',
'1997-05-01 00:00:00+00:00', '1997-05-22 00:00:00+00:00']
],
[datetime.timedelta(days=40),
['1997-01-01 00:00:00+00:00', '1997-01-05 00:00:00+00:00',
'1997-01-09 00:00:00+00:00', '1997-01-13 00:00:00+00:00',
'1997-01-17 00:00:00+00:00', '1997-01-21 00:00:00+00:00',
'1997-01-25 00:00:00+00:00', '1997-01-29 00:00:00+00:00',
'1997-02-01 00:00:00+00:00', '1997-02-05 00:00:00+00:00',
'1997-02-09 00:00:00+00:00']
],
[datetime.timedelta(hours=40),
['1997-01-01 00:00:00+00:00', '1997-01-01 04:00:00+00:00',
'1997-01-01 08:00:00+00:00', '1997-01-01 12:00:00+00:00',
'1997-01-01 16:00:00+00:00', '1997-01-01 20:00:00+00:00',
'1997-01-02 00:00:00+00:00', '1997-01-02 04:00:00+00:00',
'1997-01-02 08:00:00+00:00', '1997-01-02 12:00:00+00:00',
'1997-01-02 16:00:00+00:00']
],
[datetime.timedelta(minutes=20),
['1997-01-01 00:00:00+00:00', '1997-01-01 00:05:00+00:00',
'1997-01-01 00:10:00+00:00', '1997-01-01 00:15:00+00:00',
'1997-01-01 00:20:00+00:00']
],
[datetime.timedelta(seconds=40),
['1997-01-01 00:00:00+00:00', '1997-01-01 00:00:05+00:00',
'1997-01-01 00:00:10+00:00', '1997-01-01 00:00:15+00:00',
'1997-01-01 00:00:20+00:00', '1997-01-01 00:00:25+00:00',
'1997-01-01 00:00:30+00:00', '1997-01-01 00:00:35+00:00',
'1997-01-01 00:00:40+00:00']
],
[datetime.timedelta(microseconds=1500),
['1996-12-31 23:59:59.999500+00:00',
'1997-01-01 00:00:00+00:00',
'1997-01-01 00:00:00.000500+00:00',
'1997-01-01 00:00:00.001000+00:00',
'1997-01-01 00:00:00.001500+00:00']
],
)
d1 = datetime.datetime(1997, 1, 1)
for t_delta, expected in results:
d2 = d1 + t_delta
locator = _create_auto_date_locator(d1, d2)
assert list(map(str, mdates.num2date(locator()))) == expected
def test_concise_formatter():
def _create_auto_date_locator(date1, date2):
fig, ax = plt.subplots()
locator = mdates.AutoDateLocator(interval_multiples=True)
formatter = mdates.ConciseDateFormatter(locator)
ax.yaxis.set_major_locator(locator)
ax.yaxis.set_major_formatter(formatter)
ax.set_ylim(date1, date2)
fig.canvas.draw()
sts = []
for st in ax.get_yticklabels():
sts += [st.get_text()]
return sts
d1 = datetime.datetime(1997, 1, 1)
results = ([datetime.timedelta(weeks=52 * 200),
[str(t) for t in range(1980, 2201, 20)]
],
[datetime.timedelta(weeks=52),
['1997', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',
'Sep', 'Oct', 'Nov', 'Dec']
],
[datetime.timedelta(days=141),
['Jan', '22', 'Feb', '22', 'Mar', '22', 'Apr', '22',
'May', '22']
],
[datetime.timedelta(days=40),
['Jan', '05', '09', '13', '17', '21', '25', '29', 'Feb',
'05', '09']
],
[datetime.timedelta(hours=40),
['Jan-01', '04:00', '08:00', '12:00', '16:00', '20:00',
'Jan-02', '04:00', '08:00', '12:00', '16:00']
],
[datetime.timedelta(minutes=20),
['00:00', '00:05', '00:10', '00:15', '00:20']
],
[datetime.timedelta(seconds=40),
['00:00', '05', '10', '15', '20', '25', '30', '35', '40']
],
[datetime.timedelta(seconds=2),
['59.5', '00:00', '00.5', '01.0', '01.5', '02.0', '02.5']
],
)
for t_delta, expected in results:
d2 = d1 + t_delta
strings = _create_auto_date_locator(d1, d2)
assert strings == expected
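# Illustrative sketch (not a test; the helper name is ours): the usual way to
# hook a ConciseDateFormatter up to an axis.  The nested helper above does the
# same thing on the y-axis of a throwaway figure to harvest the tick strings.
def _concise_formatter_demo(ax):
    locator = mdates.AutoDateLocator()
    ax.xaxis.set_major_locator(locator)
    ax.xaxis.set_major_formatter(mdates.ConciseDateFormatter(locator))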
def test_auto_date_locator_intmult_tz():
def _create_auto_date_locator(date1, date2, tz):
locator = mdates.AutoDateLocator(interval_multiples=True, tz=tz)
locator.create_dummy_axis()
locator.set_view_interval(mdates.date2num(date1),
mdates.date2num(date2))
return locator
results = ([datetime.timedelta(weeks=52*200),
['1980-01-01 00:00:00-08:00', '2000-01-01 00:00:00-08:00',
'2020-01-01 00:00:00-08:00', '2040-01-01 00:00:00-08:00',
'2060-01-01 00:00:00-08:00', '2080-01-01 00:00:00-08:00',
'2100-01-01 00:00:00-08:00', '2120-01-01 00:00:00-08:00',
'2140-01-01 00:00:00-08:00', '2160-01-01 00:00:00-08:00',
'2180-01-01 00:00:00-08:00', '2200-01-01 00:00:00-08:00']
],
[datetime.timedelta(weeks=52),
['1997-01-01 00:00:00-08:00', '1997-02-01 00:00:00-08:00',
'1997-03-01 00:00:00-08:00', '1997-04-01 00:00:00-08:00',
'1997-05-01 00:00:00-07:00', '1997-06-01 00:00:00-07:00',
'1997-07-01 00:00:00-07:00', '1997-08-01 00:00:00-07:00',
'1997-09-01 00:00:00-07:00', '1997-10-01 00:00:00-07:00',
'1997-11-01 00:00:00-08:00', '1997-12-01 00:00:00-08:00']
],
[datetime.timedelta(days=141),
['1997-01-01 00:00:00-08:00', '1997-01-22 00:00:00-08:00',
'1997-02-01 00:00:00-08:00', '1997-02-22 00:00:00-08:00',
'1997-03-01 00:00:00-08:00', '1997-03-22 00:00:00-08:00',
'1997-04-01 00:00:00-08:00', '1997-04-22 00:00:00-07:00',
'1997-05-01 00:00:00-07:00', '1997-05-22 00:00:00-07:00']
],
[datetime.timedelta(days=40),
['1997-01-01 00:00:00-08:00', '1997-01-05 00:00:00-08:00',
'1997-01-09 00:00:00-08:00', '1997-01-13 00:00:00-08:00',
'1997-01-17 00:00:00-08:00', '1997-01-21 00:00:00-08:00',
'1997-01-25 00:00:00-08:00', '1997-01-29 00:00:00-08:00',
'1997-02-01 00:00:00-08:00', '1997-02-05 00:00:00-08:00',
'1997-02-09 00:00:00-08:00']
],
[datetime.timedelta(hours=40),
['1997-01-01 00:00:00-08:00', '1997-01-01 04:00:00-08:00',
'1997-01-01 08:00:00-08:00', '1997-01-01 12:00:00-08:00',
'1997-01-01 16:00:00-08:00', '1997-01-01 20:00:00-08:00',
'1997-01-02 00:00:00-08:00', '1997-01-02 04:00:00-08:00',
'1997-01-02 08:00:00-08:00', '1997-01-02 12:00:00-08:00',
'1997-01-02 16:00:00-08:00']
],
[datetime.timedelta(minutes=20),
['1997-01-01 00:00:00-08:00', '1997-01-01 00:05:00-08:00',
'1997-01-01 00:10:00-08:00', '1997-01-01 00:15:00-08:00',
'1997-01-01 00:20:00-08:00']
],
[datetime.timedelta(seconds=40),
['1997-01-01 00:00:00-08:00', '1997-01-01 00:00:05-08:00',
'1997-01-01 00:00:10-08:00', '1997-01-01 00:00:15-08:00',
'1997-01-01 00:00:20-08:00', '1997-01-01 00:00:25-08:00',
'1997-01-01 00:00:30-08:00', '1997-01-01 00:00:35-08:00',
'1997-01-01 00:00:40-08:00']
]
)
tz = dateutil.tz.gettz('Canada/Pacific')
d1 = datetime.datetime(1997, 1, 1, tzinfo=tz)
for t_delta, expected in results:
with rc_context({'_internal.classic_mode': False}):
d2 = d1 + t_delta
locator = _create_auto_date_locator(d1, d2, tz)
st = list(map(str, mdates.num2date(locator(), tz=tz)))
assert st == expected
@image_comparison(baseline_images=['date_inverted_limit'],
extensions=['png'])
def test_date_inverted_limit():
    # test axhline with date inputs on an inverted y-axis
t0 = datetime.datetime(2009, 1, 20)
tf = datetime.datetime(2009, 1, 31)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.axhline(t0, color="blue", lw=3)
ax.set_ylim(t0 - datetime.timedelta(days=5),
tf + datetime.timedelta(days=5))
ax.invert_yaxis()
fig.subplots_adjust(left=0.25)
def _test_date2num_dst(date_range, tz_convert):
# Timezones
BRUSSELS = dateutil.tz.gettz('Europe/Brussels')
UTC = mdates.UTC
# Create a list of timezone-aware datetime objects in UTC
# Interval is 0b0.0000011 days, to prevent float rounding issues
dtstart = datetime.datetime(2014, 3, 30, 0, 0, tzinfo=UTC)
interval = datetime.timedelta(minutes=33, seconds=45)
interval_days = 0.0234375 # 2025 / 86400 seconds
N = 8
dt_utc = date_range(start=dtstart, freq=interval, periods=N)
dt_bxl = tz_convert(dt_utc, BRUSSELS)
expected_ordinalf = [735322.0 + (i * interval_days) for i in range(N)]
actual_ordinalf = list(mdates.date2num(dt_bxl))
assert actual_ordinalf == expected_ordinalf
def test_date2num_dst():
# Test for github issue #3896, but in date2num around DST transitions
    # with a timezone-aware, pandas-like date_range mock (defined below).
class dt_tzaware(datetime.datetime):
"""
This bug specifically occurs because of the normalization behavior of
pandas Timestamp objects, so in order to replicate it, we need a
datetime-like object that applies timezone normalization after
subtraction.
"""
def __sub__(self, other):
r = super().__sub__(other)
tzinfo = getattr(r, 'tzinfo', None)
if tzinfo is not None:
localizer = getattr(tzinfo, 'normalize', None)
if localizer is not None:
r = tzinfo.normalize(r)
if isinstance(r, datetime.datetime):
r = self.mk_tzaware(r)
return r
def __add__(self, other):
return self.mk_tzaware(super().__add__(other))
def astimezone(self, tzinfo):
dt = super().astimezone(tzinfo)
return self.mk_tzaware(dt)
@classmethod
def mk_tzaware(cls, datetime_obj):
kwargs = {}
attrs = ('year',
'month',
'day',
'hour',
'minute',
'second',
'microsecond',
'tzinfo')
for attr in attrs:
val = getattr(datetime_obj, attr, None)
if val is not None:
kwargs[attr] = val
return cls(**kwargs)
# Define a date_range function similar to pandas.date_range
def date_range(start, freq, periods):
dtstart = dt_tzaware.mk_tzaware(start)
return [dtstart + (i * freq) for i in range(periods)]
# Define a tz_convert function that converts a list to a new time zone.
def tz_convert(dt_list, tzinfo):
return [d.astimezone(tzinfo) for d in dt_list]
_test_date2num_dst(date_range, tz_convert)
def test_date2num_dst_pandas(pd):
# Test for github issue #3896, but in date2num around DST transitions
# with a timezone-aware pandas date_range object.
def tz_convert(*args):
return pd.DatetimeIndex.tz_convert(*args).astype(object)
_test_date2num_dst(pd.date_range, tz_convert)
def _test_rrulewrapper(attach_tz, get_tz):
SYD = get_tz('Australia/Sydney')
dtstart = attach_tz(datetime.datetime(2017, 4, 1, 0), SYD)
dtend = attach_tz(datetime.datetime(2017, 4, 4, 0), SYD)
rule = mdates.rrulewrapper(freq=dateutil.rrule.DAILY, dtstart=dtstart)
act = rule.between(dtstart, dtend)
exp = [datetime.datetime(2017, 4, 1, 13, tzinfo=dateutil.tz.tzutc()),
datetime.datetime(2017, 4, 2, 14, tzinfo=dateutil.tz.tzutc())]
assert act == exp
def test_rrulewrapper():
def attach_tz(dt, zi):
return dt.replace(tzinfo=zi)
_test_rrulewrapper(attach_tz, dateutil.tz.gettz)
@pytest.mark.pytz
@pytest.mark.skipif(not __has_pytz(), reason="Requires pytz")
def test_rrulewrapper_pytz():
# Test to make sure pytz zones are supported in rrules
import pytz
def attach_tz(dt, zi):
return zi.localize(dt)
_test_rrulewrapper(attach_tz, pytz.timezone)
@pytest.mark.pytz
@pytest.mark.skipif(not __has_pytz(), reason="Requires pytz")
def test_yearlocator_pytz():
import pytz
tz = pytz.timezone('America/New_York')
x = [tz.localize(datetime.datetime(2010, 1, 1))
+ datetime.timedelta(i) for i in range(2000)]
locator = mdates.AutoDateLocator(interval_multiples=True, tz=tz)
locator.create_dummy_axis()
locator.set_view_interval(mdates.date2num(x[0])-1.0,
mdates.date2num(x[-1])+1.0)
np.testing.assert_allclose([733408.208333, 733773.208333, 734138.208333,
734503.208333, 734869.208333,
735234.208333, 735599.208333], locator())
expected = ['2009-01-01 00:00:00-05:00',
'2010-01-01 00:00:00-05:00', '2011-01-01 00:00:00-05:00',
'2012-01-01 00:00:00-05:00', '2013-01-01 00:00:00-05:00',
'2014-01-01 00:00:00-05:00', '2015-01-01 00:00:00-05:00']
st = list(map(str, mdates.num2date(locator(), tz=tz)))
assert st == expected
def test_DayLocator():
with pytest.raises(ValueError):
mdates.DayLocator(interval=-1)
with pytest.raises(ValueError):
mdates.DayLocator(interval=-1.5)
with pytest.raises(ValueError):
mdates.DayLocator(interval=0)
with pytest.raises(ValueError):
mdates.DayLocator(interval=1.3)
mdates.DayLocator(interval=1.0)
def test_tz_utc():
dt = datetime.datetime(1970, 1, 1, tzinfo=mdates.UTC)
dt.tzname()
@pytest.mark.parametrize("x, tdelta",
[(1, datetime.timedelta(days=1)),
([1, 1.5], [datetime.timedelta(days=1),
datetime.timedelta(days=1.5)])])
def test_num2timedelta(x, tdelta):
dt = mdates.num2timedelta(x)
assert dt == tdelta
def test_datetime64_in_list():
dt = [np.datetime64('2000-01-01'), np.datetime64('2001-01-01')]
dn = mdates.date2num(dt)
assert np.array_equal(dn, [730120., 730486.])
|
fbaa897ac0436c5d555576299d4b41ab6ffd067ffdf10f4c9f7848caf2738dda
|
"""Catch all for categorical functions"""
import pytest
import numpy as np
from matplotlib.axes import Axes
import matplotlib.pyplot as plt
import matplotlib.category as cat
# Python2/3 text handling
_to_str = cat.StrCategoryFormatter._text
class TestUnitData(object):
test_cases = [('single', (["hello world"], [0])),
('unicode', (["Здравствуйте мир"], [0])),
('mixed', (['A', "np.nan", 'B', "3.14", "мир"],
[0, 1, 2, 3, 4]))]
ids, data = zip(*test_cases)
@pytest.mark.parametrize("data, locs", data, ids=ids)
def test_unit(self, data, locs):
unit = cat.UnitData(data)
assert list(unit._mapping.keys()) == data
assert list(unit._mapping.values()) == locs
def test_update(self):
data = ['a', 'd']
locs = [0, 1]
data_update = ['b', 'd', 'e']
unique_data = ['a', 'd', 'b', 'e']
updated_locs = [0, 1, 2, 3]
unit = cat.UnitData(data)
assert list(unit._mapping.keys()) == data
assert list(unit._mapping.values()) == locs
unit.update(data_update)
assert list(unit._mapping.keys()) == unique_data
assert list(unit._mapping.values()) == updated_locs
failing_test_cases = [("number", 3.14), ("nan", np.nan),
("list", [3.14, 12]), ("mixed type", ["A", 2])]
    fids, fdata = zip(*failing_test_cases)
@pytest.mark.parametrize("fdata", fdata, ids=fids)
def test_non_string_fails(self, fdata):
with pytest.raises(TypeError):
cat.UnitData(fdata)
@pytest.mark.parametrize("fdata", fdata, ids=fids)
def test_non_string_update_fails(self, fdata):
unitdata = cat.UnitData()
with pytest.raises(TypeError):
unitdata.update(fdata)
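# Illustrative sketch (not a test; the helper name is ours): UnitData assigns
# each new string category the next integer position, and update() extends
# the mapping in order of first appearance -- the behaviour asserted by the
# class above.  _mapping is a private attribute, used here only because the
# tests above already rely on it.
def _unitdata_demo():
    unit = cat.UnitData(["apple", "banana"])
    unit.update(["banana", "cherry"])
    return dict(unit._mapping)  # {'apple': 0, 'banana': 1, 'cherry': 2}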
class FakeAxis(object):
def __init__(self, units):
self.units = units
class TestStrCategoryConverter(object):
"""Based on the pandas conversion and factorization tests:
ref: /pandas/tseries/tests/test_converter.py
/pandas/tests/test_algos.py:TestFactorize
"""
test_cases = [("unicode", ["Здравствуйте мир"]),
("ascii", ["hello world"]),
("single", ['a', 'b', 'c']),
("integer string", ["1", "2"]),
("single + values>10", ["A", "B", "C", "D", "E", "F", "G",
"H", "I", "J", "K", "L", "M", "N",
"O", "P", "Q", "R", "S", "T", "U",
"V", "W", "X", "Y", "Z"])]
ids, values = zip(*test_cases)
failing_test_cases = [("mixed", [3.14, 'A', np.inf]),
("string integer", ['42', 42])]
fids, fvalues = zip(*failing_test_cases)
@pytest.fixture(autouse=True)
def mock_axis(self, request):
self.cc = cat.StrCategoryConverter()
        # self.unit should probably be replaced with a real mock unit
self.unit = cat.UnitData()
self.ax = FakeAxis(self.unit)
@pytest.mark.parametrize("vals", values, ids=ids)
def test_convert(self, vals):
np.testing.assert_allclose(self.cc.convert(vals, self.ax.units,
self.ax),
range(len(vals)))
@pytest.mark.parametrize("value", ["hi", "мир"], ids=["ascii", "unicode"])
def test_convert_one_string(self, value):
assert self.cc.convert(value, self.unit, self.ax) == 0
def test_convert_one_number(self):
actual = self.cc.convert(0.0, self.unit, self.ax)
np.testing.assert_allclose(actual, np.array([0.]))
def test_convert_float_array(self):
data = np.array([1, 2, 3], dtype=float)
actual = self.cc.convert(data, self.unit, self.ax)
np.testing.assert_allclose(actual, np.array([1., 2., 3.]))
@pytest.mark.parametrize("fvals", fvalues, ids=fids)
def test_convert_fail(self, fvals):
with pytest.raises(TypeError):
self.cc.convert(fvals, self.unit, self.ax)
def test_axisinfo(self):
axis = self.cc.axisinfo(self.unit, self.ax)
assert isinstance(axis.majloc, cat.StrCategoryLocator)
assert isinstance(axis.majfmt, cat.StrCategoryFormatter)
def test_default_units(self):
assert isinstance(self.cc.default_units(["a"], self.ax), cat.UnitData)
@pytest.fixture
def ax():
return plt.figure().subplots()
PLOT_LIST = [Axes.scatter, Axes.plot, Axes.bar]
PLOT_IDS = ["scatter", "plot", "bar"]
class TestStrCategoryLocator(object):
def test_StrCategoryLocator(self):
locs = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
unit = cat.UnitData([str(j) for j in locs])
ticks = cat.StrCategoryLocator(unit._mapping)
np.testing.assert_array_equal(ticks.tick_values(None, None), locs)
@pytest.mark.parametrize("plotter", PLOT_LIST, ids=PLOT_IDS)
def test_StrCategoryLocatorPlot(self, ax, plotter):
ax.plot(["a", "b", "c"])
np.testing.assert_array_equal(ax.yaxis.major.locator(), range(3))
class TestStrCategoryFormatter(object):
test_cases = [("ascii", ["hello", "world", "hi"]),
("unicode", ["Здравствуйте", "привет"])]
ids, cases = zip(*test_cases)
@pytest.mark.parametrize("ydata", cases, ids=ids)
def test_StrCategoryFormatter(self, ax, ydata):
unit = cat.UnitData(ydata)
labels = cat.StrCategoryFormatter(unit._mapping)
for i, d in enumerate(ydata):
assert labels(i, i) == _to_str(d)
@pytest.mark.parametrize("ydata", cases, ids=ids)
@pytest.mark.parametrize("plotter", PLOT_LIST, ids=PLOT_IDS)
def test_StrCategoryFormatterPlot(self, ax, ydata, plotter):
plotter(ax, range(len(ydata)), ydata)
for i, d in enumerate(ydata):
assert ax.yaxis.major.formatter(i, i) == _to_str(d)
assert ax.yaxis.major.formatter(i+1, i+1) == ""
assert ax.yaxis.major.formatter(0, None) == ""
def axis_test(axis, labels):
ticks = list(range(len(labels)))
np.testing.assert_array_equal(axis.get_majorticklocs(), ticks)
graph_labels = [axis.major.formatter(i, i) for i in ticks]
assert graph_labels == [_to_str(l) for l in labels]
assert list(axis.units._mapping.keys()) == [l for l in labels]
assert list(axis.units._mapping.values()) == ticks
class TestPlotBytes(object):
bytes_cases = [('string list', ['a', 'b', 'c']),
('bytes list', [b'a', b'b', b'c']),
('bytes ndarray', np.array([b'a', b'b', b'c']))]
bytes_ids, bytes_data = zip(*bytes_cases)
@pytest.mark.parametrize("plotter", PLOT_LIST, ids=PLOT_IDS)
@pytest.mark.parametrize("bdata", bytes_data, ids=bytes_ids)
def test_plot_bytes(self, ax, plotter, bdata):
counts = np.array([4, 6, 5])
plotter(ax, bdata, counts)
axis_test(ax.xaxis, bdata)
class TestPlotNumlike(object):
numlike_cases = [('string list', ['1', '11', '3']),
('string ndarray', np.array(['1', '11', '3'])),
('bytes list', [b'1', b'11', b'3']),
('bytes ndarray', np.array([b'1', b'11', b'3']))]
numlike_ids, numlike_data = zip(*numlike_cases)
@pytest.mark.parametrize("plotter", PLOT_LIST, ids=PLOT_IDS)
@pytest.mark.parametrize("ndata", numlike_data, ids=numlike_ids)
def test_plot_numlike(self, ax, plotter, ndata):
counts = np.array([4, 6, 5])
plotter(ax, ndata, counts)
axis_test(ax.xaxis, ndata)
class TestPlotTypes(object):
@pytest.mark.parametrize("plotter", PLOT_LIST, ids=PLOT_IDS)
def test_plot_unicode(self, ax, plotter):
words = ['Здравствуйте', 'привет']
plotter(ax, words, [0, 1])
axis_test(ax.xaxis, words)
@pytest.fixture
def test_data(self):
self.x = ["hello", "happy", "world"]
self.xy = [2, 6, 3]
self.y = ["Python", "is", "fun"]
self.yx = [3, 4, 5]
@pytest.mark.usefixtures("test_data")
@pytest.mark.parametrize("plotter", PLOT_LIST, ids=PLOT_IDS)
def test_plot_xaxis(self, ax, test_data, plotter):
plotter(ax, self.x, self.xy)
axis_test(ax.xaxis, self.x)
@pytest.mark.usefixtures("test_data")
@pytest.mark.parametrize("plotter", PLOT_LIST, ids=PLOT_IDS)
def test_plot_yaxis(self, ax, test_data, plotter):
plotter(ax, self.yx, self.y)
axis_test(ax.yaxis, self.y)
@pytest.mark.usefixtures("test_data")
@pytest.mark.parametrize("plotter", PLOT_LIST, ids=PLOT_IDS)
def test_plot_xyaxis(self, ax, test_data, plotter):
plotter(ax, self.x, self.y)
axis_test(ax.xaxis, self.x)
axis_test(ax.yaxis, self.y)
@pytest.mark.parametrize("plotter", PLOT_LIST, ids=PLOT_IDS)
def test_update_plot(self, ax, plotter):
plotter(ax, ['a', 'b'], ['e', 'g'])
plotter(ax, ['a', 'b', 'd'], ['f', 'a', 'b'])
plotter(ax, ['b', 'c', 'd'], ['g', 'e', 'd'])
axis_test(ax.xaxis, ['a', 'b', 'd', 'c'])
axis_test(ax.yaxis, ['e', 'g', 'f', 'a', 'b', 'd'])
failing_test_cases = [("mixed", ['A', 3.14]),
("number integer", ['1', 1]),
("string integer", ['42', 42]),
("missing", ['12', np.nan])]
fids, fvalues = zip(*failing_test_cases)
plotters = [Axes.scatter, Axes.bar,
pytest.param(Axes.plot, marks=pytest.mark.xfail)]
@pytest.mark.parametrize("plotter", plotters)
@pytest.mark.parametrize("xdata", fvalues, ids=fids)
def test_mixed_type_exception(self, ax, plotter, xdata):
with pytest.raises(TypeError):
plotter(ax, xdata, [1, 2])
@pytest.mark.parametrize("plotter", plotters)
@pytest.mark.parametrize("xdata", fvalues, ids=fids)
def test_mixed_type_update_exception(self, ax, plotter, xdata):
with pytest.raises(TypeError):
plotter(ax, [0, 3], [1, 3])
plotter(ax, xdata, [1, 2])
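# Illustrative sketch (not a test; the helper name is ours): the
# converter/locator/formatter machinery exercised above is what lets string
# data be plotted directly.  Categories get integer positions in order of
# first appearance, and after a draw the tick labels show the original
# strings.
def _category_plot_demo(ax):
    ax.bar(["apple", "banana", "cherry"], [3, 1, 2])
    ax.figure.canvas.draw()
    return [t.get_text() for t in ax.get_xticklabels()]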
|
7e11c75bddf00a5e4a6eb6512f68845738afd812c1654eb34adeba860b08e636
|
import numpy as np
from matplotlib import markers
from matplotlib.path import Path
import pytest
def test_markers_valid():
marker_style = markers.MarkerStyle()
mrk_array = np.array([[-0.5, 0],
[0.5, 0]])
# Checking this doesn't fail.
marker_style.set_marker(mrk_array)
def test_markers_invalid():
marker_style = markers.MarkerStyle()
mrk_array = np.array([[-0.5, 0, 1, 2, 3]])
# Checking this does fail.
with pytest.raises(ValueError):
marker_style.set_marker(mrk_array)
def test_marker_path():
marker_style = markers.MarkerStyle()
path = Path([[0, 0], [1, 0]], [Path.MOVETO, Path.LINETO])
# Checking this doesn't fail.
marker_style.set_marker(path)
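# Illustrative sketch (not a test; the helper name is ours): any vertex array
# or Path accepted by MarkerStyle.set_marker can also be passed directly as
# the ``marker`` argument of a plotting call.
def _custom_marker_demo(ax):
    triangle = Path([[0, 0], [1, 0], [0.5, 1], [0, 0]],
                    [Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY])
    ax.plot([0, 1, 2], [0, 1, 0], linestyle="none",
            marker=triangle, markersize=20)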
|
947d28575acc82a72e6dbb64f17eefcb0cca5edd095ba8e9e9cc8c816005a56f
|
from collections import OrderedDict
import copy
import os
from unittest import mock
from cycler import cycler, Cycler
import pytest
import matplotlib as mpl
from matplotlib import cbook
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import numpy as np
from matplotlib.rcsetup import (validate_bool_maybe_none,
validate_stringlist,
validate_colorlist,
validate_color,
validate_bool,
validate_nseq_int,
validate_nseq_float,
validate_cycler,
validate_hatch,
validate_hist_bins,
validate_markevery,
_validate_linestyle)
def test_rcparams():
mpl.rc('text', usetex=False)
mpl.rc('lines', linewidth=22)
usetex = mpl.rcParams['text.usetex']
linewidth = mpl.rcParams['lines.linewidth']
fname = os.path.join(os.path.dirname(__file__), 'test_rcparams.rc')
# test context given dictionary
with mpl.rc_context(rc={'text.usetex': not usetex}):
assert mpl.rcParams['text.usetex'] == (not usetex)
assert mpl.rcParams['text.usetex'] == usetex
# test context given filename (mpl.rc sets linewidth to 33)
with mpl.rc_context(fname=fname):
assert mpl.rcParams['lines.linewidth'] == 33
assert mpl.rcParams['lines.linewidth'] == linewidth
# test context given filename and dictionary
with mpl.rc_context(fname=fname, rc={'lines.linewidth': 44}):
assert mpl.rcParams['lines.linewidth'] == 44
assert mpl.rcParams['lines.linewidth'] == linewidth
# test rc_file
mpl.rc_file(fname)
assert mpl.rcParams['lines.linewidth'] == 33
def test_RcParams_class():
rc = mpl.RcParams({'font.cursive': ['Apple Chancery',
'Textile',
'Zapf Chancery',
'cursive'],
'font.family': 'sans-serif',
'font.weight': 'normal',
'font.size': 12})
expected_repr = """
RcParams({'font.cursive': ['Apple Chancery',
'Textile',
'Zapf Chancery',
'cursive'],
'font.family': ['sans-serif'],
'font.size': 12.0,
'font.weight': 'normal'})""".lstrip()
assert expected_repr == repr(rc)
expected_str = """
font.cursive: ['Apple Chancery', 'Textile', 'Zapf Chancery', 'cursive']
font.family: ['sans-serif']
font.size: 12.0
font.weight: normal""".lstrip()
assert expected_str == str(rc)
# test the find_all functionality
assert ['font.cursive', 'font.size'] == sorted(rc.find_all('i[vz]'))
assert ['font.family'] == list(rc.find_all('family'))
def test_rcparams_update():
rc = mpl.RcParams({'figure.figsize': (3.5, 42)})
bad_dict = {'figure.figsize': (3.5, 42, 1)}
# make sure validation happens on input
with pytest.raises(ValueError), \
pytest.warns(UserWarning, match="validate"):
rc.update(bad_dict)
def test_rcparams_init():
with pytest.raises(ValueError), \
pytest.warns(UserWarning, match="validate"):
mpl.RcParams({'figure.figsize': (3.5, 42, 1)})
def test_Bug_2543():
    # Test that it is possible to add all values to itself / deepcopy
# This was not possible because validate_bool_maybe_none did not
# accept None as an argument.
# https://github.com/matplotlib/matplotlib/issues/2543
# We filter warnings at this stage since a number of them are raised
    # for deprecated rcparams, as they should be. We don't want these
    # printed in the test suite.
with cbook._suppress_matplotlib_deprecation_warning():
with mpl.rc_context():
_copy = mpl.rcParams.copy()
for key in _copy:
mpl.rcParams[key] = _copy[key]
with mpl.rc_context():
_deep_copy = copy.deepcopy(mpl.rcParams)
# real test is that this does not raise
assert validate_bool_maybe_none(None) is None
assert validate_bool_maybe_none("none") is None
with pytest.raises(ValueError):
validate_bool_maybe_none("blah")
with pytest.raises(ValueError):
validate_bool(None)
with pytest.raises(ValueError):
with mpl.rc_context():
mpl.rcParams['svg.fonttype'] = True
legend_color_tests = [
('face', {'color': 'r'}, mcolors.to_rgba('r')),
('face', {'color': 'inherit', 'axes.facecolor': 'r'},
mcolors.to_rgba('r')),
('face', {'color': 'g', 'axes.facecolor': 'r'}, mcolors.to_rgba('g')),
('edge', {'color': 'r'}, mcolors.to_rgba('r')),
('edge', {'color': 'inherit', 'axes.edgecolor': 'r'},
mcolors.to_rgba('r')),
('edge', {'color': 'g', 'axes.facecolor': 'r'}, mcolors.to_rgba('g'))
]
legend_color_test_ids = [
'same facecolor',
'inherited facecolor',
'different facecolor',
'same edgecolor',
'inherited edgecolor',
'different facecolor',
]
@pytest.mark.parametrize('color_type, param_dict, target', legend_color_tests,
ids=legend_color_test_ids)
def test_legend_colors(color_type, param_dict, target):
param_dict[f'legend.{color_type}color'] = param_dict.pop('color')
get_func = f'get_{color_type}color'
with mpl.rc_context(param_dict):
_, ax = plt.subplots()
ax.plot(range(3), label='test')
leg = ax.legend()
assert getattr(leg.legendPatch, get_func)() == target
def test_mfc_rcparams():
mpl.rcParams['lines.markerfacecolor'] = 'r'
ln = mpl.lines.Line2D([1, 2], [1, 2])
assert ln.get_markerfacecolor() == 'r'
def test_mec_rcparams():
mpl.rcParams['lines.markeredgecolor'] = 'r'
ln = mpl.lines.Line2D([1, 2], [1, 2])
assert ln.get_markeredgecolor() == 'r'
def test_Issue_1713():
utf32_be = os.path.join(os.path.dirname(__file__),
'test_utf32_be_rcparams.rc')
with mock.patch('locale.getpreferredencoding', return_value='UTF-32-BE'):
rc = mpl.rc_params_from_file(utf32_be, True, False)
assert rc.get('timezone') == 'UTC'
def generate_validator_testcases(valid):
validation_tests = (
{'validator': validate_bool,
'success': (*((_, True) for _ in
('t', 'y', 'yes', 'on', 'true', '1', 1, True)),
*((_, False) for _ in
('f', 'n', 'no', 'off', 'false', '0', 0, False))),
'fail': ((_, ValueError)
for _ in ('aardvark', 2, -1, [], ))},
{'validator': validate_stringlist,
'success': (('', []),
('a,b', ['a', 'b']),
('aardvark', ['aardvark']),
('aardvark, ', ['aardvark']),
('aardvark, ,', ['aardvark']),
(['a', 'b'], ['a', 'b']),
(('a', 'b'), ['a', 'b']),
(iter(['a', 'b']), ['a', 'b']),
(np.array(['a', 'b']), ['a', 'b']),
((1, 2), ['1', '2']),
(np.array([1, 2]), ['1', '2']),
),
'fail': ((dict(), ValueError),
(1, ValueError),
)
},
{'validator': validate_nseq_int(2),
'success': ((_, [1, 2])
for _ in ('1, 2', [1.5, 2.5], [1, 2],
(1, 2), np.array((1, 2)))),
'fail': ((_, ValueError)
for _ in ('aardvark', ('a', 1),
(1, 2, 3)
))
},
{'validator': validate_nseq_float(2),
'success': ((_, [1.5, 2.5])
for _ in ('1.5, 2.5', [1.5, 2.5], [1.5, 2.5],
(1.5, 2.5), np.array((1.5, 2.5)))),
'fail': ((_, ValueError)
for _ in ('aardvark', ('a', 1),
(1, 2, 3)
))
},
{'validator': validate_cycler,
'success': (('cycler("color", "rgb")',
cycler("color", 'rgb')),
(cycler('linestyle', ['-', '--']),
cycler('linestyle', ['-', '--'])),
("""(cycler("color", ["r", "g", "b"]) +
cycler("mew", [2, 3, 5]))""",
(cycler("color", 'rgb') +
cycler("markeredgewidth", [2, 3, 5]))),
("cycler(c='rgb', lw=[1, 2, 3])",
cycler('color', 'rgb') + cycler('linewidth', [1, 2, 3])),
("cycler('c', 'rgb') * cycler('linestyle', ['-', '--'])",
(cycler('color', 'rgb') *
cycler('linestyle', ['-', '--']))),
(cycler('ls', ['-', '--']),
cycler('linestyle', ['-', '--'])),
(cycler(mew=[2, 5]),
cycler('markeredgewidth', [2, 5])),
),
# This is *so* incredibly important: validate_cycler() eval's
# an arbitrary string! I think I have it locked down enough,
# and that is what this is testing.
# TODO: Note that these tests are actually insufficient, as it may
# be that they raised errors, but still did an action prior to
# raising the exception. We should devise some additional tests
# for that...
'fail': ((4, ValueError), # Gotta be a string or Cycler object
('cycler("bleh, [])', ValueError), # syntax error
('Cycler("linewidth", [1, 2, 3])',
ValueError), # only 'cycler()' function is allowed
('1 + 2', ValueError), # doesn't produce a Cycler object
('os.system("echo Gotcha")', ValueError), # os not available
('import os', ValueError), # should not be able to import
('def badjuju(a): return a; badjuju(cycler("color", "rgb"))',
ValueError), # Should not be able to define anything
# even if it does return a cycler
('cycler("waka", [1, 2, 3])', ValueError), # not a property
('cycler(c=[1, 2, 3])', ValueError), # invalid values
("cycler(lw=['a', 'b', 'c'])", ValueError), # invalid values
(cycler('waka', [1, 3, 5]), ValueError), # not a property
(cycler('color', ['C1', 'r', 'g']), ValueError) # no CN
)
},
{'validator': validate_hatch,
'success': (('--|', '--|'), ('\\oO', '\\oO'),
('/+*/.x', '/+*/.x'), ('', '')),
'fail': (('--_', ValueError),
(8, ValueError),
('X', ValueError)),
},
{'validator': validate_colorlist,
'success': (('r,g,b', ['r', 'g', 'b']),
(['r', 'g', 'b'], ['r', 'g', 'b']),
('r, ,', ['r']),
(['', 'g', 'blue'], ['g', 'blue']),
([np.array([1, 0, 0]), np.array([0, 1, 0])],
np.array([[1, 0, 0], [0, 1, 0]])),
(np.array([[1, 0, 0], [0, 1, 0]]),
np.array([[1, 0, 0], [0, 1, 0]])),
),
'fail': (('fish', ValueError),
),
},
{'validator': validate_color,
'success': (('None', 'none'),
('none', 'none'),
('AABBCC', '#AABBCC'), # RGB hex code
('AABBCC00', '#AABBCC00'), # RGBA hex code
('tab:blue', 'tab:blue'), # named color
('C12', 'C12'), # color from cycle
('(0, 1, 0)', [0.0, 1.0, 0.0]), # RGB tuple
((0, 1, 0), (0, 1, 0)), # non-string version
('(0, 1, 0, 1)', [0.0, 1.0, 0.0, 1.0]), # RGBA tuple
((0, 1, 0, 1), (0, 1, 0, 1)), # non-string version
('(0, 1, "0.5")', [0.0, 1.0, 0.5]), # unusual but valid
),
'fail': (('tab:veryblue', ValueError), # invalid name
('(0, 1)', ValueError), # tuple with length < 3
('(0, 1, 0, 1, 0)', ValueError), # tuple with length > 4
('(0, 1, none)', ValueError), # cannot cast none to float
),
},
{'validator': validate_hist_bins,
'success': (('auto', 'auto'),
('fd', 'fd'),
('10', 10),
('1, 2, 3', [1, 2, 3]),
([1, 2, 3], [1, 2, 3]),
(np.arange(15), np.arange(15))
),
'fail': (('aardvark', ValueError),
)
},
{'validator': validate_markevery,
'success': ((None, None),
(1, 1),
(0.1, 0.1),
((1, 1), (1, 1)),
((0.1, 0.1), (0.1, 0.1)),
([1, 2, 3], [1, 2, 3]),
(slice(2), slice(None, 2, None)),
(slice(1, 2, 3), slice(1, 2, 3))
),
'fail': (((1, 2, 3), TypeError),
([1, 2, 0.3], TypeError),
(['a', 2, 3], TypeError),
([1, 2, 'a'], TypeError),
((0.1, 0.2, 0.3), TypeError),
((0.1, 2, 3), TypeError),
((1, 0.2, 0.3), TypeError),
((1, 0.1), TypeError),
((0.1, 1), TypeError),
(('abc'), TypeError),
((1, 'a'), TypeError),
((0.1, 'b'), TypeError),
(('a', 1), TypeError),
(('a', 0.1), TypeError),
('abc', TypeError),
('a', TypeError),
(object(), TypeError)
)
},
{'validator': _validate_linestyle,
'success': (('-', '-'), ('solid', 'solid'),
('--', '--'), ('dashed', 'dashed'),
('-.', '-.'), ('dashdot', 'dashdot'),
(':', ':'), ('dotted', 'dotted'),
('', ''), (' ', ' '),
('None', 'none'), ('none', 'none'),
('DoTtEd', 'dotted'), # case-insensitive
(['1.23', '4.56'], (None, [1.23, 4.56])),
([1.23, 456], (None, [1.23, 456.0])),
([1, 2, 3, 4], (None, [1.0, 2.0, 3.0, 4.0])),
),
'fail': (('aardvark', ValueError), # not a valid string
(b'dotted', ValueError),
('dotted'.encode('utf-16'), ValueError),
((None, [1, 2]), ValueError), # (offset, dashes) != OK
((0, [1, 2]), ValueError), # idem
((-1, [1, 2]), ValueError), # idem
([1, 2, 3], ValueError), # sequence with odd length
(1.23, ValueError), # not a sequence
)
},
)
for validator_dict in validation_tests:
validator = validator_dict['validator']
if valid:
for arg, target in validator_dict['success']:
yield validator, arg, target
else:
for arg, error_type in validator_dict['fail']:
yield validator, arg, error_type
@pytest.mark.parametrize('validator, arg, target',
generate_validator_testcases(True))
def test_validator_valid(validator, arg, target):
res = validator(arg)
if isinstance(target, np.ndarray):
assert np.all(res == target)
elif not isinstance(target, Cycler):
assert res == target
else:
# Cyclers can't simply be asserted equal. They don't implement __eq__
assert list(res) == list(target)
@pytest.mark.parametrize('validator, arg, exception_type',
generate_validator_testcases(False))
def test_validator_invalid(validator, arg, exception_type):
with pytest.raises(exception_type):
validator(arg)
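# Illustrative sketch, not part of the original test suite: each validator used
# above is a plain callable that either returns a normalized value or raises
# ValueError, which is exactly the contract the parametrized tests rely on.
# Assumption: validate_hatch is importable from matplotlib.rcsetup.
def _demo_validator_contract():
    from matplotlib.rcsetup import validate_hatch
    assert validate_hatch('/+*/.x') == '/+*/.x'   # accepted verbatim
    try:
        validate_hatch('--_')                     # '_' is not a hatch character
    except ValueError:
        pass                                      # rejected, as expected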
def test_keymaps():
key_list = [k for k in mpl.rcParams if 'keymap' in k]
for k in key_list:
assert isinstance(mpl.rcParams[k], list)
def test_rcparams_reset_after_fail():
# There was previously a bug that meant that if rc_context failed and
# raised an exception due to issues in the supplied rc parameters, the
# global rc parameters were left in a modified state.
with mpl.rc_context(rc={'text.usetex': False}):
assert mpl.rcParams['text.usetex'] is False
with pytest.raises(KeyError):
with mpl.rc_context(rc=OrderedDict([('text.usetex', True),
('test.blah', True)])):
pass
assert mpl.rcParams['text.usetex'] is False
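# Minimal usage sketch of rc_context (standard matplotlib API): parameters set
# inside the context are restored on exit, which is the behaviour the
# regression test above protects even when entering the context fails.
def _demo_rc_context_restores():
    import matplotlib as mpl
    before = mpl.rcParams['lines.linewidth']
    with mpl.rc_context({'lines.linewidth': 5}):
        assert mpl.rcParams['lines.linewidth'] == 5
    assert mpl.rcParams['lines.linewidth'] == before   # restored on exit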
def test_if_rctemplate_is_up_to_date():
# This tests if the matplotlibrc.template file contains all valid rcParams.
deprecated = {*mpl._all_deprecated, *mpl._deprecated_remain_as_none}
path_to_rc = os.path.join(mpl.get_data_path(), 'matplotlibrc')
with open(path_to_rc, "r") as f:
rclines = f.readlines()
missing = {}
for k, v in mpl.defaultParams.items():
if k[0] == "_":
continue
if k in deprecated:
continue
if k.startswith(
("verbose.", "examples.directory", "text.latex.unicode")):
continue
found = False
for line in rclines:
if k in line:
found = True
if not found:
missing.update({k: v})
if missing:
raise ValueError("The following params are missing in the "
"matplotlibrc.template file: {}"
.format(missing.items()))
def test_if_rctemplate_would_be_valid(tmpdir):
# This tests if the matplotlibrc.template file would result in a valid
# rc file if all lines are uncommented.
path_to_rc = os.path.join(mpl.get_data_path(), 'matplotlibrc')
with open(path_to_rc, "r") as f:
rclines = f.readlines()
newlines = []
for line in rclines:
if line[0] == "#":
newline = line[1:]
else:
newline = line
if "$TEMPLATE_BACKEND" in newline:
newline = "backend : Agg"
if "datapath" in newline:
newline = ""
newlines.append(newline)
d = tmpdir.mkdir('test1')
fname = str(d.join('testrcvalid.temp'))
with open(fname, "w") as f:
f.writelines(newlines)
with pytest.warns(None) as record:
mpl.rc_params_from_file(fname,
fail_on_error=True,
use_default_template=False)
assert len(record) == 0
|
9ece9220a76ccd83570f98793ea291091d3a678e47fac5eb517d95c2e8d24483
|
from pathlib import Path
# Check that the test directories exist.
if not (Path(__file__).parent / 'baseline_images').exists():
raise IOError(
'The baseline image directory does not exist. '
'This is most likely because the test data is not installed. '
'You may need to install matplotlib from source to get the '
'test data.')
|
29d5a1819a4488227ca3bcae1639c282a15ea1dc0a482bae855fe6848734e1d3
|
from matplotlib.sankey import Sankey
def test_sankey():
    # Let's just create a Sankey instance and check that the code runs.
sankey = Sankey()
sankey.add()
def test_label():
s = Sankey(flows=[0.25], labels=['First'], orientations=[-1])
assert s.diagrams[0].texts[0].get_text() == 'First\n0.25'
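# Sketch of the usual Sankey workflow: flows passed to the constructor are
# forwarded to add(), and finish() returns the list of assembled diagrams.
# A balanced, hypothetical two-flow example; not part of the original tests.
def _demo_sankey_finish():
    from matplotlib.sankey import Sankey
    diagrams = Sankey(flows=[0.25, -0.25], labels=['in', 'out'],
                      orientations=[0, 0]).finish()
    assert len(diagrams) == 1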
|
0be07b3429325ac4392b34049c8e417b5e83b001c764953d3f9bfdfbec2c17ea
|
import pytest
from matplotlib.backend_tools import ToolHelpBase
@pytest.mark.parametrize('rc_shortcut,expected', [
('home', 'Home'),
('backspace', 'Backspace'),
('f1', 'F1'),
('ctrl+a', 'Ctrl+A'),
('ctrl+A', 'Ctrl+Shift+A'),
('a', 'a'),
('A', 'A'),
('ctrl+shift+f1', 'Ctrl+Shift+F1'),
('1', '1'),
('cmd+p', 'Cmd+P'),
('cmd+1', 'Cmd+1'),
])
def test_format_shortcut(rc_shortcut, expected):
assert ToolHelpBase.format_shortcut(rc_shortcut) == expected
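# Sketch: format_shortcut is called unbound above, i.e. it behaves as a plain
# static helper, so it can also be used to render the rcParams keymaps the way
# the help tool displays them. Assumes the default 'keymap.home' entries,
# which include 'home'.
def _demo_keymap_labels():
    import matplotlib as mpl
    labels = [ToolHelpBase.format_shortcut(k)
              for k in mpl.rcParams['keymap.home']]
    assert 'Home' in labels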
|
4a5f74018e30ae2dfb6c4bd1b92dd012d844fc65798b976b874624da81898dcd
|
import numpy as np
import pytest
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
import matplotlib.patheffects as path_effects
@image_comparison(baseline_images=['patheffect1'], remove_text=True)
def test_patheffect1():
ax1 = plt.subplot(111)
ax1.imshow([[1, 2], [2, 3]])
txt = ax1.annotate("test", (1., 1.), (0., 0),
arrowprops=dict(arrowstyle="->",
connectionstyle="angle3", lw=2),
size=20, ha="center",
path_effects=[path_effects.withStroke(linewidth=3,
foreground="w")])
txt.arrow_patch.set_path_effects([path_effects.Stroke(linewidth=5,
foreground="w"),
path_effects.Normal()])
pe = [path_effects.withStroke(linewidth=3, foreground="w")]
ax1.grid(True, linestyle="-", path_effects=pe)
@image_comparison(baseline_images=['patheffect2'], remove_text=True,
style='mpl20')
def test_patheffect2():
ax2 = plt.subplot(111)
arr = np.arange(25).reshape((5, 5))
ax2.imshow(arr)
cntr = ax2.contour(arr, colors="k")
plt.setp(cntr.collections,
path_effects=[path_effects.withStroke(linewidth=3,
foreground="w")])
clbls = ax2.clabel(cntr, fmt="%2.0f", use_clabeltext=True)
plt.setp(clbls,
path_effects=[path_effects.withStroke(linewidth=3,
foreground="w")])
@image_comparison(baseline_images=['patheffect3'])
def test_patheffect3():
p1, = plt.plot([1, 3, 5, 4, 3], 'o-b', lw=4)
p1.set_path_effects([path_effects.SimpleLineShadow(),
path_effects.Normal()])
plt.title(r'testing$^{123}$',
path_effects=[path_effects.withStroke(linewidth=1, foreground="r")])
leg = plt.legend([p1], [r'Line 1$^2$'], fancybox=True, loc='upper left')
leg.legendPatch.set_path_effects([path_effects.withSimplePatchShadow()])
text = plt.text(2, 3, 'Drop test', color='white',
bbox={'boxstyle': 'circle,pad=0.1', 'color': 'red'})
pe = [path_effects.Stroke(linewidth=3.75, foreground='k'),
path_effects.withSimplePatchShadow((6, -3), shadow_rgbFace='blue')]
text.set_path_effects(pe)
text.get_bbox_patch().set_path_effects(pe)
pe = [path_effects.PathPatchEffect(offset=(4, -4), hatch='xxxx',
facecolor='gray'),
path_effects.PathPatchEffect(edgecolor='white', facecolor='black',
lw=1.1)]
t = plt.gcf().text(0.02, 0.1, 'Hatch shadow', fontsize=75, weight=1000,
va='center')
t.set_path_effects(pe)
@image_comparison(baseline_images=['stroked_text'], extensions=['png'])
def test_patheffects_stroked_text():
text_chunks = [
'A B C D E F G H I J K L',
'M N O P Q R S T U V W',
'X Y Z a b c d e f g h i j',
'k l m n o p q r s t u v',
'w x y z 0123456789',
r"!@#$%^&*()-=_+[]\;'",
',./{}|:"<>?'
]
font_size = 50
ax = plt.axes([0, 0, 1, 1])
for i, chunk in enumerate(text_chunks):
text = ax.text(x=0.01, y=(0.9 - i * 0.13), s=chunk,
fontdict={'ha': 'left', 'va': 'center',
'size': font_size, 'color': 'white'})
text.set_path_effects([path_effects.Stroke(linewidth=font_size / 10,
foreground='black'),
path_effects.Normal()])
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
ax.axis('off')
@pytest.mark.xfail
def test_PathEffect_points_to_pixels():
fig = plt.figure(dpi=150)
p1, = plt.plot(range(10))
p1.set_path_effects([path_effects.SimpleLineShadow(),
path_effects.Normal()])
renderer = fig.canvas.get_renderer()
pe_renderer = path_effects.SimpleLineShadow().get_proxy_renderer(renderer)
assert isinstance(pe_renderer, path_effects.PathEffectRenderer)
# Confirm that using a path effects renderer maintains point sizes
    # appropriately. Otherwise the rendered font would be the wrong size.
assert renderer.points_to_pixels(15) == pe_renderer.points_to_pixels(15)
def test_SimplePatchShadow_offset():
pe = path_effects.SimplePatchShadow(offset=(4, 5))
assert pe._offset == (4, 5)
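# Sketch of the core path-effects pattern used throughout this file: draw the
# artist with a wide stroke first, then the normal rendering on top of it.
def _demo_stroked_text():
    fig, ax = plt.subplots()
    txt = ax.text(0.5, 0.5, 'outlined', color='white', ha='center')
    txt.set_path_effects([path_effects.Stroke(linewidth=3, foreground='black'),
                          path_effects.Normal()])
    plt.close(fig)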
@image_comparison(baseline_images=['collection'], tol=0.02, style='mpl20')
def test_collection():
x, y = np.meshgrid(np.linspace(0, 10, 150), np.linspace(-5, 5, 100))
data = np.sin(x) + np.cos(y)
cs = plt.contour(data)
pe = [path_effects.PathPatchEffect(edgecolor='black', facecolor='none',
linewidth=12),
path_effects.Stroke(linewidth=5)]
for collection in cs.collections:
collection.set_path_effects(pe)
for text in plt.clabel(cs, colors='white'):
text.set_path_effects([path_effects.withStroke(foreground='k',
linewidth=3)])
text.set_bbox({'boxstyle': 'sawtooth', 'facecolor': 'none',
'edgecolor': 'blue'})
|
0b4a423c118ebf8f52476c710ceaba2d25c0107fd936f216bebfe31bedb71a67
|
import matplotlib.type1font as t1f
import os.path
import difflib
def test_Type1Font():
filename = os.path.join(os.path.dirname(__file__), 'cmr10.pfb')
font = t1f.Type1Font(filename)
slanted = font.transform({'slant': 1})
condensed = font.transform({'extend': 0.5})
with open(filename, 'rb') as fd:
rawdata = fd.read()
assert font.parts[0] == rawdata[0x0006:0x10c5]
assert font.parts[1] == rawdata[0x10cb:0x897f]
assert font.parts[2] == rawdata[0x8985:0x8ba6]
assert font.parts[1:] == slanted.parts[1:]
assert font.parts[1:] == condensed.parts[1:]
differ = difflib.Differ()
diff = list(differ.compare(
font.parts[0].decode('latin-1').splitlines(),
slanted.parts[0].decode('latin-1').splitlines()))
for line in (
# Removes UniqueID
'- FontDirectory/CMR10 known{/CMR10 findfont dup/UniqueID known{dup',
'+ FontDirectory/CMR10 known{/CMR10 findfont dup',
# Changes the font name
'- /FontName /CMR10 def',
'+ /FontName /CMR10_Slant_1000 def',
# Alters FontMatrix
'- /FontMatrix [0.001 0 0 0.001 0 0 ]readonly def',
'+ /FontMatrix [0.001 0.0 0.001 0.001 0.0 0.0]readonly def',
# Alters ItalicAngle
'- /ItalicAngle 0 def',
'+ /ItalicAngle -45.0 def'):
assert line in diff, 'diff to slanted font must contain %s' % line
diff = list(differ.compare(font.parts[0].decode('latin-1').splitlines(),
condensed.parts[0].decode('latin-1').splitlines()))
for line in (
# Removes UniqueID
'- FontDirectory/CMR10 known{/CMR10 findfont dup/UniqueID known{dup',
'+ FontDirectory/CMR10 known{/CMR10 findfont dup',
# Changes the font name
'- /FontName /CMR10 def',
'+ /FontName /CMR10_Extend_500 def',
# Alters FontMatrix
'- /FontMatrix [0.001 0 0 0.001 0 0 ]readonly def',
'+ /FontMatrix [0.0005 0.0 0.0 0.001 0.0 0.0]readonly def'):
assert line in diff, 'diff to condensed font must contain %s' % line
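# Sketch: transform() returns a new Type1Font whose cleartext header (parts[0])
# is rewritten while the encrypted portion is left untouched, which is what the
# part-by-part comparisons above verify. Uses the same bundled cmr10.pfb file;
# the slant value here is arbitrary.
def _demo_type1_transform():
    font = t1f.Type1Font(os.path.join(os.path.dirname(__file__), 'cmr10.pfb'))
    slanted = font.transform({'slant': 0.5})
    assert slanted.parts[1:] == font.parts[1:]
    assert slanted.parts[0] != font.parts[0]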
|
ef90e2e9c89cf8963b25b3a7b1acfb37a1dc9b729400929a29b0a85520cf02e5
|
import matplotlib.pyplot as plt
def test_stem_remove():
ax = plt.gca()
st = ax.stem([1, 2], [1, 2], use_line_collection=True)
st.remove()
def test_errorbar_remove():
# Regression test for a bug that caused remove to fail when using
# fmt='none'
ax = plt.gca()
eb = ax.errorbar([1], [1])
eb.remove()
eb = ax.errorbar([1], [1], xerr=1)
eb.remove()
eb = ax.errorbar([1], [1], yerr=2)
eb.remove()
eb = ax.errorbar([1], [1], xerr=[2], yerr=2)
eb.remove()
eb = ax.errorbar([1], [1], fmt='none')
eb.remove()
|
39e57396ca344e23bfa280e7786d64636d1dd31419259ff744192c4d4306150a
|
import itertools
import pickle
from weakref import ref
import warnings
from unittest.mock import patch, Mock
from datetime import datetime
import numpy as np
from numpy.testing import (assert_array_equal, assert_approx_equal,
assert_array_almost_equal)
import pytest
import matplotlib.cbook as cbook
import matplotlib.colors as mcolors
from matplotlib.cbook import (
MatplotlibDeprecationWarning, delete_masked_points as dmp)
def test_is_hashable():
with pytest.warns(MatplotlibDeprecationWarning):
s = 'string'
assert cbook.is_hashable(s)
        lst = ['list', 'of', 'strings']
assert not cbook.is_hashable(lst)
class Test_delete_masked_points(object):
def setup_method(self):
self.mask1 = [False, False, True, True, False, False]
self.arr0 = np.arange(1.0, 7.0)
self.arr1 = [1, 2, 3, np.nan, np.nan, 6]
self.arr2 = np.array(self.arr1)
self.arr3 = np.ma.array(self.arr2, mask=self.mask1)
self.arr_s = ['a', 'b', 'c', 'd', 'e', 'f']
self.arr_s2 = np.array(self.arr_s)
self.arr_dt = [datetime(2008, 1, 1), datetime(2008, 1, 2),
datetime(2008, 1, 3), datetime(2008, 1, 4),
datetime(2008, 1, 5), datetime(2008, 1, 6)]
self.arr_dt2 = np.array(self.arr_dt)
self.arr_colors = ['r', 'g', 'b', 'c', 'm', 'y']
self.arr_rgba = mcolors.to_rgba_array(self.arr_colors)
def test_bad_first_arg(self):
with pytest.raises(ValueError):
dmp('a string', self.arr0)
def test_string_seq(self):
actual = dmp(self.arr_s, self.arr1)
ind = [0, 1, 2, 5]
expected = (self.arr_s2[ind], self.arr2[ind])
assert_array_equal(actual[0], expected[0])
assert_array_equal(actual[1], expected[1])
def test_datetime(self):
actual = dmp(self.arr_dt, self.arr3)
ind = [0, 1, 5]
expected = (self.arr_dt2[ind], self.arr3[ind].compressed())
assert_array_equal(actual[0], expected[0])
assert_array_equal(actual[1], expected[1])
def test_rgba(self):
actual = dmp(self.arr3, self.arr_rgba)
ind = [0, 1, 5]
expected = (self.arr3[ind].compressed(), self.arr_rgba[ind])
assert_array_equal(actual[0], expected[0])
assert_array_equal(actual[1], expected[1])
class Test_boxplot_stats(object):
def setup(self):
np.random.seed(937)
self.nrows = 37
self.ncols = 4
self.data = np.random.lognormal(size=(self.nrows, self.ncols),
mean=1.5, sigma=1.75)
self.known_keys = sorted([
'mean', 'med', 'q1', 'q3', 'iqr',
'cilo', 'cihi', 'whislo', 'whishi',
'fliers', 'label'
])
self.std_results = cbook.boxplot_stats(self.data)
self.known_nonbootstrapped_res = {
'cihi': 6.8161283264444847,
'cilo': -0.1489815330368689,
'iqr': 13.492709959447094,
'mean': 13.00447442387868,
'med': 3.3335733967038079,
'fliers': np.array([
92.55467075, 87.03819018, 42.23204914, 39.29390996
]),
'q1': 1.3597529879465153,
'q3': 14.85246294739361,
'whishi': 27.899688243699629,
'whislo': 0.042143774965502923
}
self.known_bootstrapped_ci = {
'cihi': 8.939577523357828,
'cilo': 1.8692703958676578,
}
self.known_whis3_res = {
'whishi': 42.232049135969874,
'whislo': 0.042143774965502923,
'fliers': np.array([92.55467075, 87.03819018]),
}
self.known_res_percentiles = {
'whislo': 0.1933685896907924,
'whishi': 42.232049135969874
}
self.known_res_range = {
'whislo': 0.042143774965502923,
'whishi': 92.554670752188699
}
def test_form_main_list(self):
assert isinstance(self.std_results, list)
def test_form_each_dict(self):
for res in self.std_results:
assert isinstance(res, dict)
def test_form_dict_keys(self):
for res in self.std_results:
assert set(res) <= set(self.known_keys)
def test_results_baseline(self):
res = self.std_results[0]
for key, value in self.known_nonbootstrapped_res.items():
assert_array_almost_equal(res[key], value)
def test_results_bootstrapped(self):
results = cbook.boxplot_stats(self.data, bootstrap=10000)
res = results[0]
for key, value in self.known_bootstrapped_ci.items():
assert_approx_equal(res[key], value)
def test_results_whiskers_float(self):
results = cbook.boxplot_stats(self.data, whis=3)
res = results[0]
for key, value in self.known_whis3_res.items():
assert_array_almost_equal(res[key], value)
def test_results_whiskers_range(self):
results = cbook.boxplot_stats(self.data, whis='range')
res = results[0]
for key, value in self.known_res_range.items():
assert_array_almost_equal(res[key], value)
def test_results_whiskers_percentiles(self):
results = cbook.boxplot_stats(self.data, whis=[5, 95])
res = results[0]
for key, value in self.known_res_percentiles.items():
assert_array_almost_equal(res[key], value)
def test_results_withlabels(self):
labels = ['Test1', 2, 'ardvark', 4]
results = cbook.boxplot_stats(self.data, labels=labels)
res = results[0]
for lab, res in zip(labels, results):
assert res['label'] == lab
results = cbook.boxplot_stats(self.data)
for res in results:
assert 'label' not in res
def test_label_error(self):
labels = [1, 2]
with pytest.raises(ValueError):
results = cbook.boxplot_stats(self.data, labels=labels)
def test_bad_dims(self):
data = np.random.normal(size=(34, 34, 34))
with pytest.raises(ValueError):
results = cbook.boxplot_stats(data)
def test_boxplot_stats_autorange_false(self):
x = np.zeros(shape=140)
x = np.hstack([-25, x, 25])
bstats_false = cbook.boxplot_stats(x, autorange=False)
bstats_true = cbook.boxplot_stats(x, autorange=True)
assert bstats_false[0]['whislo'] == 0
assert bstats_false[0]['whishi'] == 0
assert_array_almost_equal(bstats_false[0]['fliers'], [-25, 25])
assert bstats_true[0]['whislo'] == -25
assert bstats_true[0]['whishi'] == 25
assert_array_almost_equal(bstats_true[0]['fliers'], [])
class Test_callback_registry(object):
def setup(self):
self.signal = 'test'
self.callbacks = cbook.CallbackRegistry()
def connect(self, s, func):
return self.callbacks.connect(s, func)
def is_empty(self):
assert self.callbacks._func_cid_map == {}
assert self.callbacks.callbacks == {}
def is_not_empty(self):
assert self.callbacks._func_cid_map != {}
assert self.callbacks.callbacks != {}
def test_callback_complete(self):
# ensure we start with an empty registry
self.is_empty()
# create a class for testing
mini_me = Test_callback_registry()
# test that we can add a callback
cid1 = self.connect(self.signal, mini_me.dummy)
assert type(cid1) == int
self.is_not_empty()
# test that we don't add a second callback
cid2 = self.connect(self.signal, mini_me.dummy)
assert cid1 == cid2
self.is_not_empty()
assert len(self.callbacks._func_cid_map) == 1
assert len(self.callbacks.callbacks) == 1
del mini_me
# check we now have no callbacks registered
self.is_empty()
def dummy(self):
pass
def test_pickling(self):
assert hasattr(pickle.loads(pickle.dumps(cbook.CallbackRegistry())),
"callbacks")
def raising_cb_reg(func):
class TestException(Exception):
pass
def raising_function():
raise RuntimeError
def transformer(excp):
if isinstance(excp, RuntimeError):
raise TestException
raise excp
# default behavior
cb = cbook.CallbackRegistry()
cb.connect('foo', raising_function)
# old default
cb_old = cbook.CallbackRegistry(exception_handler=None)
cb_old.connect('foo', raising_function)
# filter
cb_filt = cbook.CallbackRegistry(exception_handler=transformer)
cb_filt.connect('foo', raising_function)
return pytest.mark.parametrize('cb, excp',
[[cb, None],
[cb_old, RuntimeError],
[cb_filt, TestException]])(func)
@raising_cb_reg
def test_callbackregistry_process_exception(cb, excp):
if excp is not None:
with pytest.raises(excp):
cb.process('foo')
else:
cb.process('foo')
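# Sketch of the CallbackRegistry API exercised above: connect() registers a
# callback for a signal and returns a cid, and process() invokes every callback
# registered for that signal with the given arguments.
def _demo_callback_registry():
    seen = []
    def on_changed(value):
        seen.append(value)
    registry = cbook.CallbackRegistry()
    registry.connect('changed', on_changed)
    registry.process('changed', 42)
    assert seen == [42]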
def test_sanitize_sequence():
d = {'a': 1, 'b': 2, 'c': 3}
k = ['a', 'b', 'c']
v = [1, 2, 3]
i = [('a', 1), ('b', 2), ('c', 3)]
assert k == sorted(cbook.sanitize_sequence(d.keys()))
assert v == sorted(cbook.sanitize_sequence(d.values()))
assert i == sorted(cbook.sanitize_sequence(d.items()))
assert i == cbook.sanitize_sequence(i)
assert k == cbook.sanitize_sequence(k)
fail_mapping = (
({'a': 1}, {'forbidden': ('a')}),
({'a': 1}, {'required': ('b')}),
({'a': 1, 'b': 2}, {'required': ('a'), 'allowed': ()})
)
warn_passing_mapping = (
({'a': 1, 'b': 2}, {'a': 1}, {'alias_mapping': {'a': ['b']}}, 1),
({'a': 1, 'b': 2}, {'a': 1},
{'alias_mapping': {'a': ['b']}, 'allowed': ('a',)}, 1),
({'a': 1, 'b': 2}, {'a': 2}, {'alias_mapping': {'a': ['a', 'b']}}, 1),
({'a': 1, 'b': 2, 'c': 3}, {'a': 1, 'c': 3},
{'alias_mapping': {'a': ['b']}, 'required': ('a', )}, 1),
)
pass_mapping = (
({'a': 1, 'b': 2}, {'a': 1, 'b': 2}, {}),
({'b': 2}, {'a': 2}, {'alias_mapping': {'a': ['a', 'b']}}),
({'b': 2}, {'a': 2},
{'alias_mapping': {'a': ['b']}, 'forbidden': ('b', )}),
({'a': 1, 'c': 3}, {'a': 1, 'c': 3},
{'required': ('a', ), 'allowed': ('c', )}),
({'a': 1, 'c': 3}, {'a': 1, 'c': 3},
{'required': ('a', 'c'), 'allowed': ('c', )}),
({'a': 1, 'c': 3}, {'a': 1, 'c': 3},
{'required': ('a', 'c'), 'allowed': ('a', 'c')}),
({'a': 1, 'c': 3}, {'a': 1, 'c': 3},
{'required': ('a', 'c'), 'allowed': ()}),
({'a': 1, 'c': 3}, {'a': 1, 'c': 3}, {'required': ('a', 'c')}),
({'a': 1, 'c': 3}, {'a': 1, 'c': 3}, {'allowed': ('a', 'c')}),
)
@pytest.mark.parametrize('inp, kwargs_to_norm', fail_mapping)
def test_normalize_kwargs_fail(inp, kwargs_to_norm):
with pytest.raises(TypeError):
cbook.normalize_kwargs(inp, **kwargs_to_norm)
@pytest.mark.parametrize('inp, expected, kwargs_to_norm, warn_count',
warn_passing_mapping)
def test_normalize_kwargs_warn(inp, expected, kwargs_to_norm, warn_count):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
assert expected == cbook.normalize_kwargs(inp, **kwargs_to_norm)
assert len(w) == warn_count
@pytest.mark.parametrize('inp, expected, kwargs_to_norm',
pass_mapping)
def test_normalize_kwargs_pass(inp, expected, kwargs_to_norm):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
assert expected == cbook.normalize_kwargs(inp, **kwargs_to_norm)
assert len(w) == 0
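# Sketch: normalize_kwargs maps aliases onto their canonical names, which is
# the behaviour driven through the pass/warn/fail tables above. The alias
# mapping here is hypothetical and only for illustration.
def _demo_normalize_kwargs():
    out = cbook.normalize_kwargs({'lw': 3}, alias_mapping={'linewidth': ['lw']})
    assert out == {'linewidth': 3}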
def test_warn_external_frame_embedded_python():
with patch.object(cbook, "sys") as mock_sys:
mock_sys._getframe = Mock(return_value=None)
with warnings.catch_warnings(record=True) as w:
cbook._warn_external("dummy")
assert len(w) == 1
assert str(w[0].message) == "dummy"
def test_to_prestep():
x = np.arange(4)
y1 = np.arange(4)
y2 = np.arange(4)[::-1]
xs, y1s, y2s = cbook.pts_to_prestep(x, y1, y2)
x_target = np.asarray([0, 0, 1, 1, 2, 2, 3], dtype='float')
y1_target = np.asarray([0, 1, 1, 2, 2, 3, 3], dtype='float')
y2_target = np.asarray([3, 2, 2, 1, 1, 0, 0], dtype='float')
assert_array_equal(x_target, xs)
assert_array_equal(y1_target, y1s)
assert_array_equal(y2_target, y2s)
xs, y1s = cbook.pts_to_prestep(x, y1)
assert_array_equal(x_target, xs)
assert_array_equal(y1_target, y1s)
def test_to_prestep_empty():
steps = cbook.pts_to_prestep([], [])
assert steps.shape == (2, 0)
def test_to_poststep():
x = np.arange(4)
y1 = np.arange(4)
y2 = np.arange(4)[::-1]
xs, y1s, y2s = cbook.pts_to_poststep(x, y1, y2)
x_target = np.asarray([0, 1, 1, 2, 2, 3, 3], dtype='float')
y1_target = np.asarray([0, 0, 1, 1, 2, 2, 3], dtype='float')
y2_target = np.asarray([3, 3, 2, 2, 1, 1, 0], dtype='float')
assert_array_equal(x_target, xs)
assert_array_equal(y1_target, y1s)
assert_array_equal(y2_target, y2s)
xs, y1s = cbook.pts_to_poststep(x, y1)
assert_array_equal(x_target, xs)
assert_array_equal(y1_target, y1s)
def test_to_poststep_empty():
steps = cbook.pts_to_poststep([], [])
assert steps.shape == (2, 0)
def test_to_midstep():
x = np.arange(4)
y1 = np.arange(4)
y2 = np.arange(4)[::-1]
xs, y1s, y2s = cbook.pts_to_midstep(x, y1, y2)
x_target = np.asarray([0, .5, .5, 1.5, 1.5, 2.5, 2.5, 3], dtype='float')
y1_target = np.asarray([0, 0, 1, 1, 2, 2, 3, 3], dtype='float')
y2_target = np.asarray([3, 3, 2, 2, 1, 1, 0, 0], dtype='float')
assert_array_equal(x_target, xs)
assert_array_equal(y1_target, y1s)
assert_array_equal(y2_target, y2s)
xs, y1s = cbook.pts_to_midstep(x, y1)
assert_array_equal(x_target, xs)
assert_array_equal(y1_target, y1s)
def test_to_midstep_empty():
steps = cbook.pts_to_midstep([], [])
assert steps.shape == (2, 0)
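# Sketch: the pts_to_*step helpers expand N points into the vertices of a step
# curve; pre-step repeats each x at the jump, post-step repeats each y, and
# mid-step places the jumps halfway between x values (see the targets above).
def _demo_prestep_shape():
    xs, ys = cbook.pts_to_prestep([0, 1, 2], [3, 4, 5])
    assert xs.tolist() == [0, 0, 1, 1, 2]
    assert ys.tolist() == [3, 4, 4, 5, 5]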
@pytest.mark.parametrize(
"args",
[(np.arange(12).reshape(3, 4), 'a'),
(np.arange(12), 'a'),
(np.arange(12), np.arange(3))])
def test_step_fails(args):
with pytest.raises(ValueError):
cbook.pts_to_prestep(*args)
def test_grouper():
class dummy():
pass
a, b, c, d, e = objs = [dummy() for j in range(5)]
g = cbook.Grouper()
g.join(*objs)
assert set(list(g)[0]) == set(objs)
assert set(g.get_siblings(a)) == set(objs)
for other in objs[1:]:
assert g.joined(a, other)
g.remove(a)
for other in objs[1:]:
assert not g.joined(a, other)
for A, B in itertools.product(objs[1:], objs[1:]):
assert g.joined(A, B)
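# Sketch of the Grouper API: join() merges disjoint sets and joined() queries
# membership. Grouper keeps weak references, so plain object() instances (which
# cannot be weak-referenced) are not usable; a trivial class is used instead.
def _demo_grouper_join():
    class Node:
        pass
    a, b, c = Node(), Node(), Node()
    g = cbook.Grouper()
    g.join(a, b)
    assert g.joined(a, b)
    assert not g.joined(a, c)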
def test_grouper_private():
class dummy():
pass
objs = [dummy() for j in range(5)]
g = cbook.Grouper()
g.join(*objs)
    # Reach in and touch the internals!
mapping = g._mapping
for o in objs:
assert ref(o) in mapping
base_set = mapping[ref(objs[0])]
for o in objs[1:]:
assert mapping[ref(o)] is base_set
def test_flatiter():
x = np.arange(5)
it = x.flat
assert 0 == next(it)
assert 1 == next(it)
ret = cbook.safe_first_element(it)
assert ret == 0
assert 0 == next(it)
assert 1 == next(it)
def test_reshape2d():
class dummy():
pass
xnew = cbook._reshape_2D([], 'x')
assert np.shape(xnew) == (1, 0)
x = [dummy() for j in range(5)]
xnew = cbook._reshape_2D(x, 'x')
assert np.shape(xnew) == (1, 5)
x = np.arange(5)
xnew = cbook._reshape_2D(x, 'x')
assert np.shape(xnew) == (1, 5)
x = [[dummy() for j in range(5)] for i in range(3)]
xnew = cbook._reshape_2D(x, 'x')
assert np.shape(xnew) == (3, 5)
    # This is strange behaviour: a 2D input array is interpreted column-wise,
    # so the reported shape comes out transposed.
x = np.random.rand(3, 5)
xnew = cbook._reshape_2D(x, 'x')
assert np.shape(xnew) == (5, 3)
def test_contiguous_regions():
a, b, c = 3, 4, 5
# Starts and ends with True
mask = [True]*a + [False]*b + [True]*c
expected = [(0, a), (a+b, a+b+c)]
assert cbook.contiguous_regions(mask) == expected
d, e = 6, 7
# Starts with True ends with False
mask = mask + [False]*e
assert cbook.contiguous_regions(mask) == expected
# Starts with False ends with True
mask = [False]*d + mask[:-e]
expected = [(d, d+a), (d+a+b, d+a+b+c)]
assert cbook.contiguous_regions(mask) == expected
# Starts and ends with False
mask = mask + [False]*e
assert cbook.contiguous_regions(mask) == expected
# No True in mask
assert cbook.contiguous_regions([False]*5) == []
# Empty mask
assert cbook.contiguous_regions([]) == []
def test_safe_first_element_pandas_series(pd):
# deliberately create a pandas series with index not starting from 0
s = pd.Series(range(5), index=range(10, 15))
actual = cbook.safe_first_element(s)
assert actual == 0
def test_make_keyword_only(recwarn):
@cbook._make_keyword_only("3.0", "arg")
def func(pre, arg, post=None):
pass
func(1, arg=2)
assert len(recwarn) == 0
with pytest.warns(MatplotlibDeprecationWarning):
func(1, 2)
with pytest.warns(MatplotlibDeprecationWarning):
func(1, 2, 3)
def test_warn_external(recwarn):
cbook._warn_external("oops")
assert len(recwarn) == 1
assert recwarn[0].filename == __file__
|
3e98a733719108e6cfcccbaf5f5ef65dce3e7b187b18d025b311649e39f67dda
|
import warnings
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
from matplotlib.offsetbox import AnchoredOffsetbox, DrawingArea
from matplotlib.patches import Rectangle
def example_plot(ax, fontsize=12):
ax.plot([1, 2])
ax.locator_params(nbins=3)
ax.set_xlabel('x-label', fontsize=fontsize)
ax.set_ylabel('y-label', fontsize=fontsize)
ax.set_title('Title', fontsize=fontsize)
@image_comparison(baseline_images=['tight_layout1'])
def test_tight_layout1():
'Test tight_layout for a single subplot'
fig, ax = plt.subplots()
example_plot(ax, fontsize=24)
plt.tight_layout()
@image_comparison(baseline_images=['tight_layout2'])
def test_tight_layout2():
'Test tight_layout for multiple subplots'
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2)
example_plot(ax1)
example_plot(ax2)
example_plot(ax3)
example_plot(ax4)
plt.tight_layout()
@image_comparison(baseline_images=['tight_layout3'])
def test_tight_layout3():
'Test tight_layout for multiple subplots'
fig = plt.figure()
ax1 = plt.subplot(221)
ax2 = plt.subplot(223)
ax3 = plt.subplot(122)
example_plot(ax1)
example_plot(ax2)
example_plot(ax3)
plt.tight_layout()
@image_comparison(baseline_images=['tight_layout4'],
freetype_version=('2.5.5', '2.6.1'))
def test_tight_layout4():
'Test tight_layout for subplot2grid'
fig = plt.figure()
ax1 = plt.subplot2grid((3, 3), (0, 0))
ax2 = plt.subplot2grid((3, 3), (0, 1), colspan=2)
ax3 = plt.subplot2grid((3, 3), (1, 0), colspan=2, rowspan=2)
ax4 = plt.subplot2grid((3, 3), (1, 2), rowspan=2)
example_plot(ax1)
example_plot(ax2)
example_plot(ax3)
example_plot(ax4)
plt.tight_layout()
@image_comparison(baseline_images=['tight_layout5'])
def test_tight_layout5():
'Test tight_layout for image'
fig = plt.figure()
ax = plt.subplot(111)
arr = np.arange(100).reshape((10, 10))
ax.imshow(arr, interpolation="none")
plt.tight_layout()
@image_comparison(baseline_images=['tight_layout6'])
def test_tight_layout6():
'Test tight_layout for gridspec'
# This raises warnings since tight layout cannot
# do this fully automatically. But the test is
# correct since the layout is manually edited
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
fig = plt.figure()
import matplotlib.gridspec as gridspec
gs1 = gridspec.GridSpec(2, 1)
ax1 = fig.add_subplot(gs1[0])
ax2 = fig.add_subplot(gs1[1])
example_plot(ax1)
example_plot(ax2)
gs1.tight_layout(fig, rect=[0, 0, 0.5, 1])
gs2 = gridspec.GridSpec(3, 1)
for ss in gs2:
ax = fig.add_subplot(ss)
example_plot(ax)
ax.set_title("")
ax.set_xlabel("")
ax.set_xlabel("x-label", fontsize=12)
gs2.tight_layout(fig, rect=[0.5, 0, 1, 1], h_pad=0.45)
top = min(gs1.top, gs2.top)
bottom = max(gs1.bottom, gs2.bottom)
gs1.tight_layout(fig, rect=[None, 0 + (bottom-gs1.bottom),
0.5, 1 - (gs1.top-top)])
gs2.tight_layout(fig, rect=[0.5, 0 + (bottom-gs2.bottom),
None, 1 - (gs2.top-top)],
h_pad=0.45)
@image_comparison(baseline_images=['tight_layout7'])
def test_tight_layout7():
# tight layout with left and right titles
fontsize = 24
fig, ax = plt.subplots()
ax.plot([1, 2])
ax.locator_params(nbins=3)
ax.set_xlabel('x-label', fontsize=fontsize)
ax.set_ylabel('y-label', fontsize=fontsize)
ax.set_title('Left Title', loc='left', fontsize=fontsize)
ax.set_title('Right Title', loc='right', fontsize=fontsize)
plt.tight_layout()
@image_comparison(baseline_images=['tight_layout8'])
def test_tight_layout8():
'Test automatic use of tight_layout'
fig = plt.figure()
fig.set_tight_layout({'pad': .1})
ax = fig.add_subplot(111)
example_plot(ax, fontsize=24)
@image_comparison(baseline_images=['tight_layout9'])
def test_tight_layout9():
# Test tight_layout for non-visible subplots
# GH 8244
f, axarr = plt.subplots(2, 2)
axarr[1][1].set_visible(False)
plt.tight_layout()
def test_outward_ticks():
    'Test that tight_layout accounts for long outward-pointing ticks'
fig = plt.figure()
ax = fig.add_subplot(221)
ax.xaxis.set_tick_params(tickdir='out', length=16, width=3)
ax.yaxis.set_tick_params(tickdir='out', length=16, width=3)
ax.xaxis.set_tick_params(
tickdir='out', length=32, width=3, tick1On=True, which='minor')
ax.yaxis.set_tick_params(
tickdir='out', length=32, width=3, tick1On=True, which='minor')
ax.xaxis.set_ticks([0], minor=True)
ax.yaxis.set_ticks([0], minor=True)
ax = fig.add_subplot(222)
ax.xaxis.set_tick_params(tickdir='in', length=32, width=3)
ax.yaxis.set_tick_params(tickdir='in', length=32, width=3)
ax = fig.add_subplot(223)
ax.xaxis.set_tick_params(tickdir='inout', length=32, width=3)
ax.yaxis.set_tick_params(tickdir='inout', length=32, width=3)
ax = fig.add_subplot(224)
ax.xaxis.set_tick_params(tickdir='out', length=32, width=3)
ax.yaxis.set_tick_params(tickdir='out', length=32, width=3)
plt.tight_layout()
    # These values were obtained after visually checking that they correspond
    # to a tight layout that takes the ticks into account.
ans = [[[0.091, 0.607], [0.433, 0.933]],
[[0.579, 0.607], [0.922, 0.933]],
[[0.091, 0.140], [0.433, 0.466]],
[[0.579, 0.140], [0.922, 0.466]]]
for nn, ax in enumerate(fig.axes):
assert_array_equal(np.round(ax.get_position().get_points(), 3),
ans[nn])
def add_offsetboxes(ax, size=10, margin=.1, color='black'):
"""
Surround ax with OffsetBoxes
"""
m, mp = margin, 1+margin
anchor_points = [(-m, -m), (-m, .5), (-m, mp),
(mp, .5), (.5, mp), (mp, mp),
(.5, -m), (mp, -m), (.5, -m)]
for point in anchor_points:
da = DrawingArea(size, size)
background = Rectangle((0, 0), width=size,
height=size,
facecolor=color,
edgecolor='None',
linewidth=0,
antialiased=False)
da.add_artist(background)
anchored_box = AnchoredOffsetbox(
loc='center',
child=da,
pad=0.,
frameon=False,
bbox_to_anchor=point,
bbox_transform=ax.transAxes,
borderpad=0.)
ax.add_artist(anchored_box)
return anchored_box
@image_comparison(baseline_images=['tight_layout_offsetboxes1',
'tight_layout_offsetboxes2'])
def test_tight_layout_offsetboxes():
# 1.
# - Create 4 subplots
# - Plot a diagonal line on them
# - Surround each plot with 7 boxes
# - Use tight_layout
# - See that the squares are included in the tight_layout
# and that the squares in the middle do not overlap
#
# 2.
# - Make the squares around the right side axes invisible
# - See that the invisible squares do not affect the
# tight_layout
rows = cols = 2
colors = ['red', 'blue', 'green', 'yellow']
x = y = [0, 1]
def _subplots():
_, axs = plt.subplots(rows, cols)
axs = axs.flat
for ax, color in zip(axs, colors):
ax.plot(x, y, color=color)
add_offsetboxes(ax, 20, color=color)
return axs
# 1.
axs = _subplots()
plt.tight_layout()
# 2.
axs = _subplots()
for ax in (axs[cols-1::rows]):
for child in ax.get_children():
if isinstance(child, AnchoredOffsetbox):
child.set_visible(False)
plt.tight_layout()
def test_empty_layout():
"""Test that tight layout doesn't cause an error when there are no axes."""
fig = plt.gcf()
fig.tight_layout()
@pytest.mark.parametrize("label", ["xlabel", "ylabel"])
def test_verybig_decorators(label):
"""Test that warning emitted when xlabel/ylabel too big."""
fig, ax = plt.subplots(figsize=(3, 2))
ax.set(**{label: 'a' * 100})
with pytest.warns(UserWarning):
fig.tight_layout()
def test_big_decorators_horizontal():
"Test that warning emitted when xlabel too big"
fig, axs = plt.subplots(1, 2, figsize=(3, 2))
axs[0].set_xlabel('a' * 30)
axs[1].set_xlabel('b' * 30)
with pytest.warns(UserWarning):
fig.tight_layout()
def test_big_decorators_vertical():
"Test that warning emitted when xlabel too big"
fig, axs = plt.subplots(2, 1, figsize=(3, 2))
axs[0].set_ylabel('a' * 20)
axs[1].set_ylabel('b' * 20)
with pytest.warns(UserWarning):
fig.tight_layout()
def test_badsubplotgrid():
    # Test that we get a warning for mismatched subplot grids rather
    # than an error.
ax1 = plt.subplot2grid((4, 5), (0, 0))
# this is the bad entry:
ax5 = plt.subplot2grid((5, 5), (0, 3), colspan=3, rowspan=5)
with pytest.warns(UserWarning):
plt.tight_layout()
def test_collapsed():
    # Test that if a call to tight_layout would collapse the axes,
    # it does not get applied.
fig, ax = plt.subplots(tight_layout=True)
ax.set_xlim([0, 1])
ax.set_ylim([0, 1])
ax.annotate('BIG LONG STRING', xy=(1.25, 2), xytext=(10.5, 1.75),)
p1 = ax.get_position()
with pytest.warns(UserWarning):
plt.tight_layout()
p2 = ax.get_position()
assert p1.width == p2.width
# test that passing a rect doesn't crash...
with pytest.warns(UserWarning):
plt.tight_layout(rect=[0, 0, 0.8, 0.8])
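# Sketch: set_tight_layout accepts either a bool or a dict of padding options
# (as in test_tight_layout8); the layout is then applied automatically at draw
# time instead of through an explicit tight_layout() call.
def _demo_tight_layout_pad():
    fig, ax = plt.subplots()
    ax.plot([1, 2])
    fig.set_tight_layout({'pad': 0.4})
    plt.close(fig)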
|
2ff82c053c666dd73913529003c494e95a23046501e37341fd4e045af2611c86
|
import builtins
import subprocess
import sys
import textwrap
import matplotlib
def test_simple():
assert 1 + 1 == 2
def test_override_builtins():
import pylab
ok_to_override = {
'__name__',
'__doc__',
'__package__',
'__loader__',
'__spec__',
'any',
'all',
'sum',
'divmod'
}
overridden = False
for key in dir(pylab):
if key in dir(builtins):
if (getattr(pylab, key) != getattr(builtins, key) and
key not in ok_to_override):
print("'%s' was overridden in globals()." % key)
overridden = True
assert not overridden
def test_lazy_imports():
source = textwrap.dedent("""
import sys
import matplotlib.figure
import matplotlib.backend_bases
import matplotlib.pyplot
assert 'matplotlib._png' not in sys.modules
assert 'matplotlib._tri' not in sys.modules
assert 'matplotlib._qhull' not in sys.modules
assert 'matplotlib._contour' not in sys.modules
assert 'urllib.request' not in sys.modules
""")
subprocess.check_call([
sys.executable,
'-c',
source
])
|
e69f0f1f45af4f97f750d99c552151ca214a8612794fbfb2b43314fc9ad7bee0
|
import sys
import platform
import numpy as np
from numpy.testing import assert_array_almost_equal
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison
import matplotlib.transforms as mtransforms
on_win = (sys.platform == 'win32')
def velocity_field():
Y, X = np.mgrid[-3:3:100j, -3:3:100j]
U = -1 - X**2 + Y
V = 1 + X - Y**2
return X, Y, U, V
def swirl_velocity_field():
x = np.linspace(-3., 3., 100)
y = np.linspace(-3., 3., 100)
X, Y = np.meshgrid(x, y)
a = 0.1
U = np.cos(a) * (-Y) - np.sin(a) * X
V = np.sin(a) * (-Y) + np.cos(a) * X
return x, y, U, V
@image_comparison(baseline_images=['streamplot_startpoints'],
remove_text=True, style='mpl20')
def test_startpoints():
X, Y, U, V = velocity_field()
start_x = np.linspace(X.min(), X.max(), 10)
start_y = np.linspace(Y.min(), Y.max(), 10)
start_points = np.column_stack([start_x, start_y])
plt.streamplot(X, Y, U, V, start_points=start_points)
plt.plot(start_x, start_y, 'ok')
@image_comparison(baseline_images=['streamplot_colormap'],
tol=.04, remove_text=True, style='mpl20')
def test_colormap():
X, Y, U, V = velocity_field()
plt.streamplot(X, Y, U, V, color=U, density=0.6, linewidth=2,
cmap=plt.cm.autumn)
plt.colorbar()
@image_comparison(baseline_images=['streamplot_linewidth'],
tol={'aarch64': 0.02}.get(platform.machine(), 0.0),
remove_text=True, style='mpl20')
def test_linewidth():
X, Y, U, V = velocity_field()
speed = np.hypot(U, V)
lw = 5 * speed / speed.max()
df = 25 / 30 # Compatibility factor for old test image
plt.streamplot(X, Y, U, V, density=[0.5 * df, 1. * df], color='k',
linewidth=lw)
@image_comparison(baseline_images=['streamplot_masks_and_nans'],
tol=0.04 if on_win else 0,
remove_text=True, style='mpl20')
def test_masks_and_nans():
X, Y, U, V = velocity_field()
mask = np.zeros(U.shape, dtype=bool)
mask[40:60, 40:60] = 1
U[:20, :20] = np.nan
U = np.ma.array(U, mask=mask)
with np.errstate(invalid='ignore'):
plt.streamplot(X, Y, U, V, color=U, cmap=plt.cm.Blues)
@image_comparison(baseline_images=['streamplot_maxlength'],
extensions=['png'], remove_text=True, style='mpl20')
def test_maxlength():
x, y, U, V = swirl_velocity_field()
plt.streamplot(x, y, U, V, maxlength=10., start_points=[[0., 1.5]],
linewidth=2, density=2)
@image_comparison(baseline_images=['streamplot_direction'],
extensions=['png'], remove_text=True, style='mpl20')
def test_direction():
x, y, U, V = swirl_velocity_field()
plt.streamplot(x, y, U, V, integration_direction='backward',
maxlength=1.5, start_points=[[1.5, 0.]],
linewidth=2, density=2)
def test_streamplot_limits():
ax = plt.axes()
x = np.linspace(-5, 10, 20)
y = np.linspace(-2, 4, 10)
y, x = np.meshgrid(y, x)
trans = mtransforms.Affine2D().translate(25, 32) + ax.transData
plt.barbs(x, y, np.sin(x), np.cos(y), transform=trans)
# The calculated bounds are approximately the bounds of the original data,
# this is because the entire path is taken into account when updating the
# datalim.
assert_array_almost_equal(ax.dataLim.bounds, (20, 30, 15, 6),
decimal=1)
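# Sketch: streamplot accepts 1-D grid vectors together with 2-D U, V arrays of
# matching shape, which is the layout produced by swirl_velocity_field above.
def _demo_streamplot_basic():
    fig, ax = plt.subplots()
    x, y, U, V = swirl_velocity_field()
    ax.streamplot(x, y, U, V, density=0.5)
    plt.close(fig)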
|
0d8a13eaf9d4373b76b7b5dcbb1f215b57761a2d3972bbea94b93823b1212e21
|
from collections import OrderedDict
from contextlib import contextmanager
import gc
from pathlib import Path
from tempfile import TemporaryDirectory
import sys
import pytest
import matplotlib as mpl
from matplotlib import pyplot as plt, style
from matplotlib.style.core import USER_LIBRARY_PATHS, STYLE_EXTENSION
PARAM = 'image.cmap'
VALUE = 'pink'
DUMMY_SETTINGS = {PARAM: VALUE}
@contextmanager
def temp_style(style_name, settings=None):
"""Context manager to create a style sheet in a temporary directory."""
if not settings:
settings = DUMMY_SETTINGS
temp_file = '%s.%s' % (style_name, STYLE_EXTENSION)
try:
with TemporaryDirectory() as tmpdir:
# Write style settings to file in the tmpdir.
Path(tmpdir, temp_file).write_text(
"\n".join("{}: {}".format(k, v) for k, v in settings.items()))
# Add tmpdir to style path and reload so we can access this style.
USER_LIBRARY_PATHS.append(tmpdir)
style.reload_library()
yield
finally:
style.reload_library()
def test_invalid_rc_warning_includes_filename(capsys):
SETTINGS = {'foo': 'bar'}
basename = 'basename'
with temp_style(basename, SETTINGS):
# style.reload_library() in temp_style() triggers the warning
pass
assert basename in capsys.readouterr().err
def test_available():
with temp_style('_test_', DUMMY_SETTINGS):
assert '_test_' in style.available
def test_use():
mpl.rcParams[PARAM] = 'gray'
with temp_style('test', DUMMY_SETTINGS):
with style.context('test'):
assert mpl.rcParams[PARAM] == VALUE
def test_use_url(tmpdir):
path = Path(tmpdir, 'file')
path.write_text('axes.facecolor: adeade')
with temp_style('test', DUMMY_SETTINGS):
url = ('file:'
+ ('///' if sys.platform == 'win32' else '')
+ path.resolve().as_posix())
with style.context(url):
assert mpl.rcParams['axes.facecolor'] == "#adeade"
def test_context():
mpl.rcParams[PARAM] = 'gray'
with temp_style('test', DUMMY_SETTINGS):
with style.context('test'):
assert mpl.rcParams[PARAM] == VALUE
    # Check that this value is reset after exiting the context.
assert mpl.rcParams[PARAM] == 'gray'
def test_context_with_dict():
original_value = 'gray'
other_value = 'blue'
mpl.rcParams[PARAM] = original_value
with style.context({PARAM: other_value}):
assert mpl.rcParams[PARAM] == other_value
assert mpl.rcParams[PARAM] == original_value
def test_context_with_dict_after_namedstyle():
# Test dict after style name where dict modifies the same parameter.
original_value = 'gray'
other_value = 'blue'
mpl.rcParams[PARAM] = original_value
with temp_style('test', DUMMY_SETTINGS):
with style.context(['test', {PARAM: other_value}]):
assert mpl.rcParams[PARAM] == other_value
assert mpl.rcParams[PARAM] == original_value
def test_context_with_dict_before_namedstyle():
# Test dict before style name where dict modifies the same parameter.
original_value = 'gray'
other_value = 'blue'
mpl.rcParams[PARAM] = original_value
with temp_style('test', DUMMY_SETTINGS):
with style.context([{PARAM: other_value}, 'test']):
assert mpl.rcParams[PARAM] == VALUE
assert mpl.rcParams[PARAM] == original_value
def test_context_with_union_of_dict_and_namedstyle():
    # Test dict after style name where the dict modifies a different parameter.
original_value = 'gray'
other_param = 'text.usetex'
other_value = True
d = {other_param: other_value}
mpl.rcParams[PARAM] = original_value
mpl.rcParams[other_param] = (not other_value)
with temp_style('test', DUMMY_SETTINGS):
with style.context(['test', d]):
assert mpl.rcParams[PARAM] == VALUE
assert mpl.rcParams[other_param] == other_value
assert mpl.rcParams[PARAM] == original_value
assert mpl.rcParams[other_param] == (not other_value)
def test_context_with_badparam():
original_value = 'gray'
other_value = 'blue'
d = OrderedDict([(PARAM, original_value), ('badparam', None)])
with style.context({PARAM: other_value}):
assert mpl.rcParams[PARAM] == other_value
x = style.context([d])
with pytest.raises(KeyError):
with x:
pass
assert mpl.rcParams[PARAM] == other_value
@pytest.mark.parametrize('equiv_styles',
[('mpl20', 'default'),
('mpl15', 'classic')],
ids=['mpl20', 'mpl15'])
def test_alias(equiv_styles):
rc_dicts = []
for sty in equiv_styles:
with style.context(sty):
rc_dicts.append(dict(mpl.rcParams))
rc_base = rc_dicts[0]
for nm, rc in zip(equiv_styles[1:], rc_dicts[1:]):
assert rc_base == rc
def test_xkcd_no_cm():
assert mpl.rcParams["path.sketch"] is None
plt.xkcd()
assert mpl.rcParams["path.sketch"] == (1, 100, 2)
gc.collect()
assert mpl.rcParams["path.sketch"] == (1, 100, 2)
def test_xkcd_cm():
assert mpl.rcParams["path.sketch"] is None
with plt.xkcd():
assert mpl.rcParams["path.sketch"] == (1, 100, 2)
assert mpl.rcParams["path.sketch"] is None
|
22b128fe410fded1ce08265223058d8dccbf1827a04ac39847b1bbedbef39ae6
|
import numpy as np
from numpy.testing import (
assert_array_equal, assert_array_almost_equal, assert_array_less)
import numpy.ma.testutils as matest
import pytest
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import matplotlib.tri as mtri
from matplotlib.path import Path
from matplotlib.testing.decorators import image_comparison
def test_delaunay():
# No duplicate points, regular grid.
nx = 5
ny = 4
x, y = np.meshgrid(np.linspace(0.0, 1.0, nx), np.linspace(0.0, 1.0, ny))
x = x.ravel()
y = y.ravel()
npoints = nx*ny
ntriangles = 2 * (nx-1) * (ny-1)
nedges = 3*nx*ny - 2*nx - 2*ny + 1
# Create delaunay triangulation.
triang = mtri.Triangulation(x, y)
# The tests in the remainder of this function should be passed by any
# triangulation that does not contain duplicate points.
# Points - floating point.
assert_array_almost_equal(triang.x, x)
assert_array_almost_equal(triang.y, y)
# Triangles - integers.
assert len(triang.triangles) == ntriangles
assert np.min(triang.triangles) == 0
assert np.max(triang.triangles) == npoints-1
# Edges - integers.
assert len(triang.edges) == nedges
assert np.min(triang.edges) == 0
assert np.max(triang.edges) == npoints-1
# Neighbors - integers.
# Check that neighbors calculated by C++ triangulation class are the same
# as those returned from delaunay routine.
neighbors = triang.neighbors
triang._neighbors = None
assert_array_equal(triang.neighbors, neighbors)
# Is each point used in at least one triangle?
assert_array_equal(np.unique(triang.triangles), np.arange(npoints))
def test_delaunay_duplicate_points():
npoints = 10
duplicate = 7
duplicate_of = 3
np.random.seed(23)
x = np.random.random(npoints)
y = np.random.random(npoints)
x[duplicate] = x[duplicate_of]
y[duplicate] = y[duplicate_of]
# Create delaunay triangulation.
triang = mtri.Triangulation(x, y)
# Duplicate points should be ignored, so the index of the duplicate points
# should not appear in any triangle.
assert_array_equal(np.unique(triang.triangles),
np.delete(np.arange(npoints), duplicate))
def test_delaunay_points_in_line():
# Cannot triangulate points that are all in a straight line, but check
# that delaunay code fails gracefully.
x = np.linspace(0.0, 10.0, 11)
y = np.linspace(0.0, 10.0, 11)
with pytest.raises(RuntimeError):
mtri.Triangulation(x, y)
# Add an extra point not on the line and the triangulation is OK.
x = np.append(x, 2.0)
y = np.append(y, 8.0)
triang = mtri.Triangulation(x, y)
@pytest.mark.parametrize('x, y', [
# Triangulation should raise a ValueError if passed less than 3 points.
([], []),
([1], [5]),
([1, 2], [5, 6]),
# Triangulation should also raise a ValueError if passed duplicate points
# such that there are less than 3 unique points.
([1, 2, 1], [5, 6, 5]),
([1, 2, 2], [5, 6, 6]),
([1, 1, 1, 2, 1, 2], [5, 5, 5, 6, 5, 6]),
])
def test_delaunay_insufficient_points(x, y):
with pytest.raises(ValueError):
mtri.Triangulation(x, y)
def test_delaunay_robust():
# Fails when mtri.Triangulation uses matplotlib.delaunay, works when using
# qhull.
tri_points = np.array([
[0.8660254037844384, -0.5000000000000004],
[0.7577722283113836, -0.5000000000000004],
[0.6495190528383288, -0.5000000000000003],
[0.5412658773652739, -0.5000000000000003],
[0.811898816047911, -0.40625000000000044],
[0.7036456405748561, -0.4062500000000004],
[0.5953924651018013, -0.40625000000000033]])
test_points = np.asarray([
[0.58, -0.46],
[0.65, -0.46],
[0.65, -0.42],
[0.7, -0.48],
[0.7, -0.44],
[0.75, -0.44],
[0.8, -0.48]])
# Utility function that indicates if a triangle defined by 3 points
# (xtri, ytri) contains the test point xy. Avoid calling with a point that
# lies on or very near to an edge of the triangle.
def tri_contains_point(xtri, ytri, xy):
tri_points = np.vstack((xtri, ytri)).T
return Path(tri_points).contains_point(xy)
# Utility function that returns how many triangles of the specified
# triangulation contain the test point xy. Avoid calling with a point that
# lies on or very near to an edge of any triangle in the triangulation.
def tris_contain_point(triang, xy):
count = 0
for tri in triang.triangles:
if tri_contains_point(triang.x[tri], triang.y[tri], xy):
count += 1
return count
# Using matplotlib.delaunay, an invalid triangulation is created with
# overlapping triangles; qhull is OK.
triang = mtri.Triangulation(tri_points[:, 0], tri_points[:, 1])
for test_point in test_points:
assert tris_contain_point(triang, test_point) == 1
    # If the first point of tri_points is ignored, matplotlib.delaunay throws
    # a KeyError when calculating the convex hull; qhull is OK.
triang = mtri.Triangulation(tri_points[1:, 0], tri_points[1:, 1])
@image_comparison(baseline_images=['tripcolor1'], extensions=['png'])
def test_tripcolor():
x = np.asarray([0, 0.5, 1, 0, 0.5, 1, 0, 0.5, 1, 0.75])
y = np.asarray([0, 0, 0, 0.5, 0.5, 0.5, 1, 1, 1, 0.75])
triangles = np.asarray([
[0, 1, 3], [1, 4, 3],
[1, 2, 4], [2, 5, 4],
[3, 4, 6], [4, 7, 6],
[4, 5, 9], [7, 4, 9], [8, 7, 9], [5, 8, 9]])
# Triangulation with same number of points and triangles.
triang = mtri.Triangulation(x, y, triangles)
Cpoints = x + 0.5*y
xmid = x[triang.triangles].mean(axis=1)
ymid = y[triang.triangles].mean(axis=1)
Cfaces = 0.5*xmid + ymid
plt.subplot(121)
plt.tripcolor(triang, Cpoints, edgecolors='k')
plt.title('point colors')
plt.subplot(122)
plt.tripcolor(triang, facecolors=Cfaces, edgecolors='k')
plt.title('facecolors')
def test_no_modify():
# Test that Triangulation does not modify triangles array passed to it.
triangles = np.array([[3, 2, 0], [3, 1, 0]], dtype=np.int32)
points = np.array([(0, 0), (0, 1.1), (1, 0), (1, 1)])
old_triangles = triangles.copy()
tri = mtri.Triangulation(points[:, 0], points[:, 1], triangles)
edges = tri.edges
assert_array_equal(old_triangles, triangles)
def test_trifinder():
# Test points within triangles of masked triangulation.
x, y = np.meshgrid(np.arange(4), np.arange(4))
x = x.ravel()
y = y.ravel()
triangles = [[0, 1, 4], [1, 5, 4], [1, 2, 5], [2, 6, 5], [2, 3, 6],
[3, 7, 6], [4, 5, 8], [5, 9, 8], [5, 6, 9], [6, 10, 9],
[6, 7, 10], [7, 11, 10], [8, 9, 12], [9, 13, 12], [9, 10, 13],
[10, 14, 13], [10, 11, 14], [11, 15, 14]]
mask = np.zeros(len(triangles))
mask[8:10] = 1
triang = mtri.Triangulation(x, y, triangles, mask)
trifinder = triang.get_trifinder()
xs = [0.25, 1.25, 2.25, 3.25]
ys = [0.25, 1.25, 2.25, 3.25]
xs, ys = np.meshgrid(xs, ys)
xs = xs.ravel()
ys = ys.ravel()
tris = trifinder(xs, ys)
assert_array_equal(tris, [0, 2, 4, -1, 6, -1, 10, -1,
12, 14, 16, -1, -1, -1, -1, -1])
tris = trifinder(xs-0.5, ys-0.5)
assert_array_equal(tris, [-1, -1, -1, -1, -1, 1, 3, 5,
-1, 7, -1, 11, -1, 13, 15, 17])
# Test points exactly on boundary edges of masked triangulation.
xs = [0.5, 1.5, 2.5, 0.5, 1.5, 2.5, 1.5, 1.5, 0.0, 1.0, 2.0, 3.0]
ys = [0.0, 0.0, 0.0, 3.0, 3.0, 3.0, 1.0, 2.0, 1.5, 1.5, 1.5, 1.5]
tris = trifinder(xs, ys)
assert_array_equal(tris, [0, 2, 4, 13, 15, 17, 3, 14, 6, 7, 10, 11])
# Test points exactly on boundary corners of masked triangulation.
xs = [0.0, 3.0]
ys = [0.0, 3.0]
tris = trifinder(xs, ys)
assert_array_equal(tris, [0, 17])
#
# Test triangles with horizontal colinear points. These are not valid
# triangulations, but we try to deal with the simplest violations.
#
    # If delta is positive the triangulation is valid, if negative it is
    # invalid; if zero the points are colinear but the tests should still pass.
delta = 0.0
x = [1.5, 0, 1, 2, 3, 1.5, 1.5]
y = [-1, 0, 0, 0, 0, delta, 1]
triangles = [[0, 2, 1], [0, 3, 2], [0, 4, 3], [1, 2, 5], [2, 3, 5],
[3, 4, 5], [1, 5, 6], [4, 6, 5]]
triang = mtri.Triangulation(x, y, triangles)
trifinder = triang.get_trifinder()
xs = [-0.1, 0.4, 0.9, 1.4, 1.9, 2.4, 2.9]
ys = [-0.1, 0.1]
xs, ys = np.meshgrid(xs, ys)
tris = trifinder(xs, ys)
assert_array_equal(tris, [[-1, 0, 0, 1, 1, 2, -1],
[-1, 6, 6, 6, 7, 7, -1]])
#
# Test triangles with vertical colinear points. These are not valid
# triangulations, but we try to deal with the simplest violations.
#
    # If delta is positive the triangulation is valid, if negative it is
    # invalid; if zero the points are colinear but the tests should still pass.
delta = 0.0
x = [-1, -delta, 0, 0, 0, 0, 1]
y = [1.5, 1.5, 0, 1, 2, 3, 1.5]
triangles = [[0, 1, 2], [0, 1, 5], [1, 2, 3], [1, 3, 4], [1, 4, 5],
[2, 6, 3], [3, 6, 4], [4, 6, 5]]
triang = mtri.Triangulation(x, y, triangles)
trifinder = triang.get_trifinder()
xs = [-0.1, 0.1]
ys = [-0.1, 0.4, 0.9, 1.4, 1.9, 2.4, 2.9]
xs, ys = np.meshgrid(xs, ys)
tris = trifinder(xs, ys)
assert_array_equal(tris, [[-1, -1], [0, 5], [0, 5], [0, 6], [1, 6], [1, 7],
[-1, -1]])
# Test that changing triangulation by setting a mask causes the trifinder
# to be reinitialised.
x = [0, 1, 0, 1]
y = [0, 0, 1, 1]
triangles = [[0, 1, 2], [1, 3, 2]]
triang = mtri.Triangulation(x, y, triangles)
trifinder = triang.get_trifinder()
xs = [-0.2, 0.2, 0.8, 1.2]
ys = [0.5, 0.5, 0.5, 0.5]
tris = trifinder(xs, ys)
assert_array_equal(tris, [-1, 0, 1, -1])
triang.set_mask([1, 0])
assert trifinder == triang.get_trifinder()
tris = trifinder(xs, ys)
assert_array_equal(tris, [-1, -1, 1, -1])
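# Sketch: get_trifinder() returns a callable mapping point coordinates to
# triangle indices, with -1 for points outside (or in masked parts of) the
# triangulation, which is what the boundary checks above rely on.
def _demo_trifinder_basic():
    triang = mtri.Triangulation([0, 1, 0], [0, 0, 1])
    finder = triang.get_trifinder()
    assert finder([0.2], [0.2])[0] == 0    # inside the single triangle
    assert finder([2.0], [2.0])[0] == -1   # outside the triangulation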
def test_triinterp():
# Test points within triangles of masked triangulation.
x, y = np.meshgrid(np.arange(4), np.arange(4))
x = x.ravel()
y = y.ravel()
z = 1.23*x - 4.79*y
triangles = [[0, 1, 4], [1, 5, 4], [1, 2, 5], [2, 6, 5], [2, 3, 6],
[3, 7, 6], [4, 5, 8], [5, 9, 8], [5, 6, 9], [6, 10, 9],
[6, 7, 10], [7, 11, 10], [8, 9, 12], [9, 13, 12], [9, 10, 13],
[10, 14, 13], [10, 11, 14], [11, 15, 14]]
mask = np.zeros(len(triangles))
mask[8:10] = 1
triang = mtri.Triangulation(x, y, triangles, mask)
linear_interp = mtri.LinearTriInterpolator(triang, z)
cubic_min_E = mtri.CubicTriInterpolator(triang, z)
cubic_geom = mtri.CubicTriInterpolator(triang, z, kind='geom')
xs = np.linspace(0.25, 2.75, 6)
ys = [0.25, 0.75, 2.25, 2.75]
xs, ys = np.meshgrid(xs, ys) # Testing arrays with array.ndim = 2
for interp in (linear_interp, cubic_min_E, cubic_geom):
zs = interp(xs, ys)
assert_array_almost_equal(zs, (1.23*xs - 4.79*ys))
# Test points outside triangulation.
xs = [-0.25, 1.25, 1.75, 3.25]
ys = xs
xs, ys = np.meshgrid(xs, ys)
for interp in (linear_interp, cubic_min_E, cubic_geom):
zs = linear_interp(xs, ys)
assert_array_equal(zs.mask, [[True]*4]*4)
# Test mixed configuration (outside / inside).
xs = np.linspace(0.25, 1.75, 6)
ys = [0.25, 0.75, 1.25, 1.75]
xs, ys = np.meshgrid(xs, ys)
for interp in (linear_interp, cubic_min_E, cubic_geom):
zs = interp(xs, ys)
matest.assert_array_almost_equal(zs, (1.23*xs - 4.79*ys))
mask = (xs >= 1) * (xs <= 2) * (ys >= 1) * (ys <= 2)
assert_array_equal(zs.mask, mask)
# 2nd order patch test: on a grid with an 'arbitrary shaped' triangle,
# patch test shall be exact for quadratic functions and cubic
# interpolator if *kind* = user
(a, b, c) = (1.23, -4.79, 0.6)
def quad(x, y):
return a*(x-0.5)**2 + b*(y-0.5)**2 + c*x*y
def gradient_quad(x, y):
return (2*a*(x-0.5) + c*y, 2*b*(y-0.5) + c*x)
x = np.array([0.2, 0.33367, 0.669, 0., 1., 1., 0.])
y = np.array([0.3, 0.80755, 0.4335, 0., 0., 1., 1.])
triangles = np.array([[0, 1, 2], [3, 0, 4], [4, 0, 2], [4, 2, 5],
[1, 5, 2], [6, 5, 1], [6, 1, 0], [6, 0, 3]])
triang = mtri.Triangulation(x, y, triangles)
z = quad(x, y)
dz = gradient_quad(x, y)
# test points for 2nd order patch test
xs = np.linspace(0., 1., 5)
ys = np.linspace(0., 1., 5)
xs, ys = np.meshgrid(xs, ys)
cubic_user = mtri.CubicTriInterpolator(triang, z, kind='user', dz=dz)
interp_zs = cubic_user(xs, ys)
assert_array_almost_equal(interp_zs, quad(xs, ys))
(interp_dzsdx, interp_dzsdy) = cubic_user.gradient(x, y)
(dzsdx, dzsdy) = gradient_quad(x, y)
assert_array_almost_equal(interp_dzsdx, dzsdx)
assert_array_almost_equal(interp_dzsdy, dzsdy)
# Cubic improvement: cubic interpolation shall perform better than linear
# on a sufficiently dense mesh for a quadratic function.
n = 11
x, y = np.meshgrid(np.linspace(0., 1., n+1), np.linspace(0., 1., n+1))
x = x.ravel()
y = y.ravel()
z = quad(x, y)
triang = mtri.Triangulation(x, y, triangles=meshgrid_triangles(n+1))
xs, ys = np.meshgrid(np.linspace(0.1, 0.9, 5), np.linspace(0.1, 0.9, 5))
xs = xs.ravel()
ys = ys.ravel()
linear_interp = mtri.LinearTriInterpolator(triang, z)
cubic_min_E = mtri.CubicTriInterpolator(triang, z)
cubic_geom = mtri.CubicTriInterpolator(triang, z, kind='geom')
zs = quad(xs, ys)
diff_lin = np.abs(linear_interp(xs, ys) - zs)
for interp in (cubic_min_E, cubic_geom):
diff_cubic = np.abs(interp(xs, ys) - zs)
assert np.max(diff_lin) >= 10 * np.max(diff_cubic)
assert (np.dot(diff_lin, diff_lin) >=
100 * np.dot(diff_cubic, diff_cubic))
def test_triinterpcubic_C1_continuity():
    # Below are the 4 tests which demonstrate C1 continuity of the
    # CubicTriInterpolator (testing the cubic shape functions on an arbitrary
    # triangle):
    #
    # 1) Testing continuity of function & derivatives at corner for all 9
    # shape functions. Testing also function values at same location.
    # 2) Testing C1 continuity along each edge (as the gradient is a
    # polynomial of 2nd order, it is sufficient to test at the middle).
    # 3) Testing C1 continuity at the triangle barycenter (where the 3
    # subtriangles meet).
    # 4) Testing C1 continuity at the median 1/3 points (midside between 2
    # subtriangles).
    # A minimal use of the public gradient API appears in the sketch after
    # this test.
# Utility test function check_continuity
def check_continuity(interpolator, loc, values=None):
"""
Checks the continuity of interpolator (and its derivatives) near
location loc. Can check the value at loc itself if *values* is
provided.
*interpolator* TriInterpolator
*loc* location to test (x0, y0)
*values* (optional) array [z0, dzx0, dzy0] to check the value at *loc*
"""
        n_star = 24  # Number of points tested on a small circle around loc.
        epsilon = 1.e-10  # Radius of that circle.
        k = 100.  # Continuity coefficient.
(loc_x, loc_y) = loc
star_x = loc_x + epsilon*np.cos(np.linspace(0., 2*np.pi, n_star))
star_y = loc_y + epsilon*np.sin(np.linspace(0., 2*np.pi, n_star))
z = interpolator([loc_x], [loc_y])[0]
(dzx, dzy) = interpolator.gradient([loc_x], [loc_y])
if values is not None:
assert_array_almost_equal(z, values[0])
assert_array_almost_equal(dzx[0], values[1])
assert_array_almost_equal(dzy[0], values[2])
diff_z = interpolator(star_x, star_y) - z
(tab_dzx, tab_dzy) = interpolator.gradient(star_x, star_y)
diff_dzx = tab_dzx - dzx
diff_dzy = tab_dzy - dzy
        assert_array_less(np.abs(diff_z), epsilon*k)
        assert_array_less(np.abs(diff_dzx), epsilon*k)
        assert_array_less(np.abs(diff_dzy), epsilon*k)
# Drawing arbitrary triangle (a, b, c) inside a unit square.
(ax, ay) = (0.2, 0.3)
(bx, by) = (0.33367, 0.80755)
(cx, cy) = (0.669, 0.4335)
x = np.array([ax, bx, cx, 0., 1., 1., 0.])
y = np.array([ay, by, cy, 0., 0., 1., 1.])
triangles = np.array([[0, 1, 2], [3, 0, 4], [4, 0, 2], [4, 2, 5],
[1, 5, 2], [6, 5, 1], [6, 1, 0], [6, 0, 3]])
triang = mtri.Triangulation(x, y, triangles)
for idof in range(9):
z = np.zeros(7, dtype=np.float64)
dzx = np.zeros(7, dtype=np.float64)
dzy = np.zeros(7, dtype=np.float64)
values = np.zeros([3, 3], dtype=np.float64)
case = idof//3
values[case, idof % 3] = 1.0
if case == 0:
z[idof] = 1.0
elif case == 1:
dzx[idof % 3] = 1.0
elif case == 2:
dzy[idof % 3] = 1.0
interp = mtri.CubicTriInterpolator(triang, z, kind='user',
dz=(dzx, dzy))
# Test 1) Checking values and continuity at nodes
check_continuity(interp, (ax, ay), values[:, 0])
check_continuity(interp, (bx, by), values[:, 1])
check_continuity(interp, (cx, cy), values[:, 2])
# Test 2) Checking continuity at midside nodes
check_continuity(interp, ((ax+bx)*0.5, (ay+by)*0.5))
check_continuity(interp, ((ax+cx)*0.5, (ay+cy)*0.5))
check_continuity(interp, ((cx+bx)*0.5, (cy+by)*0.5))
# Test 3) Checking continuity at barycenter
check_continuity(interp, ((ax+bx+cx)/3., (ay+by+cy)/3.))
# Test 4) Checking continuity at median 1/3-point
check_continuity(interp, ((4.*ax+bx+cx)/6., (4.*ay+by+cy)/6.))
check_continuity(interp, ((ax+4.*bx+cx)/6., (ay+4.*by+cy)/6.))
check_continuity(interp, ((ax+bx+4.*cx)/6., (ay+by+4.*cy)/6.))
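# Illustrative sketch (not a collected test): the public gradient method used
# by check_continuity above, on a field whose exact gradient is known.  A
# linear field z = 2*x - 3*y is reproduced by the cubic interpolator (see the
# linear patch test earlier in this module), so the recovered gradient should
# be the constant (2, -3); the tolerance below is deliberately loose.
def _sketch_gradient_of_linear_field():
    x, y = np.meshgrid(np.linspace(0., 1., 4), np.linspace(0., 1., 4))
    x, y = x.ravel(), y.ravel()
    z = 2.0*x - 3.0*y
    interp = mtri.CubicTriInterpolator(mtri.Triangulation(x, y), z)
    dzdx, dzdy = interp.gradient([0.5], [0.5])
    assert abs(dzdx[0] - 2.0) < 1e-6 and abs(dzdy[0] + 3.0) < 1e-6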
def test_triinterpcubic_cg_solver():
    # Now 3 basic tests of the Sparse CG solver, used for
    # CubicTriInterpolator with *kind* = 'min_E'.
# 1) A commonly used test involves a 2d Poisson matrix.
def poisson_sparse_matrix(n, m):
"""
Return the sparse, (n*m, n*m) matrix in coo format resulting from the
discretisation of the 2-dimensional Poisson equation according to a
finite difference numerical scheme on a uniform (n, m) grid.
"""
l = m*n
rows = np.concatenate([
np.arange(l, dtype=np.int32),
np.arange(l-1, dtype=np.int32), np.arange(1, l, dtype=np.int32),
np.arange(l-n, dtype=np.int32), np.arange(n, l, dtype=np.int32)])
cols = np.concatenate([
np.arange(l, dtype=np.int32),
np.arange(1, l, dtype=np.int32), np.arange(l-1, dtype=np.int32),
np.arange(n, l, dtype=np.int32), np.arange(l-n, dtype=np.int32)])
vals = np.concatenate([
4*np.ones(l, dtype=np.float64),
-np.ones(l-1, dtype=np.float64), -np.ones(l-1, dtype=np.float64),
-np.ones(l-n, dtype=np.float64), -np.ones(l-n, dtype=np.float64)])
        # The +1 and -1 off-diagonals have zeros at the end of each grid row
        # (no wrap-around coupling).
vals[l:2*l-1][m-1::m] = 0.
vals[2*l-1:3*l-2][m-1::m] = 0.
return vals, rows, cols, (n*m, n*m)
# Instantiating a sparse Poisson matrix of size 48 x 48:
(n, m) = (12, 4)
mat = mtri.triinterpolate._Sparse_Matrix_coo(*poisson_sparse_matrix(n, m))
mat.compress_csc()
mat_dense = mat.to_dense()
    # Testing a sparse solve for all 48 basis vectors
for itest in range(n*m):
b = np.zeros(n*m, dtype=np.float64)
b[itest] = 1.
x, _ = mtri.triinterpolate._cg(A=mat, b=b, x0=np.zeros(n*m),
tol=1.e-10)
assert_array_almost_equal(np.dot(mat_dense, x), b)
    # 2) Same matrix, but with 2 inserted rows/columns that have null diagonal
    # terms (yet are still linked to the rest of the matrix by off-diagonal
    # terms).
(i_zero, j_zero) = (12, 49)
vals, rows, cols, _ = poisson_sparse_matrix(n, m)
rows = rows + 1*(rows >= i_zero) + 1*(rows >= j_zero)
cols = cols + 1*(cols >= i_zero) + 1*(cols >= j_zero)
    # Add the off-diagonal terms linking the new rows/columns to the matrix.
rows = np.concatenate([rows, [i_zero, i_zero-1, j_zero, j_zero-1]])
cols = np.concatenate([cols, [i_zero-1, i_zero, j_zero-1, j_zero]])
vals = np.concatenate([vals, [1., 1., 1., 1.]])
mat = mtri.triinterpolate._Sparse_Matrix_coo(vals, rows, cols,
(n*m + 2, n*m + 2))
mat.compress_csc()
mat_dense = mat.to_dense()
    # Testing a sparse solve for all 50 basis vectors
for itest in range(n*m + 2):
b = np.zeros(n*m + 2, dtype=np.float64)
b[itest] = 1.
x, _ = mtri.triinterpolate._cg(A=mat, b=b, x0=np.ones(n*m + 2),
tol=1.e-10)
assert_array_almost_equal(np.dot(mat_dense, x), b)
    # 3) Now a simple test that duplicate entries (i.e. with the same row and
    # the same col indices) are summed together when the matrix is compressed.
vals = np.ones(17, dtype=np.float64)
rows = np.array([0, 1, 2, 0, 0, 1, 1, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1],
dtype=np.int32)
cols = np.array([0, 1, 2, 1, 1, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2],
dtype=np.int32)
dim = (3, 3)
mat = mtri.triinterpolate._Sparse_Matrix_coo(vals, rows, cols, dim)
mat.compress_csc()
mat_dense = mat.to_dense()
assert_array_almost_equal(mat_dense, np.array([
[1., 2., 0.], [2., 1., 5.], [0., 5., 1.]], dtype=np.float64))
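# Illustrative sketch (not a collected test): the same 5-point Poisson stencil
# as poisson_sparse_matrix above (4 on the diagonal, -1 couplings to the grid
# neighbours), built densely from 1-D second-difference matrices with np.kron.
# This only cross-checks the stencil itself -- node ordering may differ from
# the helper above and no private sparse machinery is touched.
def _sketch_dense_poisson_matrix(n=4, m=3):
    d2_n = 2*np.eye(n) - np.eye(n, k=1) - np.eye(n, k=-1)
    d2_m = 2*np.eye(m) - np.eye(m, k=1) - np.eye(m, k=-1)
    dense = np.kron(np.eye(m), d2_n) + np.kron(d2_m, np.eye(n))
    assert_array_almost_equal(dense, dense.T)             # symmetric
    assert_array_almost_equal(np.diag(dense), 4.*np.ones(n*m))
    assert np.all(np.linalg.eigvalsh(dense) > 0.)         # positive definite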
def test_triinterpcubic_geom_weights():
    # Tests to check computation of weights for _DOF_estimator_geom:
    # the weight sum per triangle can be 1 (if all apex angles are < 90
    # degrees) or 2*w_i, where w_i = 1 - alpha_i/pi is the weight of apex i
    # and alpha_i is the (single) apex angle > 90 degrees.  The formula is
    # worked out in the sketch after this test.
(ax, ay) = (0., 1.687)
x = np.array([ax, 0.5*ax, 0., 1.])
y = np.array([ay, -ay, 0., 0.])
z = np.zeros(4, dtype=np.float64)
triangles = [[0, 2, 3], [1, 3, 2]]
sum_w = np.zeros([4, 2]) # 4 possibilities; 2 triangles
for theta in np.linspace(0., 2*np.pi, 14): # rotating the figure...
x_rot = np.cos(theta)*x + np.sin(theta)*y
y_rot = -np.sin(theta)*x + np.cos(theta)*y
triang = mtri.Triangulation(x_rot, y_rot, triangles)
cubic_geom = mtri.CubicTriInterpolator(triang, z, kind='geom')
dof_estimator = mtri.triinterpolate._DOF_estimator_geom(cubic_geom)
weights = dof_estimator.compute_geom_weights()
# Testing for the 4 possibilities...
sum_w[0, :] = np.sum(weights, 1) - 1
for itri in range(3):
sum_w[itri+1, :] = np.sum(weights, 1) - 2*weights[:, itri]
assert_array_almost_equal(np.min(np.abs(sum_w), axis=0),
np.array([0., 0.], dtype=np.float64))
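# Illustrative sketch (not a collected test): working out the quantity quoted
# in the comment above, 2*w_i with w_i = 1 - alpha_i/pi, for a triangle whose
# apex angle is 120 degrees.  This only evaluates the stated formula; the
# private _DOF_estimator_geom machinery is not called.
def _sketch_obtuse_apex_weight():
    pts = np.array([[0., 0.], [1., 0.],
                    [np.cos(np.radians(120.)), np.sin(np.radians(120.))]])
    u, v = pts[1] - pts[0], pts[2] - pts[0]
    alpha = np.arccos(np.dot(u, v) / (np.hypot(*u) * np.hypot(*v)))
    assert_array_almost_equal(np.degrees(alpha), 120.)
    # For this triangle the expected weight sum is 2*(1 - alpha/pi) = 2/3.
    assert_array_almost_equal(2.*(1. - alpha/np.pi), 2./3.)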
def test_triinterp_colinear():
    # Tests interpolating inside a triangulation with horizontal colinear
    # points (refer also to the tests :func:`test_trifinder`).
    #
    # These are not valid triangulations, but we try to deal with the
    # simplest violations (i.e. those handled by the default TriFinder).
    #
    # Note that the LinearTriInterpolator and the CubicTriInterpolator with
    # kind='min_E' or 'geom' still pass a linear patch test.
    # We also test interpolation inside a flat triangle, by forcing
    # *tri_index* in a call to :meth:`_interpolate_multikeys`.
    # delta > 0: valid triangulation; delta < 0: invalid triangulation;
    # delta == 0: colinear points, which should nevertheless pass the tests.
delta = 0.
x0 = np.array([1.5, 0, 1, 2, 3, 1.5, 1.5])
y0 = np.array([-1, 0, 0, 0, 0, delta, 1])
# We test different affine transformations of the initial figure; to
# avoid issues related to round-off errors we only use integer
# coefficients (otherwise the Triangulation might become invalid even with
# delta == 0).
transformations = [[1, 0], [0, 1], [1, 1], [1, 2], [-2, -1], [-2, 1]]
for transformation in transformations:
x_rot = transformation[0]*x0 + transformation[1]*y0
y_rot = -transformation[1]*x0 + transformation[0]*y0
(x, y) = (x_rot, y_rot)
z = 1.23*x - 4.79*y
triangles = [[0, 2, 1], [0, 3, 2], [0, 4, 3], [1, 2, 5], [2, 3, 5],
[3, 4, 5], [1, 5, 6], [4, 6, 5]]
triang = mtri.Triangulation(x, y, triangles)
xs = np.linspace(np.min(triang.x), np.max(triang.x), 20)
ys = np.linspace(np.min(triang.y), np.max(triang.y), 20)
xs, ys = np.meshgrid(xs, ys)
xs = xs.ravel()
ys = ys.ravel()
mask_out = (triang.get_trifinder()(xs, ys) == -1)
zs_target = np.ma.array(1.23*xs - 4.79*ys, mask=mask_out)
linear_interp = mtri.LinearTriInterpolator(triang, z)
cubic_min_E = mtri.CubicTriInterpolator(triang, z)
cubic_geom = mtri.CubicTriInterpolator(triang, z, kind='geom')
for interp in (linear_interp, cubic_min_E, cubic_geom):
zs = interp(xs, ys)
assert_array_almost_equal(zs_target, zs)
# Testing interpolation inside the flat triangle number 4: [2, 3, 5]
# by imposing *tri_index* in a call to :meth:`_interpolate_multikeys`
itri = 4
pt1 = triang.triangles[itri, 0]
pt2 = triang.triangles[itri, 1]
xs = np.linspace(triang.x[pt1], triang.x[pt2], 10)
ys = np.linspace(triang.y[pt1], triang.y[pt2], 10)
zs_target = 1.23*xs - 4.79*ys
for interp in (linear_interp, cubic_min_E, cubic_geom):
zs, = interp._interpolate_multikeys(
xs, ys, tri_index=itri*np.ones(10, dtype=np.int32))
assert_array_almost_equal(zs_target, zs)
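# Illustrative sketch (not a collected test): the TriFinder behaviour used
# above to build mask_out -- a trifinder returns the index of the containing
# triangle for points inside the triangulation and -1 for points outside.
def _sketch_trifinder_outside_points():
    triang = mtri.Triangulation([0., 1., 0.], [0., 0., 1.], [[0, 1, 2]])
    trifinder = triang.get_trifinder()
    assert_array_equal(trifinder([0.2, 1.0], [0.2, 1.0]), [0, -1])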
def test_triinterp_transformations():
# 1) Testing that the interpolation scheme is invariant by rotation of the
# whole figure.
# Note: This test is non-trivial for a CubicTriInterpolator with
# kind='min_E'. It does fail for a non-isotropic stiffness matrix E of
# :class:`_ReducedHCT_Element` (tested with E=np.diag([1., 1., 1.])), and
    # provides a good test for :meth:`get_Kff_and_Ff` of the same class.
#
# 2) Also testing that the interpolation scheme is invariant by expansion
# of the whole figure along one axis.
n_angles = 20
n_radii = 10
min_radius = 0.15
def z(x, y):
r1 = np.hypot(0.5 - x, 0.5 - y)
theta1 = np.arctan2(0.5 - x, 0.5 - y)
r2 = np.hypot(-x - 0.2, -y - 0.2)
theta2 = np.arctan2(-x - 0.2, -y - 0.2)
z = -(2*(np.exp((r1/10)**2)-1)*30. * np.cos(7.*theta1) +
(np.exp((r2/10)**2)-1)*30. * np.cos(11.*theta2) +
0.7*(x**2 + y**2))
return (np.max(z)-z)/(np.max(z)-np.min(z))
# First create the x and y coordinates of the points.
radii = np.linspace(min_radius, 0.95, n_radii)
    # The n_angles offset merely rotates the whole set of angles; the range
    # still spans a full 2*pi.
    angles = np.linspace(0 + n_angles, 2*np.pi + n_angles,
                         n_angles, endpoint=False)
angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)
angles[:, 1::2] += np.pi/n_angles
x0 = (radii*np.cos(angles)).flatten()
y0 = (radii*np.sin(angles)).flatten()
triang0 = mtri.Triangulation(x0, y0) # Delaunay triangulation
z0 = z(x0, y0)
# Then create the test points
xs0 = np.linspace(-1., 1., 23)
ys0 = np.linspace(-1., 1., 23)
xs0, ys0 = np.meshgrid(xs0, ys0)
xs0 = xs0.ravel()
ys0 = ys0.ravel()
interp_z0 = {}
for i_angle in range(2):
# Rotating everything
theta = 2*np.pi / n_angles * i_angle
x = np.cos(theta)*x0 + np.sin(theta)*y0
y = -np.sin(theta)*x0 + np.cos(theta)*y0
xs = np.cos(theta)*xs0 + np.sin(theta)*ys0
ys = -np.sin(theta)*xs0 + np.cos(theta)*ys0
triang = mtri.Triangulation(x, y, triang0.triangles)
linear_interp = mtri.LinearTriInterpolator(triang, z0)
cubic_min_E = mtri.CubicTriInterpolator(triang, z0)
cubic_geom = mtri.CubicTriInterpolator(triang, z0, kind='geom')
dic_interp = {'lin': linear_interp,
'min_E': cubic_min_E,
'geom': cubic_geom}
# Testing that the interpolation is invariant by rotation...
for interp_key in ['lin', 'min_E', 'geom']:
interp = dic_interp[interp_key]
if i_angle == 0:
interp_z0[interp_key] = interp(xs0, ys0) # storage
else:
interpz = interp(xs, ys)
matest.assert_array_almost_equal(interpz,
interp_z0[interp_key])
scale_factor = 987654.3210
for scaled_axis in ('x', 'y'):
# Scaling everything (expansion along scaled_axis)
if scaled_axis == 'x':
x = scale_factor * x0
y = y0
xs = scale_factor * xs0
ys = ys0
else:
x = x0
y = scale_factor * y0
xs = xs0
ys = scale_factor * ys0
triang = mtri.Triangulation(x, y, triang0.triangles)
linear_interp = mtri.LinearTriInterpolator(triang, z0)
cubic_min_E = mtri.CubicTriInterpolator(triang, z0)
cubic_geom = mtri.CubicTriInterpolator(triang, z0, kind='geom')
dic_interp = {'lin': linear_interp,
'min_E': cubic_min_E,
'geom': cubic_geom}
# Test that the interpolation is invariant by expansion along 1 axis...
for interp_key in ['lin', 'min_E', 'geom']:
interpz = dic_interp[interp_key](xs, ys)
matest.assert_array_almost_equal(interpz, interp_z0[interp_key])
@image_comparison(baseline_images=['tri_smooth_contouring'],
extensions=['png'], remove_text=True, tol=0.07)
def test_tri_smooth_contouring():
# Image comparison based on example tricontour_smooth_user.
n_angles = 20
n_radii = 10
min_radius = 0.15
def z(x, y):
r1 = np.hypot(0.5 - x, 0.5 - y)
theta1 = np.arctan2(0.5 - x, 0.5 - y)
r2 = np.hypot(-x - 0.2, -y - 0.2)
theta2 = np.arctan2(-x - 0.2, -y - 0.2)
z = -(2*(np.exp((r1/10)**2)-1)*30. * np.cos(7.*theta1) +
(np.exp((r2/10)**2)-1)*30. * np.cos(11.*theta2) +
0.7*(x**2 + y**2))
return (np.max(z)-z)/(np.max(z)-np.min(z))
# First create the x and y coordinates of the points.
radii = np.linspace(min_radius, 0.95, n_radii)
angles = np.linspace(0 + n_angles, 2*np.pi + n_angles,
n_angles, endpoint=False)
angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)
angles[:, 1::2] += np.pi/n_angles
x0 = (radii*np.cos(angles)).flatten()
y0 = (radii*np.sin(angles)).flatten()
triang0 = mtri.Triangulation(x0, y0) # Delaunay triangulation
z0 = z(x0, y0)
triang0.set_mask(np.hypot(x0[triang0.triangles].mean(axis=1),
y0[triang0.triangles].mean(axis=1))
< min_radius)
# Then the plot
refiner = mtri.UniformTriRefiner(triang0)
tri_refi, z_test_refi = refiner.refine_field(z0, subdiv=4)
levels = np.arange(0., 1., 0.025)
plt.triplot(triang0, lw=0.5, color='0.5')
plt.tricontour(tri_refi, z_test_refi, levels=levels, colors="black")
@image_comparison(baseline_images=['tri_smooth_gradient'],
extensions=['png'], remove_text=True, tol=0.092)
def test_tri_smooth_gradient():
# Image comparison based on example trigradient_demo.
def dipole_potential(x, y):
"""An electric dipole potential V."""
r_sq = x**2 + y**2
theta = np.arctan2(y, x)
z = np.cos(theta)/r_sq
return (np.max(z)-z) / (np.max(z)-np.min(z))
# Creating a Triangulation
n_angles = 30
n_radii = 10
min_radius = 0.2
radii = np.linspace(min_radius, 0.95, n_radii)
angles = np.linspace(0, 2*np.pi, n_angles, endpoint=False)
angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)
angles[:, 1::2] += np.pi/n_angles
x = (radii*np.cos(angles)).flatten()
y = (radii*np.sin(angles)).flatten()
V = dipole_potential(x, y)
triang = mtri.Triangulation(x, y)
triang.set_mask(np.hypot(x[triang.triangles].mean(axis=1),
y[triang.triangles].mean(axis=1))
< min_radius)
# Refine data - interpolates the electrical potential V
refiner = mtri.UniformTriRefiner(triang)
tri_refi, z_test_refi = refiner.refine_field(V, subdiv=3)
# Computes the electrical field (Ex, Ey) as gradient of -V
tci = mtri.CubicTriInterpolator(triang, -V)
Ex, Ey = tci.gradient(triang.x, triang.y)
E_norm = np.hypot(Ex, Ey)
# Plot the triangulation, the potential iso-contours and the vector field
plt.figure()
plt.gca().set_aspect('equal')
plt.triplot(triang, color='0.8')
levels = np.arange(0., 1., 0.01)
cmap = cm.get_cmap(name='hot', lut=None)
plt.tricontour(tri_refi, z_test_refi, levels=levels, cmap=cmap,
linewidths=[2.0, 1.0, 1.0, 1.0])
# Plots direction of the electrical vector field
plt.quiver(triang.x, triang.y, Ex/E_norm, Ey/E_norm,
units='xy', scale=10., zorder=3, color='blue',
width=0.007, headwidth=3., headlength=4.)
    # We are leaving ax.use_sticky_edges as True, so the
    # view limits are the contour data limits.
def test_tritools():
# Tests TriAnalyzer.scale_factors on masked triangulation
# Tests circle_ratios on equilateral and right-angled triangle.
x = np.array([0., 1., 0.5, 0., 2.])
y = np.array([0., 0., 0.5*np.sqrt(3.), -1., 1.])
triangles = np.array([[0, 1, 2], [0, 1, 3], [1, 2, 4]], dtype=np.int32)
mask = np.array([False, False, True], dtype=bool)
triang = mtri.Triangulation(x, y, triangles, mask=mask)
analyser = mtri.TriAnalyzer(triang)
assert_array_almost_equal(analyser.scale_factors,
np.array([1., 1./(1.+0.5*np.sqrt(3.))]))
assert_array_almost_equal(
analyser.circle_ratios(rescale=False),
np.ma.masked_array([0.5, 1./(1.+np.sqrt(2.)), np.nan], mask))
# Tests circle ratio of a flat triangle
x = np.array([0., 1., 2.])
y = np.array([1., 1.+3., 1.+6.])
triangles = np.array([[0, 1, 2]], dtype=np.int32)
triang = mtri.Triangulation(x, y, triangles)
analyser = mtri.TriAnalyzer(triang)
assert_array_almost_equal(analyser.circle_ratios(), np.array([0.]))
# Tests TriAnalyzer.get_flat_tri_mask
# Creates a triangulation of [-1, 1] x [-1, 1] with contiguous groups of
# 'flat' triangles at the 4 corners and at the center. Checks that only
# those at the borders are eliminated by TriAnalyzer.get_flat_tri_mask
n = 9
def power(x, a):
return np.abs(x)**a*np.sign(x)
x = np.linspace(-1., 1., n+1)
x, y = np.meshgrid(power(x, 2.), power(x, 0.25))
x = x.ravel()
y = y.ravel()
triang = mtri.Triangulation(x, y, triangles=meshgrid_triangles(n+1))
analyser = mtri.TriAnalyzer(triang)
mask_flat = analyser.get_flat_tri_mask(0.2)
verif_mask = np.zeros(162, dtype=bool)
corners_index = [0, 1, 2, 3, 14, 15, 16, 17, 18, 19, 34, 35, 126, 127,
142, 143, 144, 145, 146, 147, 158, 159, 160, 161]
verif_mask[corners_index] = True
assert_array_equal(mask_flat, verif_mask)
# Now including a hole (masked triangle) at the center. The center also
# shall be eliminated by get_flat_tri_mask.
mask = np.zeros(162, dtype=bool)
mask[80] = True
triang.set_mask(mask)
mask_flat = analyser.get_flat_tri_mask(0.2)
center_index = [44, 45, 62, 63, 78, 79, 80, 81, 82, 83, 98, 99, 116, 117]
verif_mask[center_index] = True
assert_array_equal(mask_flat, verif_mask)
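# Illustrative sketch (not a collected test): the geometry behind the 0.5
# asserted above for the equilateral triangle.  With side lengths a, b, c,
# area A and semi-perimeter s, the incircle radius is r = A/s and the
# circumcircle radius is R = a*b*c/(4*A); r/R reaches its maximum of 0.5 for
# the equilateral triangle, which is the value circle_ratios reports for it.
def _sketch_incircle_circumcircle_ratio():
    a = b = c = 1.
    s = (a + b + c) / 2.
    area = np.sqrt(s*(s - a)*(s - b)*(s - c))      # Heron's formula
    incircle_radius = area / s
    circumcircle_radius = a*b*c / (4.*area)
    assert_array_almost_equal(incircle_radius / circumcircle_radius, 0.5)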
def test_trirefine():
# Testing subdiv=2 refinement
n = 3
subdiv = 2
x = np.linspace(-1., 1., n+1)
x, y = np.meshgrid(x, x)
x = x.ravel()
y = y.ravel()
mask = np.zeros(2*n**2, dtype=bool)
mask[n**2:] = True
triang = mtri.Triangulation(x, y, triangles=meshgrid_triangles(n+1),
mask=mask)
refiner = mtri.UniformTriRefiner(triang)
refi_triang = refiner.refine_triangulation(subdiv=subdiv)
x_refi = refi_triang.x
y_refi = refi_triang.y
n_refi = n * subdiv**2
x_verif = np.linspace(-1., 1., n_refi+1)
x_verif, y_verif = np.meshgrid(x_verif, x_verif)
x_verif = x_verif.ravel()
y_verif = y_verif.ravel()
ind1d = np.in1d(np.around(x_verif*(2.5+y_verif), 8),
np.around(x_refi*(2.5+y_refi), 8))
assert_array_equal(ind1d, True)
# Testing the mask of the refined triangulation
refi_mask = refi_triang.mask
refi_tri_barycenter_x = np.sum(refi_triang.x[refi_triang.triangles],
axis=1) / 3.
refi_tri_barycenter_y = np.sum(refi_triang.y[refi_triang.triangles],
axis=1) / 3.
tri_finder = triang.get_trifinder()
refi_tri_indices = tri_finder(refi_tri_barycenter_x,
refi_tri_barycenter_y)
refi_tri_mask = triang.mask[refi_tri_indices]
assert_array_equal(refi_mask, refi_tri_mask)
# Testing that the numbering of triangles does not change the
# interpolation result.
x = np.asarray([0.0, 1.0, 0.0, 1.0])
y = np.asarray([0.0, 0.0, 1.0, 1.0])
triang = [mtri.Triangulation(x, y, [[0, 1, 3], [3, 2, 0]]),
mtri.Triangulation(x, y, [[0, 1, 3], [2, 0, 3]])]
z = np.hypot(x - 0.3, y - 0.4)
# Refining the 2 triangulations and reordering the points
xyz_data = []
for i in range(2):
refiner = mtri.UniformTriRefiner(triang[i])
refined_triang, refined_z = refiner.refine_field(z, subdiv=1)
xyz = np.dstack((refined_triang.x, refined_triang.y, refined_z))[0]
xyz = xyz[np.lexsort((xyz[:, 1], xyz[:, 0]))]
xyz_data += [xyz]
assert_array_almost_equal(xyz_data[0], xyz_data[1])
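# Illustrative sketch (not a collected test): each subdiv level of
# UniformTriRefiner splits every triangle into 4 children, so an unmasked
# triangulation refined with subdiv=k ends up with 4**k times as many
# triangles.
def _sketch_refinement_triangle_count(subdiv=2):
    triang = mtri.Triangulation([0., 1., 0., 1.], [0., 0., 1., 1.],
                                [[0, 1, 3], [3, 2, 0]])
    refined = mtri.UniformTriRefiner(triang).refine_triangulation(
        subdiv=subdiv)
    assert len(refined.triangles) == len(triang.triangles) * 4**subdiv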
def meshgrid_triangles(n):
"""
Return (2*(N-1)**2, 3) array of triangles to mesh (N, N)-point np.meshgrid.
"""
tri = []
for i in range(n-1):
for j in range(n-1):
            a = i + j*n
b = (i+1) + j*n
c = i + (j+1)*n
d = (i+1) + (j+1)*n
tri += [[a, b, d], [a, d, c]]
return np.array(tri, dtype=np.int32)
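# Worked example (not a collected test) of the helper above: for n=2 the
# single grid cell, with its points numbered 0..3 in row-major order, is
# split into exactly the two triangles produced by the loop.
def _sketch_meshgrid_triangles_smallest_case():
    assert_array_equal(meshgrid_triangles(2), [[0, 1, 3], [0, 3, 2]])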
def test_triplot_return():
# Check that triplot returns the artists it adds
from matplotlib.figure import Figure
ax = Figure().add_axes([0.1, 0.1, 0.7, 0.7])
triang = mtri.Triangulation(
[0.0, 1.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0],
triangles=[[0, 1, 3], [3, 2, 0]])
assert ax.triplot(triang, "b-") is not None, \
'triplot should return the artist it adds'
def test_trirefiner_fortran_contiguous_triangles():
# github issue 4180. Test requires two arrays of triangles that are
# identical except that one is C-contiguous and one is fortran-contiguous.
triangles1 = np.array([[2, 0, 3], [2, 1, 0]])
assert not np.isfortran(triangles1)
triangles2 = np.array(triangles1, copy=True, order='F')
assert np.isfortran(triangles2)
x = np.array([0.39, 0.59, 0.43, 0.32])
y = np.array([33.99, 34.01, 34.19, 34.18])
triang1 = mtri.Triangulation(x, y, triangles1)
triang2 = mtri.Triangulation(x, y, triangles2)
refiner1 = mtri.UniformTriRefiner(triang1)
refiner2 = mtri.UniformTriRefiner(triang2)
fine_triang1 = refiner1.refine_triangulation(subdiv=1)
fine_triang2 = refiner2.refine_triangulation(subdiv=1)
assert_array_equal(fine_triang1.triangles, fine_triang2.triangles)
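# Illustrative sketch (not a collected test): what the C/Fortran contiguity
# distinction above means at the numpy level -- same values, different memory
# layout, distinguished by np.isfortran and the array flags.
def _sketch_fortran_contiguity():
    c_order = np.array([[2, 0, 3], [2, 1, 0]])      # C-contiguous by default
    f_order = np.asfortranarray(c_order)            # Fortran-contiguous copy
    assert not np.isfortran(c_order) and np.isfortran(f_order)
    assert_array_equal(c_order, f_order)            # identical contents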
def test_qhull_triangle_orientation():
# github issue 4437.
xi = np.linspace(-2, 2, 100)
x, y = map(np.ravel, np.meshgrid(xi, xi))
w = (x > y - 1) & (x < -1.95) & (y > -1.2)
x, y = x[w], y[w]
theta = np.radians(25)
x1 = x*np.cos(theta) - y*np.sin(theta)
y1 = x*np.sin(theta) + y*np.cos(theta)
# Calculate Delaunay triangulation using Qhull.
triang = mtri.Triangulation(x1, y1)
# Neighbors returned by Qhull.
qhull_neighbors = triang.neighbors
# Obtain neighbors using own C++ calculation.
triang._neighbors = None
own_neighbors = triang.neighbors
assert_array_equal(qhull_neighbors, own_neighbors)
def test_trianalyzer_mismatched_indices():
# github issue 4999.
x = np.array([0., 1., 0.5, 0., 2.])
y = np.array([0., 0., 0.5*np.sqrt(3.), -1., 1.])
triangles = np.array([[0, 1, 2], [0, 1, 3], [1, 2, 4]], dtype=np.int32)
mask = np.array([False, False, True], dtype=bool)
triang = mtri.Triangulation(x, y, triangles, mask=mask)
analyser = mtri.TriAnalyzer(triang)
# numpy >= 1.10 raises a VisibleDeprecationWarning in the following line
# prior to the fix.
triang2 = analyser._get_compressed_triangulation()
def test_tricontourf_decreasing_levels():
# github issue 5477.
x = [0.0, 1.0, 1.0]
y = [0.0, 0.0, 1.0]
z = [0.2, 0.4, 0.6]
plt.figure()
with pytest.raises(ValueError):
plt.tricontourf(x, y, z, [1.0, 0.0])
def test_internal_cpp_api():
# Following github issue 8197.
import matplotlib._tri as _tri
# C++ Triangulation.
with pytest.raises(TypeError) as excinfo:
triang = _tri.Triangulation()
excinfo.match(r'function takes exactly 7 arguments \(0 given\)')
with pytest.raises(ValueError) as excinfo:
triang = _tri.Triangulation([], [1], [[]], None, None, None, False)
excinfo.match(r'x and y must be 1D arrays of the same length')
x = [0, 1, 1]
y = [0, 0, 1]
with pytest.raises(ValueError) as excinfo:
triang = _tri.Triangulation(x, y, [[0, 1]], None, None, None, False)
excinfo.match(r'triangles must be a 2D array of shape \(\?,3\)')
tris = [[0, 1, 2]]
with pytest.raises(ValueError) as excinfo:
triang = _tri.Triangulation(x, y, tris, [0, 1], None, None, False)
excinfo.match(r'mask must be a 1D array with the same length as the ' +
r'triangles array')
with pytest.raises(ValueError) as excinfo:
triang = _tri.Triangulation(x, y, tris, None, [[1]], None, False)
excinfo.match(r'edges must be a 2D array with shape \(\?,2\)')
with pytest.raises(ValueError) as excinfo:
triang = _tri.Triangulation(x, y, tris, None, None, [[-1]], False)
excinfo.match(r'neighbors must be a 2D array with the same shape as the ' +
r'triangles array')
triang = _tri.Triangulation(x, y, tris, None, None, None, False)
with pytest.raises(ValueError) as excinfo:
triang.calculate_plane_coefficients([])
excinfo.match(r'z array must have same length as triangulation x and y ' +
r'arrays')
with pytest.raises(ValueError) as excinfo:
triang.set_mask([0, 1])
excinfo.match(r'mask must be a 1D array with the same length as the ' +
r'triangles array')
# C++ TriContourGenerator.
with pytest.raises(TypeError) as excinfo:
tcg = _tri.TriContourGenerator()
excinfo.match(r'function takes exactly 2 arguments \(0 given\)')
with pytest.raises(ValueError) as excinfo:
tcg = _tri.TriContourGenerator(triang, [1])
excinfo.match(r'z must be a 1D array with the same length as the x and ' +
r'y arrays')
z = [0, 1, 2]
tcg = _tri.TriContourGenerator(triang, z)
with pytest.raises(ValueError) as excinfo:
tcg.create_filled_contour(1, 0)
excinfo.match(r'filled contour levels must be increasing')
# C++ TrapezoidMapTriFinder.
with pytest.raises(TypeError) as excinfo:
trifinder = _tri.TrapezoidMapTriFinder()
excinfo.match(r'function takes exactly 1 argument \(0 given\)')
trifinder = _tri.TrapezoidMapTriFinder(triang)
with pytest.raises(ValueError) as excinfo:
trifinder.find_many([0], [0, 1])
excinfo.match(r'x and y must be array_like with same shape')
def test_qhull_large_offset():
# github issue 8682.
x = np.asarray([0, 1, 0, 1, 0.5])
y = np.asarray([0, 0, 1, 1, 0.5])
offset = 1e10
triang = mtri.Triangulation(x, y)
triang_offset = mtri.Triangulation(x + offset, y + offset)
assert len(triang.triangles) == len(triang_offset.triangles)
|
270f3f04c5bcf8ce14103ee6530c0ac4a1a2ca2cb7514c0eba908518bebeacda
|
import collections
import platform
from unittest import mock
import numpy as np
import pytest
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.transforms as mtransforms
import matplotlib.collections as mcollections
from matplotlib.legend_handler import HandlerTuple
import matplotlib.legend as mlegend
from matplotlib.cbook.deprecation import MatplotlibDeprecationWarning
from matplotlib import rc_context
def test_legend_ordereddict():
# smoketest that ordereddict inputs work...
X = np.random.randn(10)
Y = np.random.randn(10)
labels = ['a'] * 5 + ['b'] * 5
colors = ['r'] * 5 + ['g'] * 5
fig, ax = plt.subplots()
for x, y, label, color in zip(X, Y, labels, colors):
ax.scatter(x, y, label=label, c=color)
handles, labels = ax.get_legend_handles_labels()
legend = collections.OrderedDict(zip(labels, handles))
ax.legend(legend.values(), legend.keys(),
loc='center left', bbox_to_anchor=(1, .5))
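# Illustrative sketch (not one of the tests above): the same label
# de-duplication idea with a plain dict, whose insertion order is guaranteed
# on Python >= 3.7, so an OrderedDict is not strictly required.
def _sketch_legend_dedup_plain_dict():
    fig, ax = plt.subplots()
    for i in range(4):
        ax.plot([0, 1], [i, i], color='C0', label='series')
    handles, labels = ax.get_legend_handles_labels()
    unique = dict(zip(labels, handles))     # keeps one handle per label
    ax.legend(unique.values(), unique.keys())
    assert len(ax.get_legend().get_texts()) == 1
    plt.close(fig)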
@image_comparison(baseline_images=['legend_auto1'], remove_text=True)
def test_legend_auto1():
'Test automatic legend placement'
fig = plt.figure()
ax = fig.add_subplot(111)
x = np.arange(100)
ax.plot(x, 50 - x, 'o', label='y=1')
ax.plot(x, x - 50, 'o', label='y=-1')
ax.legend(loc='best')
@image_comparison(baseline_images=['legend_auto2'], remove_text=True)
def test_legend_auto2():
'Test automatic legend placement'
fig = plt.figure()
ax = fig.add_subplot(111)
x = np.arange(100)
b1 = ax.bar(x, x, align='edge', color='m')
b2 = ax.bar(x, x[::-1], align='edge', color='g')
ax.legend([b1[0], b2[0]], ['up', 'down'], loc='best')
@image_comparison(baseline_images=['legend_auto3'])
def test_legend_auto3():
'Test automatic legend placement'
fig = plt.figure()
ax = fig.add_subplot(111)
x = [0.9, 0.1, 0.1, 0.9, 0.9, 0.5]
y = [0.95, 0.95, 0.05, 0.05, 0.5, 0.5]
ax.plot(x, y, 'o-', label='line')
ax.set_xlim(0.0, 1.0)
ax.set_ylim(0.0, 1.0)
ax.legend(loc='best')
@image_comparison(baseline_images=['legend_various_labels'], remove_text=True)
def test_various_labels():
# tests all sorts of label types
fig = plt.figure()
ax = fig.add_subplot(121)
ax.plot(np.arange(4), 'o', label=1)
ax.plot(np.linspace(4, 4.1), 'o', label='Développés')
ax.plot(np.arange(4, 1, -1), 'o', label='__nolegend__')
ax.legend(numpoints=1, loc='best')
@image_comparison(baseline_images=['legend_labels_first'], extensions=['png'],
remove_text=True)
def test_labels_first():
# test labels to left of markers
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(np.arange(10), '-o', label=1)
ax.plot(np.ones(10)*5, ':x', label="x")
ax.plot(np.arange(20, 10, -1), 'd', label="diamond")
ax.legend(loc='best', markerfirst=False)
@image_comparison(baseline_images=['legend_multiple_keys'], extensions=['png'],
remove_text=True)
def test_multiple_keys():
# test legend entries with multiple keys
fig = plt.figure()
ax = fig.add_subplot(111)
p1, = ax.plot([1, 2, 3], '-o')
p2, = ax.plot([2, 3, 4], '-x')
p3, = ax.plot([3, 4, 5], '-d')
ax.legend([(p1, p2), (p2, p1), p3], ['two keys', 'pad=0', 'one key'],
numpoints=1,
handler_map={(p1, p2): HandlerTuple(ndivide=None),
(p2, p1): HandlerTuple(ndivide=None, pad=0)})
@image_comparison(baseline_images=['rgba_alpha'],
tol={'aarch64': 0.02}.get(platform.machine(), 0.0),
extensions=['png'], remove_text=True)
def test_alpha_rgba():
fig, ax = plt.subplots(1, 1)
ax.plot(range(10), lw=5)
leg = plt.legend(['Longlabel that will go away'], loc='center')
leg.legendPatch.set_facecolor([1, 0, 0, 0.5])
@image_comparison(baseline_images=['rcparam_alpha'],
tol={'aarch64': 0.02}.get(platform.machine(), 0.0),
extensions=['png'], remove_text=True)
def test_alpha_rcparam():
fig, ax = plt.subplots(1, 1)
ax.plot(range(10), lw=5)
with mpl.rc_context(rc={'legend.framealpha': .75}):
leg = plt.legend(['Longlabel that will go away'], loc='center')
    # This alpha is going to be overridden by the rcparam, which sets the
    # patch alpha to a non-None value and therefore causes the alpha value
    # of the face color to be discarded.  This behavior may not be ideal,
    # but it is what it is and we should keep track of it changing.
leg.legendPatch.set_facecolor([1, 0, 0, 0.5])
@image_comparison(baseline_images=['fancy'], remove_text=True)
def test_fancy():
# using subplot triggers some offsetbox functionality untested elsewhere
plt.subplot(121)
plt.scatter(np.arange(10), np.arange(10, 0, -1), label='XX\nXX')
plt.plot([5] * 10, 'o--', label='XX')
plt.errorbar(np.arange(10), np.arange(10), xerr=0.5,
yerr=0.5, label='XX')
plt.legend(loc="center left", bbox_to_anchor=[1.0, 0.5],
ncol=2, shadow=True, title="My legend", numpoints=1)
@image_comparison(baseline_images=['framealpha'], remove_text=True,
tol={'aarch64': 0.02}.get(platform.machine(), 0.0))
def test_framealpha():
x = np.linspace(1, 100, 100)
y = x
plt.plot(x, y, label='mylabel', lw=10)
plt.legend(framealpha=0.5)
@image_comparison(baseline_images=['scatter_rc3', 'scatter_rc1'],
remove_text=True)
def test_rc():
# using subplot triggers some offsetbox functionality untested elsewhere
plt.figure()
ax = plt.subplot(121)
ax.scatter(np.arange(10), np.arange(10, 0, -1), label='three')
ax.legend(loc="center left", bbox_to_anchor=[1.0, 0.5],
title="My legend")
mpl.rcParams['legend.scatterpoints'] = 1
plt.figure()
ax = plt.subplot(121)
ax.scatter(np.arange(10), np.arange(10, 0, -1), label='one')
ax.legend(loc="center left", bbox_to_anchor=[1.0, 0.5],
title="My legend")
@image_comparison(baseline_images=['legend_expand'], remove_text=True)
def test_legend_expand():
'Test expand mode'
legend_modes = [None, "expand"]
fig, axes_list = plt.subplots(len(legend_modes), 1)
x = np.arange(100)
for ax, mode in zip(axes_list, legend_modes):
ax.plot(x, 50 - x, 'o', label='y=1')
l1 = ax.legend(loc='upper left', mode=mode)
ax.add_artist(l1)
ax.plot(x, x - 50, 'o', label='y=-1')
l2 = ax.legend(loc='right', mode=mode)
ax.add_artist(l2)
ax.legend(loc='lower left', mode=mode, ncol=2)
@image_comparison(baseline_images=['hatching'], remove_text=True,
style='default')
def test_hatching():
fig, ax = plt.subplots()
# Patches
patch = plt.Rectangle((0, 0), 0.3, 0.3, hatch='xx',
label='Patch\ndefault color\nfilled')
ax.add_patch(patch)
patch = plt.Rectangle((0.33, 0), 0.3, 0.3, hatch='||', edgecolor='C1',
label='Patch\nexplicit color\nfilled')
ax.add_patch(patch)
patch = plt.Rectangle((0, 0.4), 0.3, 0.3, hatch='xx', fill=False,
label='Patch\ndefault color\nunfilled')
ax.add_patch(patch)
patch = plt.Rectangle((0.33, 0.4), 0.3, 0.3, hatch='||', fill=False,
edgecolor='C1',
label='Patch\nexplicit color\nunfilled')
ax.add_patch(patch)
# Paths
ax.fill_between([0, .15, .3], [.8, .8, .8], [.9, 1.0, .9],
hatch='+', label='Path\ndefault color')
ax.fill_between([.33, .48, .63], [.8, .8, .8], [.9, 1.0, .9],
hatch='+', edgecolor='C2', label='Path\nexplicit color')
ax.set_xlim(-0.01, 1.1)
ax.set_ylim(-0.01, 1.1)
ax.legend(handlelength=4, handleheight=4)
def test_legend_remove():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
lines = ax.plot(range(10))
leg = fig.legend(lines, "test")
leg.remove()
assert fig.legends == []
leg = ax.legend("test")
leg.remove()
assert ax.get_legend() is None
class TestLegendFunction(object):
# Tests the legend function on the Axes and pyplot.
def test_legend_handle_label(self):
lines = plt.plot(range(10))
with mock.patch('matplotlib.legend.Legend') as Legend:
plt.legend(lines, ['hello world'])
Legend.assert_called_with(plt.gca(), lines, ['hello world'])
def test_legend_no_args(self):
lines = plt.plot(range(10), label='hello world')
with mock.patch('matplotlib.legend.Legend') as Legend:
plt.legend()
Legend.assert_called_with(plt.gca(), lines, ['hello world'])
def test_legend_label_args(self):
lines = plt.plot(range(10), label='hello world')
with mock.patch('matplotlib.legend.Legend') as Legend:
plt.legend(['foobar'])
Legend.assert_called_with(plt.gca(), lines, ['foobar'])
def test_legend_three_args(self):
lines = plt.plot(range(10), label='hello world')
with mock.patch('matplotlib.legend.Legend') as Legend:
plt.legend(lines, ['foobar'], loc='right')
Legend.assert_called_with(plt.gca(), lines, ['foobar'], loc='right')
def test_legend_handler_map(self):
lines = plt.plot(range(10), label='hello world')
with mock.patch('matplotlib.legend.'
'_get_legend_handles_labels') as handles_labels:
handles_labels.return_value = lines, ['hello world']
plt.legend(handler_map={'1': 2})
handles_labels.assert_called_with([plt.gca()], {'1': 2})
def test_kwargs(self):
fig, ax = plt.subplots(1, 1)
th = np.linspace(0, 2*np.pi, 1024)
lns, = ax.plot(th, np.sin(th), label='sin', lw=5)
lnc, = ax.plot(th, np.cos(th), label='cos', lw=5)
with mock.patch('matplotlib.legend.Legend') as Legend:
ax.legend(labels=('a', 'b'), handles=(lnc, lns))
Legend.assert_called_with(ax, (lnc, lns), ('a', 'b'))
def test_warn_args_kwargs(self):
fig, ax = plt.subplots(1, 1)
th = np.linspace(0, 2*np.pi, 1024)
lns, = ax.plot(th, np.sin(th), label='sin', lw=5)
lnc, = ax.plot(th, np.cos(th), label='cos', lw=5)
with pytest.warns(UserWarning) as record:
ax.legend((lnc, lns), labels=('a', 'b'))
assert len(record) == 1
assert str(record[0].message) == (
"You have mixed positional and keyword arguments, some input may "
"be discarded.")
def test_parasite(self):
from mpl_toolkits.axes_grid1 import host_subplot
host = host_subplot(111)
par = host.twinx()
p1, = host.plot([0, 1, 2], [0, 1, 2], label="Density")
p2, = par.plot([0, 1, 2], [0, 3, 2], label="Temperature")
with mock.patch('matplotlib.legend.Legend') as Legend:
leg = plt.legend()
Legend.assert_called_with(host, [p1, p2],
['Density', 'Temperature'])
class TestLegendFigureFunction(object):
# Tests the legend function for figure
def test_legend_handle_label(self):
fig, ax = plt.subplots()
lines = ax.plot(range(10))
with mock.patch('matplotlib.legend.Legend') as Legend:
fig.legend(lines, ['hello world'])
Legend.assert_called_with(fig, lines, ['hello world'])
def test_legend_no_args(self):
fig, ax = plt.subplots()
lines = ax.plot(range(10), label='hello world')
with mock.patch('matplotlib.legend.Legend') as Legend:
fig.legend()
Legend.assert_called_with(fig, lines, ['hello world'])
def test_legend_label_arg(self):
fig, ax = plt.subplots()
lines = ax.plot(range(10))
with mock.patch('matplotlib.legend.Legend') as Legend:
fig.legend(['foobar'])
Legend.assert_called_with(fig, lines, ['foobar'])
def test_legend_label_three_args(self):
fig, ax = plt.subplots()
lines = ax.plot(range(10))
with mock.patch('matplotlib.legend.Legend') as Legend:
fig.legend(lines, ['foobar'], 'right')
Legend.assert_called_with(fig, lines, ['foobar'], 'right')
def test_legend_label_three_args_pluskw(self):
# test that third argument and loc= called together give
# Exception
fig, ax = plt.subplots()
lines = ax.plot(range(10))
with pytest.raises(Exception):
fig.legend(lines, ['foobar'], 'right', loc='left')
def test_legend_kw_args(self):
fig, axs = plt.subplots(1, 2)
lines = axs[0].plot(range(10))
lines2 = axs[1].plot(np.arange(10) * 2.)
with mock.patch('matplotlib.legend.Legend') as Legend:
fig.legend(loc='right', labels=('a', 'b'),
handles=(lines, lines2))
Legend.assert_called_with(fig, (lines, lines2), ('a', 'b'),
loc='right')
def test_warn_args_kwargs(self):
fig, axs = plt.subplots(1, 2)
lines = axs[0].plot(range(10))
lines2 = axs[1].plot(np.arange(10) * 2.)
with pytest.warns(UserWarning) as record:
fig.legend((lines, lines2), labels=('a', 'b'))
assert len(record) == 1
assert str(record[0].message) == (
"You have mixed positional and keyword arguments, some input may "
"be discarded.")
@image_comparison(baseline_images=['legend_stackplot'], extensions=['png'])
def test_legend_stackplot():
'''test legend for PolyCollection using stackplot'''
# related to #1341, #1943, and PR #3303
fig = plt.figure()
ax = fig.add_subplot(111)
x = np.linspace(0, 10, 10)
y1 = 1.0 * x
y2 = 2.0 * x + 1
y3 = 3.0 * x + 2
ax.stackplot(x, y1, y2, y3, labels=['y1', 'y2', 'y3'])
ax.set_xlim((0, 10))
ax.set_ylim((0, 70))
ax.legend(loc='best')
def test_cross_figure_patch_legend():
fig, ax = plt.subplots()
fig2, ax2 = plt.subplots()
brs = ax.bar(range(3), range(3))
fig2.legend(brs, 'foo')
def test_nanscatter():
fig, ax = plt.subplots()
h = ax.scatter([np.nan], [np.nan], marker="o",
facecolor="r", edgecolor="r", s=3)
ax.legend([h], ["scatter"])
fig, ax = plt.subplots()
for color in ['red', 'green', 'blue']:
n = 750
x, y = np.random.rand(2, n)
scale = 200.0 * np.random.rand(n)
ax.scatter(x, y, c=color, s=scale, label=color,
alpha=0.3, edgecolors='none')
ax.legend()
ax.grid(True)
def test_legend_repeatcheckok():
fig, ax = plt.subplots()
ax.scatter(0.0, 1.0, color='k', marker='o', label='test')
ax.scatter(0.5, 0.0, color='r', marker='v', label='test')
hl = ax.legend()
hand, lab = mlegend._get_legend_handles_labels([ax])
assert len(lab) == 2
fig, ax = plt.subplots()
ax.scatter(0.0, 1.0, color='k', marker='o', label='test')
ax.scatter(0.5, 0.0, color='k', marker='v', label='test')
hl = ax.legend()
hand, lab = mlegend._get_legend_handles_labels([ax])
assert len(lab) == 2
@image_comparison(baseline_images=['not_covering_scatter'], extensions=['png'])
def test_not_covering_scatter():
colors = ['b', 'g', 'r']
for n in range(3):
plt.scatter([n], [n], color=colors[n])
plt.legend(['foo', 'foo', 'foo'], loc='best')
plt.gca().set_xlim(-0.5, 2.2)
plt.gca().set_ylim(-0.5, 2.2)
@image_comparison(baseline_images=['not_covering_scatter_transform'],
extensions=['png'])
def test_not_covering_scatter_transform():
# Offsets point to top left, the default auto position
offset = mtransforms.Affine2D().translate(-20, 20)
x = np.linspace(0, 30, 1000)
plt.plot(x, x)
plt.scatter([20], [10], transform=offset + plt.gca().transData)
plt.legend(['foo', 'bar'], loc='best')
def test_linecollection_scaled_dashes():
lines1 = [[(0, .5), (.5, 1)], [(.3, .6), (.2, .2)]]
lines2 = [[[0.7, .2], [.8, .4]], [[.5, .7], [.6, .1]]]
lines3 = [[[0.6, .2], [.8, .4]], [[.5, .7], [.1, .1]]]
lc1 = mcollections.LineCollection(lines1, linestyles="--", lw=3)
lc2 = mcollections.LineCollection(lines2, linestyles="-.")
lc3 = mcollections.LineCollection(lines3, linestyles=":", lw=.5)
fig, ax = plt.subplots()
ax.add_collection(lc1)
ax.add_collection(lc2)
ax.add_collection(lc3)
leg = ax.legend([lc1, lc2, lc3], ["line1", "line2", 'line 3'])
h1, h2, h3 = leg.legendHandles
for oh, lh in zip((lc1, lc2, lc3), (h1, h2, h3)):
assert oh.get_linestyles()[0][1] == lh._dashSeq
assert oh.get_linestyles()[0][0] == lh._dashOffset
def test_handler_numpoints():
"""Test legend handler with numpoints <= 1."""
# related to #6921 and PR #8478
fig, ax = plt.subplots()
ax.plot(range(5), label='test')
ax.legend(numpoints=0.5)
def test_empty_bar_chart_with_legend():
"""Test legend when bar chart is empty with a label."""
# related to issue #13003. Calling plt.legend() should not
# raise an IndexError.
plt.bar([], [], label='test')
plt.legend()
def test_shadow_framealpha():
# Test if framealpha is activated when shadow is True
    # and framealpha is not explicitly passed.
fig, ax = plt.subplots()
ax.plot(range(100), label="test")
leg = ax.legend(shadow=True, facecolor='w')
assert leg.get_frame().get_alpha() == 1
def test_legend_title_empty():
# test that if we don't set the legend title, that
# it comes back as an empty string, and that it is not
# visible:
fig, ax = plt.subplots()
ax.plot(range(10))
leg = ax.legend()
assert leg.get_title().get_text() == ""
assert not leg.get_title().get_visible()
def test_legend_proper_window_extent():
# test that legend returns the expected extent under various dpi...
fig, ax = plt.subplots(dpi=100)
ax.plot(range(10), label='Aardvark')
leg = ax.legend()
x01 = leg.get_window_extent(fig.canvas.get_renderer()).x0
fig, ax = plt.subplots(dpi=200)
ax.plot(range(10), label='Aardvark')
leg = ax.legend()
x02 = leg.get_window_extent(fig.canvas.get_renderer()).x0
assert pytest.approx(x01*2, 0.1) == x02
def test_window_extent_cached_renderer():
fig, ax = plt.subplots(dpi=100)
ax.plot(range(10), label='Aardvark')
leg = ax.legend()
leg2 = fig.legend()
fig.canvas.draw()
# check that get_window_extent will use the cached renderer
leg.get_window_extent()
leg2.get_window_extent()
def test_legend_title_fontsize():
# test the title_fontsize kwarg
fig, ax = plt.subplots()
ax.plot(range(10))
leg = ax.legend(title='Aardvark', title_fontsize=22)
assert leg.get_title().get_fontsize() == 22
def test_get_set_draggable():
legend = plt.legend()
assert not legend.get_draggable()
legend.set_draggable(True)
assert legend.get_draggable()
legend.set_draggable(False)
assert not legend.get_draggable()
def test_alpha_handles():
x, n, hh = plt.hist([1, 2, 3], alpha=0.25, label='data', color='red')
legend = plt.legend()
for lh in legend.legendHandles:
lh.set_alpha(1.0)
assert lh.get_facecolor()[:-1] == hh[1].get_facecolor()[:-1]
assert lh.get_edgecolor()[:-1] == hh[1].get_edgecolor()[:-1]
def test_warn_big_data_best_loc():
fig, ax = plt.subplots()
ax.plot(np.arange(200001), label='Is this big data?')
with pytest.warns(UserWarning) as records:
with rc_context({'legend.loc': 'best'}):
l = ax.legend()
fig.canvas.draw()
# The _find_best_position method of Legend is called twice, duplicating
# the warning message.
assert len(records) == 2
for record in records:
assert str(record.message) == (
'Creating legend with loc="best" can be slow with large'
' amounts of data.')
def test_no_warn_big_data_when_loc_specified():
fig, ax = plt.subplots()
ax.plot(np.arange(200001), label='Is this big data?')
with pytest.warns(None) as records:
l = ax.legend('best')
fig.canvas.draw()
assert len(records) == 0
|
f89b77214e0363fc8bf9fe53e9ed8988eeb57c938ad0be6e025c624020532e93
|
import copy
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from matplotlib import patches
from matplotlib.path import Path
from matplotlib.patches import Polygon
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.backend_bases import MouseEvent
def test_empty_closed_path():
path = Path(np.zeros((0, 2)), closed=True)
assert path.vertices.shape == (0, 2)
assert path.codes is None
def test_readonly_path():
path = Path.unit_circle()
def modify_vertices():
path.vertices = path.vertices * 2.0
with pytest.raises(AttributeError):
modify_vertices()
def test_point_in_path():
# Test #1787
verts2 = [(0, 0), (0, 1), (1, 1), (1, 0), (0, 0)]
path = Path(verts2, closed=True)
points = [(0.5, 0.5), (1.5, 0.5)]
ret = path.contains_points(points)
assert ret.dtype == 'bool'
assert np.all(ret == [True, False])
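# Illustrative sketch (not a collected test): the same containment check on
# the ready-made unit square path, without building the vertices by hand.
def _sketch_contains_points_unit_rectangle():
    rect = Path.unit_rectangle()                  # closed square (0, 0)-(1, 1)
    inside = rect.contains_points([(0.5, 0.5), (2.0, 0.5)])
    assert_array_equal(inside, [True, False])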
def test_contains_points_negative_radius():
path = Path.unit_circle()
points = [(0.0, 0.0), (1.25, 0.0), (0.9, 0.9)]
expected = [True, False, False]
result = path.contains_points(points, radius=-0.5)
assert np.all(result == expected)
def test_point_in_path_nan():
box = np.array([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]])
p = Path(box)
test = np.array([[np.nan, 0.5]])
contains = p.contains_points(test)
assert len(contains) == 1
assert not contains[0]
def test_nonlinear_containment():
fig, ax = plt.subplots()
ax.set(xscale="log", ylim=(0, 1))
polygon = ax.axvspan(1, 10)
assert polygon.get_path().contains_point(
ax.transData.transform_point((5, .5)), ax.transData)
assert not polygon.get_path().contains_point(
ax.transData.transform_point((.5, .5)), ax.transData)
assert not polygon.get_path().contains_point(
ax.transData.transform_point((50, .5)), ax.transData)
@image_comparison(
baseline_images=['arrow_contains_point'], extensions=['png'],
remove_text=True, style='mpl20')
def test_arrow_contains_point():
# fix bug (#8384)
fig, ax = plt.subplots()
ax.set_xlim((0, 2))
ax.set_ylim((0, 2))
# create an arrow with Curve style
arrow = patches.FancyArrowPatch((0.5, 0.25), (1.5, 0.75),
arrowstyle='->',
mutation_scale=40)
ax.add_patch(arrow)
# create an arrow with Bracket style
arrow1 = patches.FancyArrowPatch((0.5, 1), (1.5, 1.25),
arrowstyle=']-[',
mutation_scale=40)
ax.add_patch(arrow1)
# create an arrow with other arrow style
arrow2 = patches.FancyArrowPatch((0.5, 1.5), (1.5, 1.75),
arrowstyle='fancy',
fill=False,
mutation_scale=40)
ax.add_patch(arrow2)
patches_list = [arrow, arrow1, arrow2]
# generate some points
X, Y = np.meshgrid(np.arange(0, 2, 0.1),
np.arange(0, 2, 0.1))
for k, (x, y) in enumerate(zip(X.ravel(), Y.ravel())):
xdisp, ydisp = ax.transData.transform_point([x, y])
event = MouseEvent('button_press_event', fig.canvas, xdisp, ydisp)
for m, patch in enumerate(patches_list):
# set the points to red only if the arrow contains the point
inside, res = patch.contains(event)
if inside:
ax.scatter(x, y, s=5, c="r")
@image_comparison(baseline_images=['path_clipping'],
extensions=['svg'], remove_text=True)
def test_path_clipping():
fig = plt.figure(figsize=(6.0, 6.2))
for i, xy in enumerate([
[(200, 200), (200, 350), (400, 350), (400, 200)],
[(200, 200), (200, 350), (400, 350), (400, 100)],
[(200, 100), (200, 350), (400, 350), (400, 100)],
[(200, 100), (200, 415), (400, 350), (400, 100)],
[(200, 100), (200, 415), (400, 415), (400, 100)],
[(200, 415), (400, 415), (400, 100), (200, 100)],
[(400, 415), (400, 100), (200, 100), (200, 415)]]):
ax = fig.add_subplot(4, 2, i+1)
bbox = [0, 140, 640, 260]
ax.set_xlim(bbox[0], bbox[0] + bbox[2])
ax.set_ylim(bbox[1], bbox[1] + bbox[3])
ax.add_patch(Polygon(
xy, facecolor='none', edgecolor='red', closed=True))
@image_comparison(baseline_images=['semi_log_with_zero'], extensions=['png'],
style='mpl20')
def test_log_transform_with_zero():
x = np.arange(-10, 10)
y = (1.0 - 1.0/(x**2+1))**20
fig, ax = plt.subplots()
ax.semilogy(x, y, "-o", lw=15, markeredgecolor='k')
ax.set_ylim(1e-7, 1)
ax.grid(True)
def test_make_compound_path_empty():
# We should be able to make a compound path with no arguments.
# This makes it easier to write generic path based code.
r = Path.make_compound_path()
assert r.vertices.shape == (0, 2)
@image_comparison(baseline_images=['xkcd'], extensions=['png'],
remove_text=True)
def test_xkcd():
np.random.seed(0)
x = np.linspace(0, 2 * np.pi, 100)
y = np.sin(x)
with plt.xkcd():
fig, ax = plt.subplots()
ax.plot(x, y)
@image_comparison(baseline_images=['xkcd_marker'], extensions=['png'],
remove_text=True)
def test_xkcd_marker():
np.random.seed(0)
x = np.linspace(0, 5, 8)
y1 = x
y2 = 5 - x
y3 = 2.5 * np.ones(8)
with plt.xkcd():
fig, ax = plt.subplots()
ax.plot(x, y1, '+', ms=10)
ax.plot(x, y2, 'o', ms=10)
ax.plot(x, y3, '^', ms=10)
@image_comparison(baseline_images=['marker_paths'], extensions=['pdf'],
remove_text=True)
def test_marker_paths_pdf():
N = 7
plt.errorbar(np.arange(N),
np.ones(N) + 4,
np.ones(N))
plt.xlim(-1, N)
plt.ylim(-1, 7)
@image_comparison(baseline_images=['nan_path'], style='default',
remove_text=True, extensions=['pdf', 'svg', 'eps', 'png'])
def test_nan_isolated_points():
y0 = [0, np.nan, 2, np.nan, 4, 5, 6]
y1 = [np.nan, 7, np.nan, 9, 10, np.nan, 12]
fig, ax = plt.subplots()
ax.plot(y0, '-o')
ax.plot(y1, '-o')
def test_path_no_doubled_point_in_to_polygon():
hand = np.array(
[[1.64516129, 1.16145833],
[1.64516129, 1.59375],
[1.35080645, 1.921875],
[1.375, 2.18229167],
[1.68548387, 1.9375],
[1.60887097, 2.55208333],
[1.68548387, 2.69791667],
[1.76209677, 2.56770833],
[1.83064516, 1.97395833],
[1.89516129, 2.75],
[1.9516129, 2.84895833],
[2.01209677, 2.76041667],
[1.99193548, 1.99479167],
[2.11290323, 2.63020833],
[2.2016129, 2.734375],
[2.25403226, 2.60416667],
[2.14919355, 1.953125],
[2.30645161, 2.36979167],
[2.39112903, 2.36979167],
[2.41532258, 2.1875],
[2.1733871, 1.703125],
[2.07782258, 1.16666667]])
(r0, c0, r1, c1) = (1.0, 1.5, 2.1, 2.5)
poly = Path(np.vstack((hand[:, 1], hand[:, 0])).T, closed=True)
clip_rect = transforms.Bbox([[r0, c0], [r1, c1]])
poly_clipped = poly.clip_to_bbox(clip_rect).to_polygons()[0]
assert np.all(poly_clipped[-2] != poly_clipped[-1])
assert np.all(poly_clipped[-1] == poly_clipped[0])
def test_path_to_polygons():
data = [[10, 10], [20, 20]]
p = Path(data)
assert_array_equal(p.to_polygons(width=40, height=40), [])
assert_array_equal(p.to_polygons(width=40, height=40, closed_only=False),
[data])
assert_array_equal(p.to_polygons(), [])
assert_array_equal(p.to_polygons(closed_only=False), [data])
data = [[10, 10], [20, 20], [30, 30]]
closed_data = [[10, 10], [20, 20], [30, 30], [10, 10]]
p = Path(data)
assert_array_equal(p.to_polygons(width=40, height=40), [closed_data])
assert_array_equal(p.to_polygons(width=40, height=40, closed_only=False),
[data])
assert_array_equal(p.to_polygons(), [closed_data])
assert_array_equal(p.to_polygons(closed_only=False), [data])
def test_path_deepcopy():
# Should not raise any error
verts = [[0, 0], [1, 1]]
codes = [Path.MOVETO, Path.LINETO]
path1 = Path(verts)
path2 = Path(verts, codes)
copy.deepcopy(path1)
copy.deepcopy(path2)
def test_path_intersect_path():
# test for the range of intersection angles
base_angles = np.array([0, 15, 30, 45, 60, 75, 90, 105, 120, 135])
angles = np.concatenate([base_angles, base_angles + 1, base_angles - 1])
eps_array = [1e-5, 1e-8, 1e-10, 1e-12]
for phi in angles:
transform = transforms.Affine2D().rotate(np.deg2rad(phi))
# a and b intersect at angle phi
a = Path([(-2, 0), (2, 0)])
b = transform.transform_path(a)
assert a.intersects_path(b) and b.intersects_path(a)
# a and b touch at angle phi at (0, 0)
a = Path([(0, 0), (2, 0)])
b = transform.transform_path(a)
assert a.intersects_path(b) and b.intersects_path(a)
# a and b are orthogonal and intersect at (0, 3)
a = transform.transform_path(Path([(0, 1), (0, 3)]))
b = transform.transform_path(Path([(1, 3), (0, 3)]))
assert a.intersects_path(b) and b.intersects_path(a)
# a and b are collinear and intersect at (0, 3)
a = transform.transform_path(Path([(0, 1), (0, 3)]))
b = transform.transform_path(Path([(0, 5), (0, 3)]))
assert a.intersects_path(b) and b.intersects_path(a)
# self-intersect
assert a.intersects_path(a)
# a contains b
a = transform.transform_path(Path([(0, 0), (5, 5)]))
b = transform.transform_path(Path([(1, 1), (3, 3)]))
assert a.intersects_path(b) and b.intersects_path(a)
# a and b are collinear but do not intersect
a = transform.transform_path(Path([(0, 1), (0, 5)]))
b = transform.transform_path(Path([(3, 0), (3, 3)]))
assert not a.intersects_path(b) and not b.intersects_path(a)
# a and b are on the same line but do not intersect
a = transform.transform_path(Path([(0, 1), (0, 5)]))
b = transform.transform_path(Path([(0, 6), (0, 7)]))
assert not a.intersects_path(b) and not b.intersects_path(a)
        # Note: 1e-13 is the absolute tolerance used by the `isclose`
        # function from src/_path.h.
# a and b are parallel but do not touch
for eps in eps_array:
a = transform.transform_path(Path([(0, 1), (0, 5)]))
b = transform.transform_path(Path([(0 + eps, 1), (0 + eps, 5)]))
assert not a.intersects_path(b) and not b.intersects_path(a)
# a and b are on the same line but do not intersect (really close)
for eps in eps_array:
a = transform.transform_path(Path([(0, 1), (0, 5)]))
b = transform.transform_path(Path([(0, 5 + eps), (0, 7)]))
assert not a.intersects_path(b) and not b.intersects_path(a)
# a and b are on the same line and intersect (really close)
for eps in eps_array:
a = transform.transform_path(Path([(0, 1), (0, 5)]))
b = transform.transform_path(Path([(0, 5 - eps), (0, 7)]))
assert a.intersects_path(b) and b.intersects_path(a)
# b is the same as a but with an extra point
a = transform.transform_path(Path([(0, 1), (0, 5)]))
b = transform.transform_path(Path([(0, 1), (0, 2), (0, 5)]))
assert a.intersects_path(b) and b.intersects_path(a)
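# Illustrative sketch (not a collected test): intersects_path in its two
# simplest configurations -- crossing segments do intersect, disjoint
# parallel segments do not.
def _sketch_intersects_path_basics():
    crossing_a = Path([(0, 0), (1, 1)])
    crossing_b = Path([(0, 1), (1, 0)])
    assert crossing_a.intersects_path(crossing_b)
    parallel_a = Path([(0, 0), (1, 0)])
    parallel_b = Path([(0, 1), (1, 1)])
    assert not parallel_a.intersects_path(parallel_b)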
@pytest.mark.parametrize('offset', range(-720, 361, 45))
def test_full_arc(offset):
low = offset
high = 360 + offset
path = Path.arc(low, high)
mins = np.min(path.vertices, axis=0)
maxs = np.max(path.vertices, axis=0)
np.testing.assert_allclose(mins, -1)
assert np.allclose(maxs, 1)
|
96e1dc8bf1e549a1fa20387ef3cf6e317eba23bbaafc629412980b476007e651
|
from io import BytesIO
import pickle
import platform
import numpy as np
import pytest
from matplotlib import cm
from matplotlib.testing.decorators import image_comparison
from matplotlib.dates import rrulewrapper
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
def test_simple():
fig = plt.figure()
pickle.dump(fig, BytesIO(), pickle.HIGHEST_PROTOCOL)
ax = plt.subplot(121)
pickle.dump(ax, BytesIO(), pickle.HIGHEST_PROTOCOL)
ax = plt.axes(projection='polar')
plt.plot(np.arange(10), label='foobar')
plt.legend()
pickle.dump(ax, BytesIO(), pickle.HIGHEST_PROTOCOL)
# ax = plt.subplot(121, projection='hammer')
# pickle.dump(ax, BytesIO(), pickle.HIGHEST_PROTOCOL)
plt.figure()
plt.bar(x=np.arange(10), height=np.arange(10))
pickle.dump(plt.gca(), BytesIO(), pickle.HIGHEST_PROTOCOL)
fig = plt.figure()
ax = plt.axes()
plt.plot(np.arange(10))
ax.set_yscale('log')
pickle.dump(fig, BytesIO(), pickle.HIGHEST_PROTOCOL)
@image_comparison(baseline_images=['multi_pickle'],
extensions=['png'], remove_text=True,
tol={'aarch64': 0.02}.get(platform.machine(), 0.0),
style='mpl20')
def test_complete():
fig = plt.figure('Figure with a label?', figsize=(10, 6))
plt.suptitle('Can you fit any more in a figure?')
# make some arbitrary data
x, y = np.arange(8), np.arange(10)
data = u = v = np.linspace(0, 10, 80).reshape(10, 8)
v = np.sin(v * -0.6)
# Ensure lists also pickle correctly.
plt.subplot(3, 3, 1)
plt.plot(list(range(10)))
plt.subplot(3, 3, 2)
plt.contourf(data, hatches=['//', 'ooo'])
plt.colorbar()
plt.subplot(3, 3, 3)
plt.pcolormesh(data)
plt.subplot(3, 3, 4)
plt.imshow(data)
plt.subplot(3, 3, 5)
plt.pcolor(data)
ax = plt.subplot(3, 3, 6)
ax.set_xlim(0, 7)
ax.set_ylim(0, 9)
plt.streamplot(x, y, u, v)
ax = plt.subplot(3, 3, 7)
ax.set_xlim(0, 7)
ax.set_ylim(0, 9)
plt.quiver(x, y, u, v)
plt.subplot(3, 3, 8)
plt.scatter(x, x**2, label='$x^2$')
plt.legend(loc='upper left')
plt.subplot(3, 3, 9)
plt.errorbar(x, x * -0.5, xerr=0.2, yerr=0.4)
#
# plotting is done, now test its pickle-ability
#
result_fh = BytesIO()
pickle.dump(fig, result_fh, pickle.HIGHEST_PROTOCOL)
plt.close('all')
# make doubly sure that there are no figures left
assert plt._pylab_helpers.Gcf.figs == {}
# wind back the fh and load in the figure
result_fh.seek(0)
fig = pickle.load(result_fh)
# make sure there is now a figure manager
assert plt._pylab_helpers.Gcf.figs != {}
assert fig.get_label() == 'Figure with a label?'
def test_no_pyplot():
# tests pickle-ability of a figure not created with pyplot
from matplotlib.backends.backend_pdf import FigureCanvasPdf
from matplotlib.figure import Figure
fig = Figure()
_ = FigureCanvasPdf(fig)
ax = fig.add_subplot(1, 1, 1)
ax.plot([1, 2, 3], [1, 2, 3])
pickle.dump(fig, BytesIO(), pickle.HIGHEST_PROTOCOL)
def test_renderer():
from matplotlib.backends.backend_agg import RendererAgg
renderer = RendererAgg(10, 20, 30)
pickle.dump(renderer, BytesIO())
def test_image():
# Prior to v1.4.0 the Image would cache data which was not picklable
# once it had been drawn.
from matplotlib.backends.backend_agg import new_figure_manager
manager = new_figure_manager(1000)
fig = manager.canvas.figure
ax = fig.add_subplot(1, 1, 1)
ax.imshow(np.arange(12).reshape(3, 4))
manager.canvas.draw()
pickle.dump(fig, BytesIO())
def test_polar():
ax = plt.subplot(111, polar=True)
fig = plt.gcf()
pf = pickle.dumps(fig)
pickle.loads(pf)
plt.draw()
class TransformBlob(object):
def __init__(self):
self.identity = mtransforms.IdentityTransform()
self.identity2 = mtransforms.IdentityTransform()
# Force use of the more complex composition.
self.composite = mtransforms.CompositeGenericTransform(
self.identity,
self.identity2)
# Check parent -> child links of TransformWrapper.
self.wrapper = mtransforms.TransformWrapper(self.composite)
# Check child -> parent links of TransformWrapper.
self.composite2 = mtransforms.CompositeGenericTransform(
self.wrapper,
self.identity)
def test_transform():
obj = TransformBlob()
pf = pickle.dumps(obj)
del obj
obj = pickle.loads(pf)
# Check parent -> child links of TransformWrapper.
assert obj.wrapper._child == obj.composite
# Check child -> parent links of TransformWrapper.
assert [v() for v in obj.wrapper._parents.values()] == [obj.composite2]
# Check input and output dimensions are set as expected.
assert obj.wrapper.input_dims == obj.composite.input_dims
assert obj.wrapper.output_dims == obj.composite.output_dims
def test_rrulewrapper():
r = rrulewrapper(2)
try:
pickle.loads(pickle.dumps(r))
except RecursionError:
print('rrulewrapper pickling test failed')
raise
def test_shared():
fig, axs = plt.subplots(2, sharex=True)
fig = pickle.loads(pickle.dumps(fig))
fig.axes[0].set_xlim(10, 20)
assert fig.axes[1].get_xlim() == (10, 20)
@pytest.mark.parametrize("cmap", cm.cmap_d.values())
def test_cmap(cmap):
pickle.dumps(cmap)
|
42da5d58f5285cbffd17c588cad0dcdfefa1d273e36e8f0f4e88a0eecbc84f56
|
from matplotlib.testing.conftest import (mpl_test_settings,
mpl_image_comparison_parameters,
pytest_configure, pytest_unconfigure,
pd)
|
b1f9edbc6554b82686a94dc374e9d3457aaad0fd5a7d5a96b9a57f644621f244
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison
@image_comparison(baseline_images=['agg_filter_alpha'],
extensions=['png', 'pdf'])
def test_agg_filter_alpha():
ax = plt.axes()
x, y = np.mgrid[0:7, 0:8]
data = x**2 - y**2
mesh = ax.pcolormesh(data, cmap='Reds', zorder=5)
def manual_alpha(im, dpi):
im[:, :, 3] *= 0.6
print('CALLED')
return im, 0, 0
# Note: Doing alpha like this is not the same as setting alpha on
# the mesh itself. Currently meshes are drawn as independent patches,
# and we see fine borders around the blocks of color. See the SO
# question for an example: https://stackoverflow.com/questions/20678817
mesh.set_agg_filter(manual_alpha)
# Currently we must enable rasterization for this to have an effect in
# the PDF backend.
mesh.set_rasterized(True)
ax.plot([0, 4, 7], [1, 3, 8])
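# Illustrative sketch (not part of the original test file): another simple
# agg_filter callable, darkening the rasterized RGBA buffer instead of
# scaling its alpha channel.  The 0.5 factor is arbitrary.
def _darken_filter(im, dpi):
    im = im.copy()
    im[:, :, :3] *= 0.5  # scale the RGB channels, leave alpha untouched
    return im, 0, 0
# Example usage: an_artist.set_agg_filter(_darken_filter)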
|
d5c37929eaae4a1546f370e081e531b7941613db8891f9b238d6a854d24ea23c
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison
@image_comparison(baseline_images=['spines_axes_positions'])
def test_spines_axes_positions():
# SF bug 2852168
fig = plt.figure()
x = np.linspace(0, 2*np.pi, 100)
y = 2*np.sin(x)
ax = fig.add_subplot(1, 1, 1)
ax.set_title('centered spines')
ax.plot(x, y)
ax.spines['right'].set_position(('axes', 0.1))
ax.yaxis.set_ticks_position('right')
ax.spines['top'].set_position(('axes', 0.25))
ax.xaxis.set_ticks_position('top')
ax.spines['left'].set_color('none')
ax.spines['bottom'].set_color('none')
@image_comparison(baseline_images=['spines_data_positions'])
def test_spines_data_positions():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.spines['left'].set_position(('data', -1.5))
ax.spines['top'].set_position(('data', 0.5))
ax.spines['right'].set_position(('data', -0.5))
ax.spines['bottom'].set_position('zero')
ax.set_xlim([-2, 2])
ax.set_ylim([-2, 2])
@image_comparison(baseline_images=['spines_capstyle'])
def test_spines_capstyle():
# issue 2542
plt.rc('axes', linewidth=20)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xticks([])
ax.set_yticks([])
def test_label_without_ticks():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
plt.subplots_adjust(left=0.3, bottom=0.3)
ax.plot(np.arange(10))
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('outward', 30))
ax.spines['right'].set_visible(False)
ax.set_ylabel('y label')
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('outward', 30))
ax.spines['top'].set_visible(False)
ax.set_xlabel('x label')
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
plt.draw()
spine = ax.spines['left']
spinebbox = spine.get_transform().transform_path(
spine.get_path()).get_extents()
assert ax.yaxis.label.get_position()[0] < spinebbox.xmin, \
"Y-Axis label not left of the spine"
spine = ax.spines['bottom']
spinebbox = spine.get_transform().transform_path(
spine.get_path()).get_extents()
assert ax.xaxis.label.get_position()[1] < spinebbox.ymin, \
"X-Axis label not below the spine"
|
2f9862fd87902651c307294d44c48e78cde07e5c538207108108cfbb65ac9fb8
|
from io import BytesIO
import glob
import os
import numpy as np
import pytest
from matplotlib.testing.decorators import image_comparison
from matplotlib import pyplot as plt
import matplotlib.cm as cm
@image_comparison(baseline_images=['pngsuite'], extensions=['png'],
tol=0.03)
def test_pngsuite():
dirname = os.path.join(
os.path.dirname(__file__),
'baseline_images',
'pngsuite')
files = sorted(glob.iglob(os.path.join(dirname, 'basn*.png')))
fig = plt.figure(figsize=(len(files), 2))
for i, fname in enumerate(files):
data = plt.imread(fname)
cmap = None # use default colormap
if data.ndim == 2:
# keep grayscale images gray
cmap = cm.gray
plt.imshow(data, extent=[i, i + 1, 0, 1], cmap=cmap)
plt.gca().patch.set_facecolor("#ddffff")
plt.gca().set_xlim(0, len(files))
def test_imread_png_uint16():
from matplotlib import _png
img = _png.read_png_int(os.path.join(os.path.dirname(__file__),
'baseline_images/test_png/uint16.png'))
assert (img.dtype == np.uint16)
assert np.sum(img.flatten()) == 134184960
def test_truncated_file(tmpdir):
d = tmpdir.mkdir('test')
fname = str(d.join('test.png'))
fname_t = str(d.join('test_truncated.png'))
plt.savefig(fname)
with open(fname, 'rb') as fin:
buf = fin.read()
with open(fname_t, 'wb') as fout:
fout.write(buf[:20])
with pytest.raises(Exception):
plt.imread(fname_t)
def test_truncated_buffer():
b = BytesIO()
plt.savefig(b)
b.seek(0)
b2 = BytesIO(b.read(20))
b2.seek(0)
with pytest.raises(Exception):
plt.imread(b2)
|
9f7528ba489dd06723c90a5de99aea218f6edd12cd1365c03d6c2d70722caada
|
import os
import shutil
import pytest
from pytest import approx
from matplotlib.testing.compare import compare_images
from matplotlib.testing.decorators import _image_directories
baseline_dir, result_dir = _image_directories(lambda: 'dummy func')
# Tests of the image comparison algorithm.
@pytest.mark.parametrize(
'im1, im2, tol, expect_rms',
[
# Comparison of an image and the same image with minor differences.
# This expects the images to compare equal under normal tolerance, and
# have a small RMS.
('basn3p02.png', 'basn3p02-minorchange.png', 10, None),
# Now test with no tolerance.
('basn3p02.png', 'basn3p02-minorchange.png', 0, 6.50646),
# Comparison with an image that is shifted by 1px in the X axis.
('basn3p02.png', 'basn3p02-1px-offset.png', 0, 90.15611),
# Comparison with an image with half the pixels shifted by 1px in the X
# axis.
('basn3p02.png', 'basn3p02-half-1px-offset.png', 0, 63.75),
# Comparison of an image and the same image scrambled.
# This expects the images to compare completely different, with a very
# large RMS.
# Note: The image has been scrambled in a specific way, by having
# each color component of each pixel randomly placed somewhere in the
# image. It contains exactly the same number of pixels of each color
# value of R, G and B, but in a totally different position.
# Test with no tolerance to make sure that we pick up even a very small
# RMS error.
('basn3p02.png', 'basn3p02-scrambled.png', 0, 172.63582),
# Comparison of an image and a slightly brighter image.
# The two images are solid color, with the second image being exactly 1
# color value brighter.
# This expects the images to compare equal under normal tolerance, and
# have an RMS of exactly 1.
('all127.png', 'all128.png', 0, 1),
# Now test the reverse comparison.
('all128.png', 'all127.png', 0, 1),
])
def test_image_comparison_expect_rms(im1, im2, tol, expect_rms):
"""Compare two images, expecting a particular RMS error.
im1 and im2 are filenames relative to the baseline_dir directory.
tol is the tolerance to pass to compare_images.
expect_rms is the expected RMS value, or None. If None, the test will
succeed if compare_images succeeds. Otherwise, the test will succeed if
compare_images fails and returns an RMS error almost equal to this value.
"""
im1 = os.path.join(baseline_dir, im1)
im2_src = os.path.join(baseline_dir, im2)
im2 = os.path.join(result_dir, im2)
# Move im2 from baseline_dir to result_dir. This will ensure that
# compare_images writes the diff file to result_dir, instead of trying to
# write to the (possibly read-only) baseline_dir.
shutil.copyfile(im2_src, im2)
results = compare_images(im1, im2, tol=tol, in_decorator=True)
if expect_rms is None:
assert results is None
else:
assert results is not None
assert results['rms'] == approx(expect_rms, abs=1e-4)
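# Illustrative sketch (not part of the original test module): compare_images
# can also be called directly, outside the parametrized test above.  The file
# names here are hypothetical placeholders.
def _example_direct_comparison():
    expected = os.path.join(baseline_dir, 'basn3p02.png')
    actual = os.path.join(result_dir, 'basn3p02.png')
    # Returns None when the images agree within tol, otherwise a dict holding
    # the RMS error and the expected/actual/diff image paths.
    return compare_images(expected, actual, tol=10, in_decorator=True)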
|
d42b4b8304fa15fe31874417624297535e18cdd157421cb6b06d62a88df179d3
|
from unittest.mock import MagicMock
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison
import matplotlib.units as munits
import numpy as np
import platform
import pytest
# Basic class that wraps numpy array and has units
class Quantity(object):
def __init__(self, data, units):
self.magnitude = data
self.units = units
def to(self, new_units):
factors = {('hours', 'seconds'): 3600, ('minutes', 'hours'): 1 / 60,
('minutes', 'seconds'): 60, ('feet', 'miles'): 1 / 5280.,
('feet', 'inches'): 12, ('miles', 'inches'): 12 * 5280}
if self.units != new_units:
mult = factors[self.units, new_units]
return Quantity(mult * self.magnitude, new_units)
else:
return Quantity(self.magnitude, self.units)
def __getattr__(self, attr):
return getattr(self.magnitude, attr)
def __getitem__(self, item):
if np.iterable(self.magnitude):
return Quantity(self.magnitude[item], self.units)
else:
return Quantity(self.magnitude, self.units)
def __array__(self):
return np.asarray(self.magnitude)
@pytest.fixture
def quantity_converter():
# Create an instance of the conversion interface and
# mock so we can check methods called
qc = munits.ConversionInterface()
def convert(value, unit, axis):
if hasattr(value, 'units'):
return value.to(unit).magnitude
elif np.iterable(value):
try:
return [v.to(unit).magnitude for v in value]
except AttributeError:
return [Quantity(v, axis.get_units()).to(unit).magnitude
for v in value]
else:
return Quantity(value, axis.get_units()).to(unit).magnitude
def default_units(value, axis):
if hasattr(value, 'units'):
return value.units
elif np.iterable(value):
for v in value:
if hasattr(v, 'units'):
return v.units
return None
qc.convert = MagicMock(side_effect=convert)
qc.axisinfo = MagicMock(side_effect=lambda u, a: munits.AxisInfo(label=u))
qc.default_units = MagicMock(side_effect=default_units)
return qc
# Tests that the conversion machinery works properly for classes that
# work as a facade over numpy arrays (like pint)
@image_comparison(baseline_images=['plot_pint'],
tol={'aarch64': 0.02}.get(platform.machine(), 0.0),
extensions=['png'], remove_text=False, style='mpl20')
def test_numpy_facade(quantity_converter):
# Register the class
munits.registry[Quantity] = quantity_converter
# Simple test
y = Quantity(np.linspace(0, 30), 'miles')
x = Quantity(np.linspace(0, 5), 'hours')
fig, ax = plt.subplots()
fig.subplots_adjust(left=0.15) # Make space for label
ax.plot(x, y, 'tab:blue')
ax.axhline(Quantity(26400, 'feet'), color='tab:red')
ax.axvline(Quantity(120, 'minutes'), color='tab:green')
ax.yaxis.set_units('inches')
ax.xaxis.set_units('seconds')
assert quantity_converter.convert.called
assert quantity_converter.axisinfo.called
assert quantity_converter.default_units.called
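# Illustrative sketch (not part of the original test module): the fixture
# above wraps the converter methods in MagicMocks so the test can assert that
# they were called; a plain ConversionInterface subclass (hypothetical name)
# can be registered the same way when no call tracking is needed.
class _SimpleQuantityConverter(munits.ConversionInterface):
    @staticmethod
    def convert(value, unit, axis):
        return value.to(unit).magnitude if hasattr(value, 'units') else value
    @staticmethod
    def axisinfo(unit, axis):
        return munits.AxisInfo(label=unit)
    @staticmethod
    def default_units(x, axis):
        return getattr(x, 'units', None)
# Example registration: munits.registry[Quantity] = _SimpleQuantityConverter()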
# Tests gh-8908
@image_comparison(baseline_images=['plot_masked_units'],
tol={'aarch64': 0.02}.get(platform.machine(), 0.0),
extensions=['png'], remove_text=True, style='mpl20')
def test_plot_masked_units():
data = np.linspace(-5, 5)
data_masked = np.ma.array(data, mask=(data > -2) & (data < 2))
data_masked_units = Quantity(data_masked, 'meters')
fig, ax = plt.subplots()
ax.plot(data_masked_units)
def test_empty_set_limits_with_units(quantity_converter):
# Register the class
munits.registry[Quantity] = quantity_converter
fig, ax = plt.subplots()
ax.set_xlim(Quantity(-1, 'meters'), Quantity(6, 'meters'))
ax.set_ylim(Quantity(-1, 'hours'), Quantity(16, 'hours'))
@image_comparison(baseline_images=['jpl_bar_units'], extensions=['png'],
savefig_kwarg={'dpi': 120}, style='mpl20')
def test_jpl_bar_units():
from datetime import datetime
import matplotlib.testing.jpl_units as units
units.register()
day = units.Duration("ET", 24.0 * 60.0 * 60.0)
x = [0*units.km, 1*units.km, 2*units.km]
w = [1*day, 2*day, 3*day]
b = units.Epoch("ET", dt=datetime(2009, 4, 25))
fig, ax = plt.subplots()
ax.bar(x, w, bottom=b)
ax.set_ylim([b-1*day, b+w[-1]+1*day])
@image_comparison(baseline_images=['jpl_barh_units'], extensions=['png'],
savefig_kwarg={'dpi': 120}, style='mpl20')
def test_jpl_barh_units():
from datetime import datetime
import matplotlib.testing.jpl_units as units
units.register()
day = units.Duration("ET", 24.0 * 60.0 * 60.0)
x = [0*units.km, 1*units.km, 2*units.km]
w = [1*day, 2*day, 3*day]
b = units.Epoch("ET", dt=datetime(2009, 4, 25))
fig, ax = plt.subplots()
ax.barh(x, w, left=b)
ax.set_xlim([b-1*day, b+w[-1]+1*day])
def test_empty_arrays():
# Check that plotting an empty array with a dtype works
plt.scatter(np.array([], dtype='datetime64[ns]'), np.array([]))
|
a604036fddaee11cfc38d45525c59d24b8aa57166ecb7271d9d5617114f02447
|
"""
Tests specific to the patches module.
"""
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_equal
import pytest
from matplotlib.cbook import MatplotlibDeprecationWarning
from matplotlib.patches import Polygon, Rectangle, FancyArrowPatch
from matplotlib.testing.decorators import image_comparison, check_figures_equal
import matplotlib.pyplot as plt
from matplotlib import (
collections as mcollections, colors as mcolors, patches as mpatches,
path as mpath, style as mstyle, transforms as mtransforms)
import sys
on_win = (sys.platform == 'win32')
def test_Polygon_close():
#: Github issue #1018 identified a bug in the Polygon handling
#: of the closed attribute; the path was not getting closed
#: when set_xy was used to set the vertices.
# open set of vertices:
xy = [[0, 0], [0, 1], [1, 1]]
# closed set:
xyclosed = xy + [[0, 0]]
# start with open path and close it:
p = Polygon(xy, closed=True)
assert_array_equal(p.get_xy(), xyclosed)
p.set_xy(xy)
assert_array_equal(p.get_xy(), xyclosed)
# start with closed path and open it:
p = Polygon(xyclosed, closed=False)
assert_array_equal(p.get_xy(), xy)
p.set_xy(xyclosed)
assert_array_equal(p.get_xy(), xy)
# start with open path and leave it open:
p = Polygon(xy, closed=False)
assert_array_equal(p.get_xy(), xy)
p.set_xy(xy)
assert_array_equal(p.get_xy(), xy)
# start with closed path and leave it closed:
p = Polygon(xyclosed, closed=True)
assert_array_equal(p.get_xy(), xyclosed)
p.set_xy(xyclosed)
assert_array_equal(p.get_xy(), xyclosed)
def test_rotate_rect():
loc = np.asarray([1.0, 2.0])
width = 2
height = 3
angle = 30.0
# A rotated rectangle
rect1 = Rectangle(loc, width, height, angle=angle)
# A non-rotated rectangle
rect2 = Rectangle(loc, width, height)
# Set up an explicit rotation matrix (in radians)
angle_rad = np.pi * angle / 180.0
rotation_matrix = np.array([[np.cos(angle_rad), -np.sin(angle_rad)],
[np.sin(angle_rad), np.cos(angle_rad)]])
# Translate to origin, rotate each vertex, and then translate back
new_verts = np.inner(rotation_matrix, rect2.get_verts() - loc).T + loc
# They should be the same
assert_almost_equal(rect1.get_verts(), new_verts)
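# Illustrative sketch (not part of the original test module): the same
# rotation expressed with a matplotlib transform instead of an explicit
# rotation matrix.  The default arguments mirror the values used above.
def _rotated_verts_via_transform(loc=(1.0, 2.0), width=2, height=3, angle=30.0):
    rect = Rectangle(loc, width, height)
    rotation = mtransforms.Affine2D().rotate_deg_around(loc[0], loc[1], angle)
    return rotation.transform(rect.get_verts())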
def test_negative_rect():
# These two rectangles have the same vertices, but starting from a
# different point. (We also drop the last vertex, which is a duplicate.)
pos_vertices = Rectangle((-3, -2), 3, 2).get_verts()[:-1]
neg_vertices = Rectangle((0, 0), -3, -2).get_verts()[:-1]
assert_array_equal(np.roll(neg_vertices, 2, 0), pos_vertices)
@image_comparison(baseline_images=['clip_to_bbox'])
def test_clip_to_bbox():
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim([-18, 20])
ax.set_ylim([-150, 100])
path = mpath.Path.unit_regular_star(8).deepcopy()
path.vertices *= [10, 100]
path.vertices -= [5, 25]
path2 = mpath.Path.unit_circle().deepcopy()
path2.vertices *= [10, 100]
path2.vertices += [10, -25]
combined = mpath.Path.make_compound_path(path, path2)
patch = mpatches.PathPatch(
combined, alpha=0.5, facecolor='coral', edgecolor='none')
ax.add_patch(patch)
bbox = mtransforms.Bbox([[-12, -77.5], [50, -110]])
result_path = combined.clip_to_bbox(bbox)
result_patch = mpatches.PathPatch(
result_path, alpha=0.5, facecolor='green', lw=4, edgecolor='black')
ax.add_patch(result_patch)
@image_comparison(baseline_images=['patch_alpha_coloring'], remove_text=True)
def test_patch_alpha_coloring():
"""
Test checks that the patch and collection are rendered with the specified
alpha values in their facecolor and edgecolor.
"""
star = mpath.Path.unit_regular_star(6)
circle = mpath.Path.unit_circle()
# concatenate the star with an internal cutout of the circle
verts = np.concatenate([circle.vertices, star.vertices[::-1]])
codes = np.concatenate([circle.codes, star.codes])
cut_star1 = mpath.Path(verts, codes)
cut_star2 = mpath.Path(verts + 1, codes)
ax = plt.axes()
patch = mpatches.PathPatch(cut_star1,
linewidth=5, linestyle='dashdot',
facecolor=(1, 0, 0, 0.5),
edgecolor=(0, 0, 1, 0.75))
ax.add_patch(patch)
col = mcollections.PathCollection([cut_star2],
linewidth=5, linestyles='dashdot',
facecolor=(1, 0, 0, 0.5),
edgecolor=(0, 0, 1, 0.75))
ax.add_collection(col)
ax.set_xlim([-1, 2])
ax.set_ylim([-1, 2])
@image_comparison(baseline_images=['patch_alpha_override'], remove_text=True)
def test_patch_alpha_override():
#: Test checks that specifying an alpha attribute for a patch or
#: collection will override any alpha component of the facecolor
#: or edgecolor.
star = mpath.Path.unit_regular_star(6)
circle = mpath.Path.unit_circle()
# concatenate the star with an internal cutout of the circle
verts = np.concatenate([circle.vertices, star.vertices[::-1]])
codes = np.concatenate([circle.codes, star.codes])
cut_star1 = mpath.Path(verts, codes)
cut_star2 = mpath.Path(verts + 1, codes)
ax = plt.axes()
patch = mpatches.PathPatch(cut_star1,
linewidth=5, linestyle='dashdot',
alpha=0.25,
facecolor=(1, 0, 0, 0.5),
edgecolor=(0, 0, 1, 0.75))
ax.add_patch(patch)
col = mcollections.PathCollection([cut_star2],
linewidth=5, linestyles='dashdot',
alpha=0.25,
facecolor=(1, 0, 0, 0.5),
edgecolor=(0, 0, 1, 0.75))
ax.add_collection(col)
ax.set_xlim([-1, 2])
ax.set_ylim([-1, 2])
@pytest.mark.style('default')
def test_patch_color_none():
# Make sure the alpha kwarg does not override 'none' facecolor.
# Addresses issue #7478.
c = plt.Circle((0, 0), 1, facecolor='none', alpha=1)
assert c.get_facecolor()[0] == 0
@image_comparison(baseline_images=['patch_custom_linestyle'],
remove_text=True)
def test_patch_custom_linestyle():
#: A test to check that patches and collections accept custom dash
#: patterns as linestyle and that they display correctly.
star = mpath.Path.unit_regular_star(6)
circle = mpath.Path.unit_circle()
# concatenate the star with an internal cutout of the circle
verts = np.concatenate([circle.vertices, star.vertices[::-1]])
codes = np.concatenate([circle.codes, star.codes])
cut_star1 = mpath.Path(verts, codes)
cut_star2 = mpath.Path(verts + 1, codes)
ax = plt.axes()
patch = mpatches.PathPatch(cut_star1,
linewidth=5, linestyle=(0.0, (5.0, 7.0, 10.0, 7.0)),
facecolor=(1, 0, 0),
edgecolor=(0, 0, 1))
ax.add_patch(patch)
col = mcollections.PathCollection([cut_star2],
linewidth=5, linestyles=[(0.0, (5.0, 7.0, 10.0, 7.0))],
facecolor=(1, 0, 0),
edgecolor=(0, 0, 1))
ax.add_collection(col)
ax.set_xlim([-1, 2])
ax.set_ylim([-1, 2])
def test_patch_linestyle_accents():
#: Test if linestyle can also be specified with short mnemonics like "--"
#: c.f. Github issue #2136
star = mpath.Path.unit_regular_star(6)
circle = mpath.Path.unit_circle()
# concatenate the star with an internal cutout of the circle
verts = np.concatenate([circle.vertices, star.vertices[::-1]])
codes = np.concatenate([circle.codes, star.codes])
linestyles = ["-", "--", "-.", ":",
"solid", "dashed", "dashdot", "dotted"]
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
for i, ls in enumerate(linestyles):
star = mpath.Path(verts + i, codes)
patch = mpatches.PathPatch(star,
linewidth=3, linestyle=ls,
facecolor=(1, 0, 0),
edgecolor=(0, 0, 1))
ax.add_patch(patch)
ax.set_xlim([-1, i + 1])
ax.set_ylim([-1, i + 1])
fig.canvas.draw()
assert True
def test_wedge_movement():
param_dict = {'center': ((0, 0), (1, 1), 'set_center'),
'r': (5, 8, 'set_radius'),
'width': (2, 3, 'set_width'),
'theta1': (0, 30, 'set_theta1'),
'theta2': (45, 50, 'set_theta2')}
init_args = {k: v[0] for k, v in param_dict.items()}
w = mpatches.Wedge(**init_args)
for attr, (old_v, new_v, func) in param_dict.items():
assert getattr(w, attr) == old_v
getattr(w, func)(new_v)
assert getattr(w, attr) == new_v
# png needs tol>=0.06, pdf tol>=1.617
@image_comparison(baseline_images=['wedge_range'],
remove_text=True, tol=1.65 if on_win else 0)
def test_wedge_range():
ax = plt.axes()
t1 = 2.313869244286224
args = [[52.31386924, 232.31386924],
[52.313869244286224, 232.31386924428622],
[t1, t1 + 180.0],
[0, 360],
[90, 90 + 360],
[-180, 180],
[0, 380],
[45, 46],
[46, 45]]
for i, (theta1, theta2) in enumerate(args):
x = i % 3
y = i // 3
wedge = mpatches.Wedge((x * 3, y * 3), 1, theta1, theta2,
facecolor='none', edgecolor='k', lw=3)
ax.add_artist(wedge)
ax.set_xlim([-2, 8])
ax.set_ylim([-2, 9])
def test_patch_str():
"""
Check that patches have nice and working `str` representation.
Note that the logic is that `__str__` is defined such that:
str(eval(str(p))) == str(p)
"""
p = mpatches.Circle(xy=(1, 2), radius=3)
assert str(p) == 'Circle(xy=(1, 2), radius=3)'
p = mpatches.Ellipse(xy=(1, 2), width=3, height=4, angle=5)
assert str(p) == 'Ellipse(xy=(1, 2), width=3, height=4, angle=5)'
p = mpatches.Rectangle(xy=(1, 2), width=3, height=4, angle=5)
assert str(p) == 'Rectangle(xy=(1, 2), width=3, height=4, angle=5)'
p = mpatches.Wedge(center=(1, 2), r=3, theta1=4, theta2=5, width=6)
assert str(p) == 'Wedge(center=(1, 2), r=3, theta1=4, theta2=5, width=6)'
p = mpatches.Arc(xy=(1, 2), width=3, height=4, angle=5, theta1=6, theta2=7)
expected = 'Arc(xy=(1, 2), width=3, height=4, angle=5, theta1=6, theta2=7)'
assert str(p) == expected
p = mpatches.RegularPolygon((1, 2), 20, radius=5)
assert str(p) == "RegularPolygon((1, 2), 20, radius=5, orientation=0)"
p = mpatches.CirclePolygon(xy=(1, 2), radius=5, resolution=20)
assert str(p) == "CirclePolygon((1, 2), radius=5, resolution=20)"
p = mpatches.FancyBboxPatch((1, 2), width=3, height=4)
assert str(p) == "FancyBboxPatch((1, 2), width=3, height=4)"
# Further nice __str__ which cannot be `eval`uated:
path_data = [([1, 2], mpath.Path.MOVETO), ([2, 2], mpath.Path.LINETO),
([1, 2], mpath.Path.CLOSEPOLY)]
p = mpatches.PathPatch(mpath.Path(*zip(*path_data)))
assert str(p) == "PathPatch3((1, 2) ...)"
data = [[1, 2], [2, 2], [1, 2]]
p = mpatches.Polygon(data)
assert str(p) == "Polygon3((1, 2) ...)"
p = mpatches.FancyArrowPatch(path=mpath.Path(*zip(*path_data)))
assert str(p)[:27] == "FancyArrowPatch(Path(array("
p = mpatches.FancyArrowPatch((1, 2), (3, 4))
assert str(p) == "FancyArrowPatch((1, 2)->(3, 4))"
p = mpatches.ConnectionPatch((1, 2), (3, 4), 'data')
assert str(p) == "ConnectionPatch((1, 2), (3, 4))"
s = mpatches.Shadow(p, 1, 1)
assert str(s) == "Shadow(ConnectionPatch((1, 2), (3, 4)))"
with pytest.warns(MatplotlibDeprecationWarning):
p = mpatches.YAArrow(plt.gcf(), (1, 0), (2, 1), width=0.1)
assert str(p) == "YAArrow()"
# Not testing Arrow, FancyArrow here
# because they seem to exist only for historical reasons.
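# Illustrative sketch (not part of the original test module): the documented
# str(eval(str(p))) == str(p) property, checked for a single patch with the
# class name made available to eval().
def _example_str_roundtrip():
    p = mpatches.Circle(xy=(1, 2), radius=3)
    assert str(eval(str(p), {'Circle': mpatches.Circle})) == str(p)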
@image_comparison(baseline_images=['multi_color_hatch'],
remove_text=True, style='default')
def test_multi_color_hatch():
fig, ax = plt.subplots()
rects = ax.bar(range(5), range(1, 6))
for i, rect in enumerate(rects):
rect.set_facecolor('none')
rect.set_edgecolor('C{}'.format(i))
rect.set_hatch('/')
for i in range(5):
with mstyle.context({'hatch.color': 'C{}'.format(i)}):
r = Rectangle((i - .8 / 2, 5), .8, 1, hatch='//', fc='none')
ax.add_patch(r)
@image_comparison(baseline_images=['units_rectangle'], extensions=['png'])
def test_units_rectangle():
import matplotlib.testing.jpl_units as U
U.register()
p = mpatches.Rectangle((5*U.km, 6*U.km), 1*U.km, 2*U.km)
fig, ax = plt.subplots()
ax.add_patch(p)
ax.set_xlim([4*U.km, 7*U.km])
ax.set_ylim([5*U.km, 9*U.km])
@image_comparison(baseline_images=['connection_patch'], extensions=['png'],
style='mpl20', remove_text=True)
def test_connection_patch():
fig, (ax1, ax2) = plt.subplots(1, 2)
con = mpatches.ConnectionPatch(xyA=(0.1, 0.1), xyB=(0.9, 0.9),
coordsA='data', coordsB='data',
axesA=ax2, axesB=ax1,
arrowstyle="->")
ax2.add_artist(con)
xyA = (0.6, 1.0) # in axes coordinates
xyB = (0.0, 0.2) # x in axes coordinates, y in data coordinates
coordsA = "axes fraction"
coordsB = ax2.get_yaxis_transform()
con = mpatches.ConnectionPatch(xyA=xyA, xyB=xyB, coordsA=coordsA,
coordsB=coordsB, arrowstyle="-")
ax2.add_artist(con)
def test_datetime_rectangle():
# Check that creating a rectangle with timedeltas doesn't fail
from datetime import datetime, timedelta
start = datetime(2017, 1, 1, 0, 0, 0)
delta = timedelta(seconds=16)
patch = mpatches.Rectangle((start, 0), delta, 1)
fig, ax = plt.subplots()
ax.add_patch(patch)
def test_datetime_datetime_fails():
from datetime import datetime
start = datetime(2017, 1, 1, 0, 0, 0)
dt_delta = datetime(1970, 1, 5) # Will be 5 days if units are done wrong
with pytest.raises(TypeError):
mpatches.Rectangle((start, 0), dt_delta, 1)
with pytest.raises(TypeError):
mpatches.Rectangle((0, start), 1, dt_delta)
def test_contains_point():
ell = mpatches.Ellipse((0.5, 0.5), 0.5, 1.0, 0)
points = [(0.0, 0.5), (0.2, 0.5), (0.25, 0.5), (0.5, 0.5)]
path = ell.get_path()
transform = ell.get_transform()
radius = ell._process_radius(None)
expected = np.array([path.contains_point(point,
transform,
radius) for point in points])
result = np.array([ell.contains_point(point) for point in points])
assert np.all(result == expected)
def test_contains_points():
ell = mpatches.Ellipse((0.5, 0.5), 0.5, 1.0, 0)
points = [(0.0, 0.5), (0.2, 0.5), (0.25, 0.5), (0.5, 0.5)]
path = ell.get_path()
transform = ell.get_transform()
radius = ell._process_radius(None)
expected = path.contains_points(points, transform, radius)
result = ell.contains_points(points)
assert np.all(result == expected)
# Currently fails with pdf/svg, probably because some parts assume a dpi of 72.
@check_figures_equal(extensions=["png"])
def test_shadow(fig_test, fig_ref):
xy = np.array([.2, .3])
dxy = np.array([.1, .2])
# We need to work around the nonsensical (dpi-dependent) interpretation of
# offsets by the Shadow class...
plt.rcParams["savefig.dpi"] = "figure"
# Test image.
a1 = fig_test.subplots()
rect = mpatches.Rectangle(xy=xy, width=.5, height=.5)
shadow = mpatches.Shadow(rect, ox=dxy[0], oy=dxy[1])
a1.add_patch(rect)
a1.add_patch(shadow)
# Reference image.
a2 = fig_ref.subplots()
rect = mpatches.Rectangle(xy=xy, width=.5, height=.5)
shadow = mpatches.Rectangle(
xy=xy + fig_ref.dpi / 72 * dxy, width=.5, height=.5,
fc=np.asarray(mcolors.to_rgb(rect.get_facecolor())) * .3,
ec=np.asarray(mcolors.to_rgb(rect.get_facecolor())) * .3,
alpha=.5)
a2.add_patch(shadow)
a2.add_patch(rect)
def test_fancyarrow_units():
from datetime import datetime
# Smoke test to check that FancyArrowPatch works with units
dtime = datetime(2000, 1, 1)
fig, ax = plt.subplots()
arrow = FancyArrowPatch((0, dtime), (0.01, dtime))
ax.add_patch(arrow)
|
25720b872b0bcdea72ccc52617b48fcf0051062e4de00ba57c8afb83288852ba
|
import numpy as np
from io import BytesIO
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
import matplotlib.path as mpath
import matplotlib.patches as mpatches
from matplotlib.ticker import FuncFormatter
@image_comparison(baseline_images=['bbox_inches_tight'], remove_text=True,
savefig_kwarg=dict(bbox_inches='tight'))
def test_bbox_inches_tight():
#: Test that a figure saved using bbox_inches='tight' is clipped correctly
data = [[66386, 174296, 75131, 577908, 32015],
[58230, 381139, 78045, 99308, 160454],
[89135, 80552, 152558, 497981, 603535],
[78415, 81858, 150656, 193263, 69638],
[139361, 331509, 343164, 781380, 52269]]
colLabels = rowLabels = [''] * 5
rows = len(data)
ind = np.arange(len(colLabels)) + 0.3 # the x locations for the groups
cellText = []
width = 0.4 # the width of the bars
yoff = np.zeros(len(colLabels))
# the bottom values for stacked bar chart
fig, ax = plt.subplots(1, 1)
for row in range(rows):
ax.bar(ind, data[row], width, bottom=yoff, align='edge', color='b')
yoff = yoff + data[row]
cellText.append([''])
plt.xticks([])
plt.legend([''] * 5, loc=(1.2, 0.2))
# Add a table at the bottom of the axes
cellText.reverse()
the_table = plt.table(cellText=cellText,
rowLabels=rowLabels,
colLabels=colLabels, loc='bottom')
@image_comparison(baseline_images=['bbox_inches_tight_suptile_legend'],
remove_text=False, savefig_kwarg={'bbox_inches': 'tight'})
def test_bbox_inches_tight_suptile_legend():
plt.plot(np.arange(10), label='a straight line')
plt.legend(bbox_to_anchor=(0.9, 1), loc='upper left')
plt.title('Axis title')
plt.suptitle('Figure title')
# put an extra long y tick on to see that the bbox is accounted for
def y_formatter(y, pos):
if int(y) == 4:
return 'The number 4'
else:
return str(y)
plt.gca().yaxis.set_major_formatter(FuncFormatter(y_formatter))
plt.xlabel('X axis')
@image_comparison(baseline_images=['bbox_inches_tight_clipping'],
remove_text=True, savefig_kwarg={'bbox_inches': 'tight'})
def test_bbox_inches_tight_clipping():
# tests bbox clipping on scatter points, and path clipping on a patch
# to generate an appropriately tight bbox
plt.scatter(np.arange(10), np.arange(10))
ax = plt.gca()
ax.set_xlim([0, 5])
ax.set_ylim([0, 5])
# make a massive rectangle and clip it with a path
patch = mpatches.Rectangle([-50, -50], 100, 100,
transform=ax.transData,
facecolor='blue', alpha=0.5)
path = mpath.Path.unit_regular_star(5).deepcopy()
path.vertices *= 0.25
patch.set_clip_path(path, transform=ax.transAxes)
plt.gcf().artists.append(patch)
@image_comparison(baseline_images=['bbox_inches_tight_raster'],
remove_text=True, savefig_kwarg={'bbox_inches': 'tight'})
def test_bbox_inches_tight_raster():
"""Test rasterization with tight_layout"""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1.0, 2.0], rasterized=True)
def test_only_on_non_finite_bbox():
fig, ax = plt.subplots()
ax.annotate("", xy=(0, float('nan')))
ax.set_axis_off()
# we only need to test that it does not error out on save
fig.savefig(BytesIO(), bbox_inches='tight', format='png')
|
6fcaf633f30f44bd0a44f77e58b140bbc98bfe890e13323a9b7760dc9de1d3eb
|
import numpy as np
import pytest
from matplotlib import cm
import matplotlib.colors as mcolors
from matplotlib import rc_context
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
from matplotlib.colors import (BoundaryNorm, LogNorm, PowerNorm, Normalize,
DivergingNorm)
from matplotlib.colorbar import ColorbarBase, _ColorbarLogLocator
from matplotlib.ticker import LogLocator, LogFormatter, FixedLocator
def _get_cmap_norms():
"""
Define a colormap and appropriate norms for each of the four
possible settings of the extend keyword.
    Helper function for _colorbar_extension_shape and
    _colorbar_extension_length.
"""
# Create a color map and specify the levels it represents.
cmap = cm.get_cmap("RdBu", lut=5)
clevs = [-5., -2.5, -.5, .5, 1.5, 3.5]
# Define norms for the color maps.
norms = dict()
norms['neither'] = BoundaryNorm(clevs, len(clevs) - 1)
norms['min'] = BoundaryNorm([-10] + clevs[1:], len(clevs) - 1)
norms['max'] = BoundaryNorm(clevs[:-1] + [10], len(clevs) - 1)
norms['both'] = BoundaryNorm([-10] + clevs[1:-1] + [10], len(clevs) - 1)
return cmap, norms
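# Illustrative sketch (not part of the original test module): how one of the
# norms built above can be used with a plain pyplot colorbar, rather than the
# ColorbarBase helpers that follow.  The data values are arbitrary.
def _example_boundarynorm_colorbar():
    cmap, norms = _get_cmap_norms()
    fig, ax = plt.subplots()
    data = np.random.uniform(-6, 4, size=(10, 10))
    mesh = ax.pcolormesh(data, cmap=cmap, norm=norms['both'])
    fig.colorbar(mesh, ax=ax, extend='both')
    return fig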
def _colorbar_extension_shape(spacing):
'''
Produce 4 colorbars with rectangular extensions for either uniform
or proportional spacing.
Helper function for test_colorbar_extension_shape.
'''
# Get a colormap and appropriate norms for each extension type.
cmap, norms = _get_cmap_norms()
# Create a figure and adjust whitespace for subplots.
fig = plt.figure()
fig.subplots_adjust(hspace=4)
for i, extension_type in enumerate(('neither', 'min', 'max', 'both')):
# Get the appropriate norm and use it to get colorbar boundaries.
norm = norms[extension_type]
boundaries = values = norm.boundaries
# Create a subplot.
cax = fig.add_subplot(4, 1, i + 1)
# Generate the colorbar.
cb = ColorbarBase(cax, cmap=cmap, norm=norm,
boundaries=boundaries, values=values,
extend=extension_type, extendrect=True,
orientation='horizontal', spacing=spacing)
# Turn off text and ticks.
cax.tick_params(left=False, labelleft=False,
bottom=False, labelbottom=False)
# Return the figure to the caller.
return fig
def _colorbar_extension_length(spacing):
'''
Produce 12 colorbars with variable length extensions for either
uniform or proportional spacing.
Helper function for test_colorbar_extension_length.
'''
# Get a colormap and appropriate norms for each extension type.
cmap, norms = _get_cmap_norms()
# Create a figure and adjust whitespace for subplots.
fig = plt.figure()
fig.subplots_adjust(hspace=.6)
for i, extension_type in enumerate(('neither', 'min', 'max', 'both')):
# Get the appropriate norm and use it to get colorbar boundaries.
norm = norms[extension_type]
boundaries = values = norm.boundaries
for j, extendfrac in enumerate((None, 'auto', 0.1)):
# Create a subplot.
cax = fig.add_subplot(12, 1, i*3 + j + 1)
# Generate the colorbar.
ColorbarBase(cax, cmap=cmap, norm=norm,
boundaries=boundaries, values=values,
extend=extension_type, extendfrac=extendfrac,
orientation='horizontal', spacing=spacing)
# Turn off text and ticks.
cax.tick_params(left=False, labelleft=False,
bottom=False, labelbottom=False)
# Return the figure to the caller.
return fig
@image_comparison(
baseline_images=['colorbar_extensions_shape_uniform',
'colorbar_extensions_shape_proportional'],
extensions=['png'])
def test_colorbar_extension_shape():
'''Test rectangular colorbar extensions.'''
# Create figures for uniform and proportionally spaced colorbars.
_colorbar_extension_shape('uniform')
_colorbar_extension_shape('proportional')
@image_comparison(baseline_images=['colorbar_extensions_uniform',
'colorbar_extensions_proportional'],
extensions=['png'])
def test_colorbar_extension_length():
'''Test variable length colorbar extensions.'''
# Create figures for uniform and proportionally spaced colorbars.
_colorbar_extension_length('uniform')
_colorbar_extension_length('proportional')
@image_comparison(baseline_images=['cbar_with_orientation',
'cbar_locationing',
'double_cbar',
'cbar_sharing',
],
extensions=['png'], remove_text=True,
savefig_kwarg={'dpi': 40})
def test_colorbar_positioning():
data = np.arange(1200).reshape(30, 40)
levels = [0, 200, 400, 600, 800, 1000, 1200]
# -------------------
plt.figure()
plt.contourf(data, levels=levels)
plt.colorbar(orientation='horizontal', use_gridspec=False)
locations = ['left', 'right', 'top', 'bottom']
plt.figure()
for i, location in enumerate(locations):
plt.subplot(2, 2, i + 1)
plt.contourf(data, levels=levels)
plt.colorbar(location=location, use_gridspec=False)
# -------------------
plt.figure()
# make some other data (random integers)
data_2nd = np.array([[2, 3, 2, 3], [1.5, 2, 2, 3], [2, 3, 3, 4]])
# make the random data expand to the shape of the main data
data_2nd = np.repeat(np.repeat(data_2nd, 10, axis=1), 10, axis=0)
color_mappable = plt.contourf(data, levels=levels, extend='both')
# test extend frac here
hatch_mappable = plt.contourf(data_2nd, levels=[1, 2, 3], colors='none',
hatches=['/', 'o', '+'], extend='max')
plt.contour(hatch_mappable, colors='black')
plt.colorbar(color_mappable, location='left', label='variable 1',
use_gridspec=False)
plt.colorbar(hatch_mappable, location='right', label='variable 2',
use_gridspec=False)
# -------------------
plt.figure()
ax1 = plt.subplot(211, anchor='NE', aspect='equal')
plt.contourf(data, levels=levels)
ax2 = plt.subplot(223)
plt.contourf(data, levels=levels)
ax3 = plt.subplot(224)
plt.contourf(data, levels=levels)
plt.colorbar(ax=[ax2, ax3, ax1], location='right', pad=0.0, shrink=0.5,
panchor=False, use_gridspec=False)
plt.colorbar(ax=[ax2, ax3, ax1], location='left', shrink=0.5,
panchor=False, use_gridspec=False)
plt.colorbar(ax=[ax1], location='bottom', panchor=False,
anchor=(0.8, 0.5), shrink=0.6, use_gridspec=False)
@image_comparison(baseline_images=['cbar_with_subplots_adjust'],
extensions=['png'], remove_text=True,
savefig_kwarg={'dpi': 40})
def test_gridspec_make_colorbar():
plt.figure()
data = np.arange(1200).reshape(30, 40)
levels = [0, 200, 400, 600, 800, 1000, 1200]
plt.subplot(121)
plt.contourf(data, levels=levels)
plt.colorbar(use_gridspec=True, orientation='vertical')
plt.subplot(122)
plt.contourf(data, levels=levels)
plt.colorbar(use_gridspec=True, orientation='horizontal')
plt.subplots_adjust(top=0.95, right=0.95, bottom=0.2, hspace=0.25)
@image_comparison(baseline_images=['colorbar_single_scatter'],
extensions=['png'], remove_text=True,
savefig_kwarg={'dpi': 40})
def test_colorbar_single_scatter():
# Issue #2642: if a path collection has only one entry,
# the norm scaling within the colorbar must ensure a
# finite range, otherwise a zero denominator will occur in _locate.
plt.figure()
x = y = [0]
z = [50]
cmap = plt.get_cmap('jet', 16)
cs = plt.scatter(x, y, z, c=z, cmap=cmap)
plt.colorbar(cs)
@pytest.mark.parametrize('use_gridspec', [False, True],
ids=['no gridspec', 'with gridspec'])
def test_remove_from_figure(use_gridspec):
"""
Test `remove_from_figure` with the specified ``use_gridspec`` setting
"""
fig, ax = plt.subplots()
sc = ax.scatter([1, 2], [3, 4], cmap="spring")
sc.set_array(np.array([5, 6]))
pre_figbox = np.array(ax.figbox)
cb = fig.colorbar(sc, use_gridspec=use_gridspec)
fig.subplots_adjust()
cb.remove()
fig.subplots_adjust()
post_figbox = np.array(ax.figbox)
assert (pre_figbox == post_figbox).all()
def test_colorbarbase():
# smoke test from #3805
ax = plt.gca()
ColorbarBase(ax, plt.cm.bone)
@image_comparison(
baseline_images=['colorbar_closed_patch'],
remove_text=True)
def test_colorbar_closed_patch():
fig = plt.figure(figsize=(8, 6))
ax1 = fig.add_axes([0.05, 0.85, 0.9, 0.1])
ax2 = fig.add_axes([0.1, 0.65, 0.75, 0.1])
ax3 = fig.add_axes([0.05, 0.45, 0.9, 0.1])
ax4 = fig.add_axes([0.05, 0.25, 0.9, 0.1])
ax5 = fig.add_axes([0.05, 0.05, 0.9, 0.1])
cmap = cm.get_cmap("RdBu", lut=5)
im = ax1.pcolormesh(np.linspace(0, 10, 16).reshape((4, 4)), cmap=cmap)
# The use of a "values" kwarg here is unusual. It works only
# because it is matched to the data range in the image and to
# the number of colors in the LUT.
values = np.linspace(0, 10, 5)
cbar_kw = dict(cmap=cmap, orientation='horizontal', values=values,
ticks=[])
# The wide line is to show that the closed path is being handled
# correctly. See PR #4186.
with rc_context({'axes.linewidth': 16}):
plt.colorbar(im, cax=ax2, extend='both', extendfrac=0.5, **cbar_kw)
plt.colorbar(im, cax=ax3, extend='both', **cbar_kw)
plt.colorbar(im, cax=ax4, extend='both', extendrect=True, **cbar_kw)
plt.colorbar(im, cax=ax5, extend='neither', **cbar_kw)
def test_colorbar_ticks():
# test fix for #5673
fig, ax = plt.subplots()
x = np.arange(-3.0, 4.001)
y = np.arange(-4.0, 3.001)
X, Y = np.meshgrid(x, y)
Z = X * Y
clevs = np.array([-12, -5, 0, 5, 12], dtype=float)
colors = ['r', 'g', 'b', 'c']
cs = ax.contourf(X, Y, Z, clevs, colors=colors)
cbar = fig.colorbar(cs, ax=ax, extend='neither',
orientation='horizontal', ticks=clevs)
assert len(cbar.ax.xaxis.get_ticklocs()) == len(clevs)
def test_colorbar_minorticks_on_off():
# test for github issue #11510 and PR #11584
np.random.seed(seed=12345)
data = np.random.randn(20, 20)
with rc_context({'_internal.classic_mode': False}):
fig, ax = plt.subplots()
# purposefully setting vmin and vmax to odd fractions
# so as to check for the correct locations of the minor ticks
im = ax.pcolormesh(data, vmin=-2.3, vmax=3.3)
cbar = fig.colorbar(im, extend='both')
cbar.minorticks_on()
correct_minorticklocs = np.array([-2.2, -1.8, -1.6, -1.4, -1.2, -0.8,
-0.6, -0.4, -0.2, 0.2, 0.4, 0.6,
0.8, 1.2, 1.4, 1.6, 1.8, 2.2, 2.4,
2.6, 2.8, 3.2])
# testing after minorticks_on()
np.testing.assert_almost_equal(cbar.ax.yaxis.get_minorticklocs(),
correct_minorticklocs)
cbar.minorticks_off()
# testing after minorticks_off()
np.testing.assert_almost_equal(cbar.ax.yaxis.get_minorticklocs(),
np.array([]))
im.set_clim(vmin=-1.2, vmax=1.2)
cbar.minorticks_on()
correct_minorticklocs = np.array([-1.2, -1.1, -0.9, -0.8, -0.7, -0.6,
-0.4, -0.3, -0.2, -0.1, 0.1, 0.2,
0.3, 0.4, 0.6, 0.7, 0.8, 0.9,
1.1, 1.2])
np.testing.assert_almost_equal(cbar.ax.yaxis.get_minorticklocs(),
correct_minorticklocs)
# tests for github issue #13257 and PR #13265
data = np.random.uniform(low=1, high=10, size=(20, 20))
fig, ax = plt.subplots()
im = ax.pcolormesh(data, norm=LogNorm())
cbar = fig.colorbar(im)
default_minorticklocks = cbar.ax.yaxis.get_minorticklocs()
# test that minorticks turn off for LogNorm
cbar.minorticks_off()
assert np.array_equal(cbar.ax.yaxis.get_minorticklocs(),
np.array([]))
# test that minorticks turn back on for LogNorm
cbar.minorticks_on()
assert np.array_equal(cbar.ax.yaxis.get_minorticklocs(),
default_minorticklocks)
# test issue #13339: minorticks for LogNorm should stay off
cbar.minorticks_off()
cbar.set_ticks([3, 5, 7, 9])
assert np.array_equal(cbar.ax.yaxis.get_minorticklocs(),
np.array([]))
def test_colorbar_autoticks():
    # Test the new autotick modes. Needs non-classic mode because classic
    # mode does not go through this code path.
with rc_context({'_internal.classic_mode': False}):
fig, ax = plt.subplots(2, 1)
x = np.arange(-3.0, 4.001)
y = np.arange(-4.0, 3.001)
X, Y = np.meshgrid(x, y)
Z = X * Y
pcm = ax[0].pcolormesh(X, Y, Z)
cbar = fig.colorbar(pcm, ax=ax[0], extend='both',
orientation='vertical')
pcm = ax[1].pcolormesh(X, Y, Z)
cbar2 = fig.colorbar(pcm, ax=ax[1], extend='both',
orientation='vertical', shrink=0.4)
np.testing.assert_almost_equal(cbar.ax.yaxis.get_ticklocs(),
np.arange(-10, 11., 5.))
np.testing.assert_almost_equal(cbar2.ax.yaxis.get_ticklocs(),
np.arange(-10, 11., 10.))
def test_colorbar_autotickslog():
# Test new autotick modes...
with rc_context({'_internal.classic_mode': False}):
fig, ax = plt.subplots(2, 1)
x = np.arange(-3.0, 4.001)
y = np.arange(-4.0, 3.001)
X, Y = np.meshgrid(x, y)
Z = X * Y
pcm = ax[0].pcolormesh(X, Y, 10**Z, norm=LogNorm())
cbar = fig.colorbar(pcm, ax=ax[0], extend='both',
orientation='vertical')
pcm = ax[1].pcolormesh(X, Y, 10**Z, norm=LogNorm())
cbar2 = fig.colorbar(pcm, ax=ax[1], extend='both',
orientation='vertical', shrink=0.4)
np.testing.assert_almost_equal(cbar.ax.yaxis.get_ticklocs(),
10**np.arange(-12, 12.2, 4.))
np.testing.assert_almost_equal(cbar2.ax.yaxis.get_ticklocs(),
10**np.arange(-12, 13., 12.))
def test_colorbar_get_ticks():
# test feature for #5792
plt.figure()
data = np.arange(1200).reshape(30, 40)
levels = [0, 200, 400, 600, 800, 1000, 1200]
plt.subplot()
plt.contourf(data, levels=levels)
# testing getter for user set ticks
userTicks = plt.colorbar(ticks=[0, 600, 1200])
assert userTicks.get_ticks().tolist() == [0, 600, 1200]
# testing for getter after calling set_ticks
userTicks.set_ticks([600, 700, 800])
assert userTicks.get_ticks().tolist() == [600, 700, 800]
# testing for getter after calling set_ticks with some ticks out of bounds
userTicks.set_ticks([600, 1300, 1400, 1500])
assert userTicks.get_ticks().tolist() == [600]
# testing getter when no ticks are assigned
defTicks = plt.colorbar(orientation='horizontal')
assert defTicks.get_ticks().tolist() == levels
def test_colorbar_lognorm_extension():
# Test that colorbar with lognorm is extended correctly
f, ax = plt.subplots()
cb = ColorbarBase(ax, norm=LogNorm(vmin=0.1, vmax=1000.0),
orientation='vertical', extend='both')
assert cb._values[0] >= 0.0
def test_colorbar_powernorm_extension():
# Test that colorbar with powernorm is extended correctly
f, ax = plt.subplots()
cb = ColorbarBase(ax, norm=PowerNorm(gamma=0.5, vmin=0.0, vmax=1.0),
orientation='vertical', extend='both')
assert cb._values[0] >= 0.0
def test_colorbar_axes_kw():
    # Test fix for #8493: this only checks that axes-related keywords are
    # accepted and do not raise an exception.
plt.figure()
plt.imshow([[1, 2], [3, 4]])
plt.colorbar(orientation='horizontal', fraction=0.2, pad=0.2, shrink=0.5,
aspect=10, anchor=(0., 0.), panchor=(0., 1.))
def test_colorbar_log_minortick_labels():
with rc_context({'_internal.classic_mode': False}):
fig, ax = plt.subplots()
pcm = ax.imshow([[10000, 50000]], norm=LogNorm())
cb = fig.colorbar(pcm)
fig.canvas.draw()
lb = cb.ax.yaxis.get_ticklabels(which='both')
expected = [r'$\mathdefault{10^{4}}$',
r'$\mathdefault{2\times10^{4}}$',
r'$\mathdefault{3\times10^{4}}$',
r'$\mathdefault{4\times10^{4}}$']
for l, exp in zip(lb, expected):
assert l.get_text() == exp
def test_colorbar_renorm():
x, y = np.ogrid[-4:4:31j, -4:4:31j]
z = 120000*np.exp(-x**2 - y**2)
fig, ax = plt.subplots()
im = ax.imshow(z)
cbar = fig.colorbar(im)
assert np.allclose(cbar.ax.yaxis.get_majorticklocs(),
np.arange(0, 120000.1, 15000))
cbar.set_ticks([1, 2, 3])
assert isinstance(cbar.locator, FixedLocator)
norm = LogNorm(z.min(), z.max())
im.set_norm(norm)
assert isinstance(cbar.locator, _ColorbarLogLocator)
assert np.allclose(cbar.ax.yaxis.get_majorticklocs(),
np.logspace(-8, 5, 14))
# note that set_norm removes the FixedLocator...
assert np.isclose(cbar.vmin, z.min())
cbar.set_ticks([1, 2, 3])
assert isinstance(cbar.locator, FixedLocator)
assert np.allclose(cbar.ax.yaxis.get_majorticklocs(),
[1.0, 2.0, 3.0])
norm = LogNorm(z.min() * 1000, z.max() * 1000)
im.set_norm(norm)
assert np.isclose(cbar.vmin, z.min() * 1000)
assert np.isclose(cbar.vmax, z.max() * 1000)
def test_colorbar_format():
# make sure that format is passed properly
x, y = np.ogrid[-4:4:31j, -4:4:31j]
z = 120000*np.exp(-x**2 - y**2)
fig, ax = plt.subplots()
im = ax.imshow(z)
cbar = fig.colorbar(im, format='%4.2e')
fig.canvas.draw()
assert cbar.ax.yaxis.get_ticklabels()[4].get_text() == '6.00e+04'
    # make sure that the formatting is *not* lost if we change the clim of
    # the mappable:
im.set_clim([4, 200])
fig.canvas.draw()
assert cbar.ax.yaxis.get_ticklabels()[4].get_text() == '8.00e+01'
# but if we change the norm:
im.set_norm(LogNorm(vmin=0.1, vmax=10))
fig.canvas.draw()
assert (cbar.ax.yaxis.get_ticklabels()[0].get_text() ==
r'$\mathdefault{10^{-1}}$')
def test_colorbar_scale_reset():
x, y = np.ogrid[-4:4:31j, -4:4:31j]
z = 120000*np.exp(-x**2 - y**2)
fig, ax = plt.subplots()
pcm = ax.pcolormesh(z, cmap='RdBu_r', rasterized=True)
cbar = fig.colorbar(pcm, ax=ax)
assert cbar.ax.yaxis.get_scale() == 'linear'
pcm.set_norm(LogNorm(vmin=1, vmax=100))
assert cbar.ax.yaxis.get_scale() == 'log'
pcm.set_norm(Normalize(vmin=-20, vmax=20))
assert cbar.ax.yaxis.get_scale() == 'linear'
def test_colorbar_default_ticks():
    # Distinct from test_colorbar_get_ticks above (which checks user-set
    # ticks); this checks the default tick locations for a pcolormesh.
with rc_context({'_internal.classic_mode': False}):
fig, ax = plt.subplots()
np.random.seed(19680801)
pc = ax.pcolormesh(np.random.rand(30, 30))
cb = fig.colorbar(pc)
np.testing.assert_allclose(cb.get_ticks(), [0.2, 0.4, 0.6, 0.8])
def test_colorbar_inverted_ticks():
fig, axs = plt.subplots(2)
ax = axs[0]
pc = ax.pcolormesh(10**np.arange(1, 5).reshape(2, 2), norm=LogNorm())
cbar = fig.colorbar(pc, ax=ax, extend='both')
ticks = cbar.get_ticks()
cbar.ax.invert_yaxis()
np.testing.assert_allclose(ticks, cbar.get_ticks())
ax = axs[1]
pc = ax.pcolormesh(np.arange(1, 5).reshape(2, 2))
cbar = fig.colorbar(pc, ax=ax, extend='both')
cbar.minorticks_on()
ticks = cbar.get_ticks()
minorticks = cbar.get_ticks(minor=True)
cbar.ax.invert_yaxis()
np.testing.assert_allclose(ticks, cbar.get_ticks())
np.testing.assert_allclose(minorticks, cbar.get_ticks(minor=True))
def test_extend_colorbar_customnorm():
# This was a funny error with DivergingNorm, maybe with other norms,
# when extend='both'
N = 100
X, Y = np.mgrid[-3:3:complex(0, N), -2:2:complex(0, N)]
Z1 = np.exp(-X**2 - Y**2)
Z2 = np.exp(-(X - 1)**2 - (Y - 1)**2)
Z = (Z1 - Z2) * 2
fig, ax = plt.subplots(2, 1)
pcm = ax[0].pcolormesh(X, Y, Z,
norm=DivergingNorm(vcenter=0., vmin=-2, vmax=1),
cmap='RdBu_r')
cb = fig.colorbar(pcm, ax=ax[0], extend='both')
np.testing.assert_allclose(cb.ax.get_position().extents,
[0.78375, 0.536364, 0.796147, 0.9], rtol=1e-3)
def test_mappable_no_alpha():
fig, ax = plt.subplots()
sm = cm.ScalarMappable(norm=mcolors.Normalize(), cmap='viridis')
fig.colorbar(sm)
sm.set_cmap('plasma')
plt.draw()
|
553802a0a217ba74c68bbe8309b6cc5e14acf7c849f3148ad2a6bfbb52f05266
|
"""
Tests specific to the lines module.
"""
import itertools
import timeit
from cycler import cycler
import numpy as np
import pytest
import matplotlib
import matplotlib.lines as mlines
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison, check_figures_equal
# Runtimes on a loaded system are inherently flaky. Not so much that a rerun
# won't help, hopefully.
@pytest.mark.flaky(reruns=3)
def test_invisible_Line_rendering():
"""
    GitHub issue #1256 identified a bug in the Line2D.draw method: despite
    the visibility attribute being set to False, the draw method was not
    returning early enough, and some unnecessary pre-rendering code was
    still executed.
    The consequence was an excessive draw time for invisible Line2D
    instances holding a large number of points (Npts > 10**6).
"""
# Creates big x and y data:
N = 10**7
x = np.linspace(0, 1, N)
y = np.random.normal(size=N)
# Create a plot figure:
fig = plt.figure()
ax = plt.subplot(111)
# Create a "big" Line instance:
l = mlines.Line2D(x, y)
l.set_visible(False)
# but don't add it to the Axis instance `ax`
# [here Interactive panning and zooming is pretty responsive]
# Time the canvas drawing:
t_no_line = min(timeit.repeat(fig.canvas.draw, number=1, repeat=3))
# (gives about 25 ms)
# Add the big invisible Line:
ax.add_line(l)
# [Now interactive panning and zooming is very slow]
# Time the canvas drawing:
t_invisible_line = min(timeit.repeat(fig.canvas.draw, number=1, repeat=3))
# gives about 290 ms for N = 10**7 pts
slowdown_factor = t_invisible_line / t_no_line
slowdown_threshold = 2 # trying to avoid false positive failures
assert slowdown_factor < slowdown_threshold
def test_set_line_coll_dash():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
np.random.seed(0)
# Testing setting linestyles for line collections.
# This should not produce an error.
cs = ax.contour(np.random.randn(20, 30), linestyles=[(0, (3, 3))])
assert True
@image_comparison(baseline_images=['line_dashes'], remove_text=True)
def test_line_dashes():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(range(10), linestyle=(0, (3, 3)), lw=5)
def test_line_colors():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(range(10), color='none')
ax.plot(range(10), color='r')
ax.plot(range(10), color='.3')
ax.plot(range(10), color=(1, 0, 0, 1))
ax.plot(range(10), color=(1, 0, 0))
fig.canvas.draw()
assert True
def test_linestyle_variants():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
for ls in ["-", "solid", "--", "dashed",
"-.", "dashdot", ":", "dotted"]:
ax.plot(range(10), linestyle=ls)
fig.canvas.draw()
assert True
def test_valid_linestyles():
line = mlines.Line2D([], [])
with pytest.raises(ValueError):
line.set_linestyle('aardvark')
@image_comparison(baseline_images=['drawstyle_variants'], remove_text=True,
extensions=["png"])
def test_drawstyle_variants():
fig, axs = plt.subplots(6)
dss = ["default", "steps-mid", "steps-pre", "steps-post", "steps", None]
# We want to check that drawstyles are properly handled even for very long
# lines (for which the subslice optimization is on); however, we need
# to zoom in so that the difference between the drawstyles is actually
# visible.
for ax, ds in zip(axs.flat, dss):
ax.plot(range(2000), drawstyle=ds)
ax.set(xlim=(0, 2), ylim=(0, 2))
def test_valid_drawstyles():
line = mlines.Line2D([], [])
with pytest.raises(ValueError):
line.set_drawstyle('foobar')
def test_set_drawstyle():
x = np.linspace(0, 2*np.pi, 10)
y = np.sin(x)
fig, ax = plt.subplots()
line, = ax.plot(x, y)
line.set_drawstyle("steps-pre")
assert len(line.get_path().vertices) == 2*len(x)-1
line.set_drawstyle("default")
assert len(line.get_path().vertices) == len(x)
@image_comparison(baseline_images=['line_collection_dashes'],
remove_text=True, style='mpl20')
def test_set_line_coll_dash_image():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
np.random.seed(0)
cs = ax.contour(np.random.randn(20, 30), linestyles=[(0, (3, 3))])
@image_comparison(baseline_images=['marker_fill_styles'], remove_text=True,
extensions=['png'])
def test_marker_fill_styles():
colors = itertools.cycle([[0, 0, 1], 'g', '#ff0000', 'c', 'm', 'y',
np.array([0, 0, 0])])
altcolor = 'lightgreen'
y = np.array([1, 1])
x = np.array([0, 9])
fig, ax = plt.subplots()
for j, marker in enumerate(mlines.Line2D.filled_markers):
for i, fs in enumerate(mlines.Line2D.fillStyles):
color = next(colors)
ax.plot(j * 10 + x, y + i + .5 * (j % 2),
marker=marker,
markersize=20,
markerfacecoloralt=altcolor,
fillstyle=fs,
label=fs,
linewidth=5,
color=color,
markeredgecolor=color,
markeredgewidth=2)
ax.set_ylim([0, 7.5])
ax.set_xlim([-5, 155])
@image_comparison(baseline_images=['scaled_lines'], style='default')
def test_lw_scaling():
th = np.linspace(0, 32)
fig, ax = plt.subplots()
lins_styles = ['dashed', 'dotted', 'dashdot']
cy = cycler(matplotlib.rcParams['axes.prop_cycle'])
for j, (ls, sty) in enumerate(zip(lins_styles, cy)):
for lw in np.linspace(.5, 10, 10):
ax.plot(th, j*np.ones(50) + .1 * lw, linestyle=ls, lw=lw, **sty)
def test_nan_is_sorted():
line = mlines.Line2D([], [])
assert line._is_sorted(np.array([1, 2, 3]))
assert line._is_sorted(np.array([1, np.nan, 3]))
assert not line._is_sorted([3, 5] + [np.nan] * 100 + [0, 2])
@check_figures_equal()
def test_step_markers(fig_test, fig_ref):
fig_test.subplots().step([0, 1], "-o")
fig_ref.subplots().plot([0, 0, 1], [0, 1, 1], "-o", markevery=[0, 2])
|
eaba7ed8f2feb75c199547f4e5daca5b0c3236ecac447d152517e92fa31c3cf0
|
import numpy as np
from io import BytesIO
import os
import re
import tempfile
import warnings
import xml.parsers.expat
import pytest
import matplotlib as mpl
from matplotlib import dviread
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison
with warnings.catch_warnings():
warnings.simplefilter('ignore')
needs_usetex = pytest.mark.skipif(
not mpl.checkdep_usetex(True),
reason="This test needs a TeX installation")
def test_visibility():
fig, ax = plt.subplots()
x = np.linspace(0, 4 * np.pi, 50)
y = np.sin(x)
yerr = np.ones_like(y)
a, b, c = ax.errorbar(x, y, yerr=yerr, fmt='ko')
for artist in b:
artist.set_visible(False)
fd = BytesIO()
fig.savefig(fd, format='svg')
fd.seek(0)
buf = fd.read()
fd.close()
parser = xml.parsers.expat.ParserCreate()
parser.Parse(buf) # this will raise ExpatError if the svg is invalid
@image_comparison(baseline_images=['fill_black_with_alpha'], remove_text=True,
extensions=['svg'])
def test_fill_black_with_alpha():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(x=[0, 0.1, 1], y=[0, 0, 0], c='k', alpha=0.1, s=10000)
@image_comparison(baseline_images=['noscale'], remove_text=True)
def test_noscale():
X, Y = np.meshgrid(np.arange(-5, 5, 1), np.arange(-5, 5, 1))
Z = np.sin(Y ** 2)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.imshow(Z, cmap='gray', interpolation='none')
def test_text_urls():
fig = plt.figure()
test_url = "http://test_text_urls.matplotlib.org"
fig.suptitle("test_text_urls", url=test_url)
fd = BytesIO()
fig.savefig(fd, format='svg')
fd.seek(0)
buf = fd.read().decode()
fd.close()
expected = '<a xlink:href="{0}">'.format(test_url)
assert expected in buf
@image_comparison(baseline_images=['white_space_pre'], extensions=['svg'])
def test_white_space_pre():
plt.rcParams["svg.fonttype"] = "none"
fig = plt.figure()
fig.text(.5, .5, "a b c")
@image_comparison(baseline_images=['bold_font_output'], extensions=['svg'])
def test_bold_font_output():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(np.arange(10), np.arange(10))
ax.set_xlabel('nonbold-xlabel')
ax.set_ylabel('bold-ylabel', fontweight='bold')
ax.set_title('bold-title', fontweight='bold')
@image_comparison(baseline_images=['bold_font_output_with_none_fonttype'],
extensions=['svg'])
def test_bold_font_output_with_none_fonttype():
plt.rcParams['svg.fonttype'] = 'none'
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(np.arange(10), np.arange(10))
ax.set_xlabel('nonbold-xlabel')
ax.set_ylabel('bold-ylabel', fontweight='bold')
ax.set_title('bold-title', fontweight='bold')
def _test_determinism_save(filename, usetex):
# This function is mostly copy&paste from "def test_visibility"
mpl.rc('svg', hashsalt='asdf')
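    # A fixed hashsalt makes the content-based ids embedded in the SVG
    # reproducible from run to run.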
mpl.rc('text', usetex=usetex)
fig = Figure() # Require no GUI.
ax = fig.add_subplot(111)
x = np.linspace(0, 4 * np.pi, 50)
y = np.sin(x)
yerr = np.ones_like(y)
a, b, c = ax.errorbar(x, y, yerr=yerr, fmt='ko')
for artist in b:
artist.set_visible(False)
ax.set_title('A string $1+2+\\sigma$')
ax.set_xlabel('A string $1+2+\\sigma$')
ax.set_ylabel('A string $1+2+\\sigma$')
fig.savefig(filename, format="svg")
@pytest.mark.parametrize(
"filename, usetex",
# unique filenames to allow for parallel testing
[("determinism_notex.svg", False),
pytest.param("determinism_tex.svg", True, marks=needs_usetex)])
def test_determinism(filename, usetex):
import sys
from subprocess import check_output, STDOUT, CalledProcessError
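    # -R turns on hash randomization in the child interpreter, so any
    # hash-ordering dependence in the SVG output would show up as byte
    # differences between the saved files.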
plots = []
for i in range(3):
# Using check_output and setting stderr to STDOUT will capture the real
# problem in the output property of the exception
try:
check_output(
[sys.executable, '-R', '-c',
'import matplotlib; '
'matplotlib._called_from_pytest = True; '
'matplotlib.use("svg", force=True); '
'from matplotlib.tests.test_backend_svg '
'import _test_determinism_save;'
'_test_determinism_save(%r, %r)' % (filename, usetex)],
stderr=STDOUT)
except CalledProcessError as e:
# it's easier to use utf8 and ask for forgiveness than try
# to figure out what the current console has as an
# encoding :-/
print(e.output.decode(encoding="utf-8", errors="ignore"))
raise e
else:
with open(filename, 'rb') as fd:
plots.append(fd.read())
finally:
os.unlink(filename)
for p in plots[1:]:
assert p == plots[0]
@needs_usetex
def test_missing_psfont(monkeypatch):
"""An error is raised if a TeX font lacks a Type-1 equivalent"""
def psfont(*args, **kwargs):
return dviread.PsFont(texname='texfont', psname='Some Font',
effects=None, encoding=None, filename=None)
monkeypatch.setattr(dviread.PsfontsMap, '__getitem__', psfont)
mpl.rc('text', usetex=True)
fig, ax = plt.subplots()
ax.text(0.5, 0.5, 'hello')
with tempfile.TemporaryFile() as tmpfile, pytest.raises(ValueError):
fig.savefig(tmpfile, format='svg')
# Use Computer Modern Sans Serif, not Helvetica (which has no \textwon).
@pytest.mark.style('default')
@needs_usetex
def test_unicode_won():
fig = Figure()
fig.text(.5, .5, r'\textwon', usetex=True)
with BytesIO() as fd:
fig.savefig(fd, format='svg')
buf = fd.getvalue().decode('ascii')
won_id = 'Computer_Modern_Sans_Serif-142'
assert re.search(r'<path d=(.|\s)*?id="{0}"/>'.format(won_id), buf)
assert re.search(r'<use[^/>]*? xlink:href="#{0}"/>'.format(won_id), buf)
|
33c4d207478d3a070bb5e0c9c1249cd4649ae9f61cbc2d0741deb476328c84ab
|
from io import BytesIO
from matplotlib import afm
from matplotlib import font_manager as fm
# See note in afm.py re: use of comma as decimal separator in the
# UnderlineThickness field and re: use of non-ASCII characters in the Notice
# field.
AFM_TEST_DATA = b"""StartFontMetrics 2.0
Comment Comments are ignored.
Comment Creation Date:Mon Nov 13 12:34:11 GMT 2017
FontName MyFont-Bold
EncodingScheme FontSpecific
FullName My Font Bold
FamilyName Test Fonts
Weight Bold
ItalicAngle 0.0
IsFixedPitch false
UnderlinePosition -100
UnderlineThickness 56,789
Version 001.000
Notice Copyright \xa9 2017 No one.
FontBBox 0 -321 1234 369
StartCharMetrics 3
C 0 ; WX 250 ; N space ; B 0 0 0 0 ;
C 42 ; WX 1141 ; N foo ; B 40 60 800 360 ;
C 99 ; WX 583 ; N bar ; B 40 -10 543 210 ;
EndCharMetrics
EndFontMetrics
"""
def test_nonascii_str():
    # This tests that we also decode bytes as utf-8 properly.
    # Otherwise, font files with non-ASCII characters fail to load.
inp_str = "привет"
byte_str = inp_str.encode("utf8")
ret = afm._to_str(byte_str)
assert ret == inp_str
def test_parse_header():
fh = BytesIO(AFM_TEST_DATA)
header = afm._parse_header(fh)
assert header == {
b'StartFontMetrics': 2.0,
b'FontName': 'MyFont-Bold',
b'EncodingScheme': 'FontSpecific',
b'FullName': 'My Font Bold',
b'FamilyName': 'Test Fonts',
b'Weight': 'Bold',
b'ItalicAngle': 0.0,
b'IsFixedPitch': False,
b'UnderlinePosition': -100,
b'UnderlineThickness': 56.789,
b'Version': '001.000',
b'Notice': b'Copyright \xa9 2017 No one.',
b'FontBBox': [0, -321, 1234, 369],
b'StartCharMetrics': 3,
}
def test_parse_char_metrics():
fh = BytesIO(AFM_TEST_DATA)
    afm._parse_header(fh)  # position the file past the header
metrics = afm._parse_char_metrics(fh)
assert metrics == (
{0: (250.0, 'space', [0, 0, 0, 0]),
42: (1141.0, 'foo', [40, 60, 800, 360]),
99: (583.0, 'bar', [40, -10, 543, 210]),
},
{'space': (250.0, 'space', [0, 0, 0, 0]),
'foo': (1141.0, 'foo', [40, 60, 800, 360]),
'bar': (583.0, 'bar', [40, -10, 543, 210]),
})
def test_get_familyname_guessed():
fh = BytesIO(AFM_TEST_DATA)
font = afm.AFM(fh)
del font._header[b'FamilyName'] # remove FamilyName, so we have to guess
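    # The guess strips weight/style suffixes from FullName ('My Font Bold').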
assert font.get_familyname() == 'My Font'
def test_font_manager_weight_normalization():
font = afm.AFM(BytesIO(
AFM_TEST_DATA.replace(b"Weight Bold\n", b"Weight Custom\n")))
assert fm.afmFontProperty("", font).weight == "normal"
|
f4a10f6864f21ddba4eef8b8d246ef6b4dc7164edb9bc2ab1a55a390118ca10d
|
import difflib
import subprocess
import sys
from pathlib import Path
import pytest
import matplotlib as mpl
from matplotlib import pyplot as plt
def test_pyplot_up_to_date():
gen_script = Path(mpl.__file__).parents[2] / "tools/boilerplate.py"
if not gen_script.exists():
pytest.skip("boilerplate.py not found")
orig_contents = Path(plt.__file__).read_text()
try:
subprocess.run([sys.executable, str(gen_script)], check=True)
new_contents = Path(plt.__file__).read_text()
if orig_contents != new_contents:
diff_msg = '\n'.join(
difflib.unified_diff(
orig_contents.split('\n'), new_contents.split('\n'),
fromfile='found pyplot.py',
tofile='expected pyplot.py',
n=0, lineterm=''))
pytest.fail(
"pyplot.py is not up-to-date. Please run "
"'python tools/boilerplate.py' to update pyplot.py. "
"This needs to be done from an environment where your "
"current working copy is installed (e.g. 'pip install -e'd). "
"Here is a diff of unexpected differences:\n%s" % diff_msg
)
finally:
Path(plt.__file__).write_text(orig_contents)
def test_pyplot_box():
fig, ax = plt.subplots()
plt.box(False)
assert not ax.get_frame_on()
plt.box(True)
assert ax.get_frame_on()
plt.box()
assert not ax.get_frame_on()
plt.box()
assert ax.get_frame_on()
def test_stackplot_smoke():
# Small smoke test for stackplot (see #12405)
plt.stackplot([1, 2, 3], [1, 2, 3])
|
55f15f0aae53de369537452debb487a629543b397eff36890e71b17c612dae00
|
"""Tests for tinypages build using sphinx extensions."""
import filecmp
from os.path import join as pjoin, dirname, isdir
from subprocess import Popen, PIPE
import sys
import pytest
pytest.importorskip('sphinx')
def test_tinypages(tmpdir):
html_dir = pjoin(str(tmpdir), 'html')
doctree_dir = pjoin(str(tmpdir), 'doctrees')
# Build the pages with warnings turned into errors
cmd = [sys.executable, '-msphinx', '-W', '-b', 'html', '-d', doctree_dir,
pjoin(dirname(__file__), 'tinypages'), html_dir]
proc = Popen(cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True)
out, err = proc.communicate()
assert proc.returncode == 0, \
"sphinx build failed with stdout:\n{}\nstderr:\n{}\n".format(out, err)
if err:
pytest.fail("sphinx build emitted the following warnings:\n{}"
.format(err))
assert isdir(html_dir)
def plot_file(num):
return pjoin(html_dir, 'some_plots-{0}.png'.format(num))
range_10, range_6, range_4 = [plot_file(i) for i in range(1, 4)]
# Plot 5 is range(6) plot
assert filecmp.cmp(range_6, plot_file(5))
# Plot 7 is range(4) plot
assert filecmp.cmp(range_4, plot_file(7))
# Plot 11 is range(10) plot
assert filecmp.cmp(range_10, plot_file(11))
# Plot 12 uses the old range(10) figure and the new range(6) figure
assert filecmp.cmp(range_10, plot_file('12_00'))
assert filecmp.cmp(range_6, plot_file('12_01'))
# Plot 13 shows close-figs in action
assert filecmp.cmp(range_4, plot_file(13))
# Plot 14 has included source
with open(pjoin(html_dir, 'some_plots.html'), 'rb') as fobj:
html_contents = fobj.read()
assert b'# Only a comment' in html_contents
# check plot defined in external file.
assert filecmp.cmp(range_4, pjoin(html_dir, 'range4.png'))
assert filecmp.cmp(range_6, pjoin(html_dir, 'range6.png'))
# check if figure caption made it into html file
assert b'This is the caption for plot 15.' in html_contents
|
43572a40987a340afe247268dda3cf242228f1e57cfd1e1fdcb9d01dd586ccc6
|
import pytest
import platform
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison
import matplotlib.patches as mpatches
def draw_arrow(ax, t, r):
ax.annotate('', xy=(0.5, 0.5 + r), xytext=(0.5, 0.5), size=30,
arrowprops=dict(arrowstyle=t,
fc="b", ec='k'))
@image_comparison(baseline_images=['fancyarrow_test_image'])
def test_fancyarrow():
# Added 0 to test division by zero error described in issue 3930
r = [0.4, 0.3, 0.2, 0.1, 0]
t = ["fancy", "simple", mpatches.ArrowStyle.Fancy()]
fig, axes = plt.subplots(len(t), len(r), squeeze=False,
subplot_kw=dict(aspect=True),
figsize=(8, 4.5))
for i_r, r1 in enumerate(r):
for i_t, t1 in enumerate(t):
ax = axes[i_t, i_r]
draw_arrow(ax, t1, r1)
ax.tick_params(labelleft=False, labelbottom=False)
@image_comparison(baseline_images=['boxarrow_test_image'], extensions=['png'])
def test_boxarrow():
styles = mpatches.BoxStyle.get_styles()
n = len(styles)
spacing = 1.2
figheight = (n * spacing + .5)
fig = plt.figure(figsize=(4 / 1.5, figheight / 1.5))
fontsize = 0.3 * 72
for i, stylename in enumerate(sorted(styles)):
fig.text(0.5, ((n - i) * spacing - 0.5)/figheight, stylename,
ha="center",
size=fontsize,
transform=fig.transFigure,
bbox=dict(boxstyle=stylename, fc="w", ec="k"))
def __prepare_fancyarrow_dpi_cor_test():
"""
    Convenience function that prepares and returns a FancyArrowPatch. It is
    used to test that the size of the arrow head does not depend on the DPI
    value of the exported picture.
NB: this function *is not* a test in itself!
"""
fig2 = plt.figure("fancyarrow_dpi_cor_test", figsize=(4, 3), dpi=50)
ax = fig2.add_subplot(111)
ax.set_xlim([0, 1])
ax.set_ylim([0, 1])
ax.add_patch(mpatches.FancyArrowPatch(posA=(0.3, 0.4), posB=(0.8, 0.6),
lw=3, arrowstyle='->',
mutation_scale=100))
return fig2
@image_comparison(baseline_images=['fancyarrow_dpi_cor_100dpi'],
remove_text=True, extensions=['png'],
tol={'aarch64': 0.02}.get(platform.machine(), 0.0),
savefig_kwarg=dict(dpi=100))
def test_fancyarrow_dpi_cor_100dpi():
"""
    Check the export of a FancyArrowPatch at 100 DPI. The FancyArrowPatch is
    instantiated through a dedicated function because a second test performs
    the same export at a different DPI value.
    Remark: only a rasterized format is tested.
"""
__prepare_fancyarrow_dpi_cor_test()
@image_comparison(baseline_images=['fancyarrow_dpi_cor_200dpi'],
remove_text=True, extensions=['png'],
tol={'aarch64': 0.02}.get(platform.machine(), 0.0),
savefig_kwarg=dict(dpi=200))
def test_fancyarrow_dpi_cor_200dpi():
"""
    Same as test_fancyarrow_dpi_cor_100dpi, but exported at 200 DPI. The
    relative size of the arrow head should be the same.
"""
__prepare_fancyarrow_dpi_cor_test()
@image_comparison(baseline_images=['fancyarrow_dash'],
remove_text=True, extensions=['png'],
style='default')
def test_fancyarrow_dash():
from matplotlib.patches import FancyArrowPatch
fig, ax = plt.subplots()
e = FancyArrowPatch((0, 0), (0.5, 0.5),
arrowstyle='-|>',
connectionstyle='angle3,angleA=0,angleB=90',
mutation_scale=10.0,
linewidth=2,
linestyle='dashed',
color='k')
e2 = FancyArrowPatch((0, 0), (0.5, 0.5),
arrowstyle='-|>',
connectionstyle='angle3',
mutation_scale=10.0,
linewidth=2,
linestyle='dotted',
color='k')
ax.add_patch(e)
ax.add_patch(e2)
@image_comparison(baseline_images=['arrow_styles'], extensions=['png'],
style='mpl20', remove_text=True)
def test_arrow_styles():
styles = mpatches.ArrowStyle.get_styles()
n = len(styles)
fig, ax = plt.subplots(figsize=(6, 10))
ax.set_xlim(0, 1)
ax.set_ylim(-1, n)
for i, stylename in enumerate(sorted(styles)):
patch = mpatches.FancyArrowPatch((0.1, i), (0.8, i),
arrowstyle=stylename,
mutation_scale=25)
ax.add_patch(patch)
@image_comparison(baseline_images=['connection_styles'], extensions=['png'],
style='mpl20', remove_text=True)
def test_connection_styles():
styles = mpatches.ConnectionStyle.get_styles()
n = len(styles)
fig, ax = plt.subplots(figsize=(6, 10))
ax.set_xlim(0, 1)
ax.set_ylim(-1, n)
for i, stylename in enumerate(sorted(styles)):
patch = mpatches.FancyArrowPatch((0.1, i), (0.8, i + 0.5),
arrowstyle="->",
connectionstyle=stylename,
mutation_scale=25)
ax.add_patch(patch)
def test_invalid_intersection():
conn_style_1 = mpatches.ConnectionStyle.Angle3(angleA=20, angleB=200)
p1 = mpatches.FancyArrowPatch((.2, .2), (.5, .5),
connectionstyle=conn_style_1)
with pytest.raises(ValueError):
plt.gca().add_patch(p1)
conn_style_2 = mpatches.ConnectionStyle.Angle3(angleA=20, angleB=199.9)
p2 = mpatches.FancyArrowPatch((.2, .2), (.5, .5),
connectionstyle=conn_style_2)
plt.gca().add_patch(p2)
|
b224d6b8962f8437c9a48a9c160fc8c63d9247768eb97a6a845cd1b3d20ba5b3
|
import copy
import itertools
import numpy as np
import pytest
from numpy.testing import assert_array_equal, assert_array_almost_equal
from matplotlib import cycler
import matplotlib
import matplotlib.colors as mcolors
import matplotlib.cm as cm
import matplotlib.colorbar as mcolorbar
import matplotlib.cbook as cbook
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison
def test_resample():
"""
Github issue #6025 pointed to incorrect ListedColormap._resample;
here we test the method for LinearSegmentedColormap as well.
"""
n = 101
colorlist = np.empty((n, 4), float)
colorlist[:, 0] = np.linspace(0, 1, n)
colorlist[:, 1] = 0.2
colorlist[:, 2] = np.linspace(1, 0, n)
colorlist[:, 3] = 0.7
lsc = mcolors.LinearSegmentedColormap.from_list('lsc', colorlist)
lc = mcolors.ListedColormap(colorlist)
lsc3 = lsc._resample(3)
lc3 = lc._resample(3)
expected = np.array([[0.0, 0.2, 1.0, 0.7],
[0.5, 0.2, 0.5, 0.7],
[1.0, 0.2, 0.0, 0.7]], float)
assert_array_almost_equal(lsc3([0, 0.5, 1]), expected)
assert_array_almost_equal(lc3([0, 0.5, 1]), expected)
def test_colormap_copy():
cm = plt.cm.Reds
cm_copy = copy.copy(cm)
with np.errstate(invalid='ignore'):
ret1 = cm_copy([-1, 0, .5, 1, np.nan, np.inf])
cm2 = copy.copy(cm_copy)
cm2.set_bad('g')
with np.errstate(invalid='ignore'):
ret2 = cm_copy([-1, 0, .5, 1, np.nan, np.inf])
assert_array_equal(ret1, ret2)
def test_colormap_endian():
"""
Github issue #1005: a bug in putmask caused erroneous
mapping of 1.0 when input from a non-native-byteorder
array.
"""
cmap = cm.get_cmap("jet")
# Test under, over, and invalid along with values 0 and 1.
a = [-0.5, 0, 0.5, 1, 1.5, np.nan]
for dt in ["f2", "f4", "f8"]:
anative = np.ma.masked_invalid(np.array(a, dtype=dt))
aforeign = anative.byteswap().newbyteorder()
assert_array_equal(cmap(anative), cmap(aforeign))
def test_BoundaryNorm():
"""
Github issue #1258: interpolation was failing with numpy
1.7 pre-release.
"""
boundaries = [0, 1.1, 2.2]
vals = [-1, 0, 1, 2, 2.2, 4]
# Without interpolation
expected = [-1, 0, 0, 1, 2, 2]
ncolors = len(boundaries) - 1
bn = mcolors.BoundaryNorm(boundaries, ncolors)
assert_array_equal(bn(vals), expected)
# ncolors != len(boundaries) - 1 triggers interpolation
expected = [-1, 0, 0, 2, 3, 3]
ncolors = len(boundaries)
bn = mcolors.BoundaryNorm(boundaries, ncolors)
assert_array_equal(bn(vals), expected)
# more boundaries for a third color
boundaries = [0, 1, 2, 3]
vals = [-1, 0.1, 1.1, 2.2, 4]
ncolors = 5
expected = [-1, 0, 2, 4, 5]
bn = mcolors.BoundaryNorm(boundaries, ncolors)
assert_array_equal(bn(vals), expected)
# a scalar as input should not trigger an error and should return a scalar
boundaries = [0, 1, 2]
vals = [-1, 0.1, 1.1, 2.2]
bn = mcolors.BoundaryNorm(boundaries, 2)
expected = [-1, 0, 1, 2]
for v, ex in zip(vals, expected):
ret = bn(v)
assert isinstance(ret, int)
assert_array_equal(ret, ex)
assert_array_equal(bn([v]), ex)
# same with interp
bn = mcolors.BoundaryNorm(boundaries, 3)
expected = [-1, 0, 2, 3]
for v, ex in zip(vals, expected):
ret = bn(v)
assert isinstance(ret, int)
assert_array_equal(ret, ex)
assert_array_equal(bn([v]), ex)
# Clipping
bn = mcolors.BoundaryNorm(boundaries, 3, clip=True)
expected = [0, 0, 2, 2]
for v, ex in zip(vals, expected):
ret = bn(v)
assert isinstance(ret, int)
assert_array_equal(ret, ex)
assert_array_equal(bn([v]), ex)
# Masked arrays
boundaries = [0, 1.1, 2.2]
vals = np.ma.masked_invalid([-1., np.NaN, 0, 1.4, 9])
# Without interpolation
ncolors = len(boundaries) - 1
bn = mcolors.BoundaryNorm(boundaries, ncolors)
expected = np.ma.masked_array([-1, -99, 0, 1, 2], mask=[0, 1, 0, 0, 0])
assert_array_equal(bn(vals), expected)
# With interpolation
bn = mcolors.BoundaryNorm(boundaries, len(boundaries))
expected = np.ma.masked_array([-1, -99, 0, 2, 3], mask=[0, 1, 0, 0, 0])
assert_array_equal(bn(vals), expected)
# Non-trivial masked arrays
vals = np.ma.masked_invalid([np.Inf, np.NaN])
assert np.all(bn(vals).mask)
vals = np.ma.masked_invalid([np.Inf])
assert np.all(bn(vals).mask)
@pytest.mark.parametrize("vmin,vmax", [[-1, 2], [3, 1]])
def test_lognorm_invalid(vmin, vmax):
# Check that invalid limits in LogNorm error
norm = mcolors.LogNorm(vmin=vmin, vmax=vmax)
with pytest.raises(ValueError):
norm(1)
with pytest.raises(ValueError):
norm.inverse(1)
def test_LogNorm():
"""
    LogNorm used to ignore clip; now it behaves like Normalize, e.g. values
    > vmax map to values greater than 1 without clip, and to exactly 1 with
    clip.
"""
ln = mcolors.LogNorm(clip=True, vmax=5)
assert_array_equal(ln([1, 6]), [0, 1.0])
def test_PowerNorm():
a = np.array([0, 0.5, 1, 1.5], dtype=float)
pnorm = mcolors.PowerNorm(1)
norm = mcolors.Normalize()
assert_array_almost_equal(norm(a), pnorm(a))
a = np.array([-0.5, 0, 2, 4, 8], dtype=float)
expected = [0, 0, 1/16, 1/4, 1]
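    # The expected values follow from norm(x) = ((x - vmin) / (vmax - vmin))
    # ** gamma with gamma=2, vmin=0, vmax=8; inputs below vmin map to 0.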
pnorm = mcolors.PowerNorm(2, vmin=0, vmax=8)
assert_array_almost_equal(pnorm(a), expected)
assert pnorm(a[0]) == expected[0]
assert pnorm(a[2]) == expected[2]
assert_array_almost_equal(a[1:], pnorm.inverse(pnorm(a))[1:])
# Clip = True
a = np.array([-0.5, 0, 1, 8, 16], dtype=float)
expected = [0, 0, 0, 1, 1]
pnorm = mcolors.PowerNorm(2, vmin=2, vmax=8, clip=True)
assert_array_almost_equal(pnorm(a), expected)
assert pnorm(a[0]) == expected[0]
assert pnorm(a[-1]) == expected[-1]
# Clip = True at call time
a = np.array([-0.5, 0, 1, 8, 16], dtype=float)
expected = [0, 0, 0, 1, 1]
pnorm = mcolors.PowerNorm(2, vmin=2, vmax=8, clip=False)
assert_array_almost_equal(pnorm(a, clip=True), expected)
assert pnorm(a[0], clip=True) == expected[0]
assert pnorm(a[-1], clip=True) == expected[-1]
def test_PowerNorm_translation_invariance():
a = np.array([0, 1/2, 1], dtype=float)
expected = [0, 1/8, 1]
pnorm = mcolors.PowerNorm(vmin=0, vmax=1, gamma=3)
assert_array_almost_equal(pnorm(a), expected)
pnorm = mcolors.PowerNorm(vmin=-2, vmax=-1, gamma=3)
assert_array_almost_equal(pnorm(a - 2), expected)
def test_Normalize():
norm = mcolors.Normalize()
vals = np.arange(-10, 10, 1, dtype=float)
_inverse_tester(norm, vals)
_scalar_tester(norm, vals)
_mask_tester(norm, vals)
# Handle integer input correctly (don't overflow when computing max-min,
# i.e. 127-(-128) here).
vals = np.array([-128, 127], dtype=np.int8)
norm = mcolors.Normalize(vals.min(), vals.max())
assert_array_equal(np.asarray(norm(vals)), [0, 1])
# Don't lose precision on longdoubles (float128 on Linux):
# for array inputs...
vals = np.array([1.2345678901, 9.8765432109], dtype=np.longdouble)
norm = mcolors.Normalize(vals.min(), vals.max())
assert_array_equal(np.asarray(norm(vals)), [0, 1])
# and for scalar ones.
eps = np.finfo(np.longdouble).resolution
norm = plt.Normalize(1, 1 + 100 * eps)
# This returns exactly 0.5 when longdouble is extended precision (80-bit),
# but only a value close to it when it is quadruple precision (128-bit).
assert 0 < norm(1 + 50 * eps) < 1
def test_DivergingNorm_autoscale():
norm = mcolors.DivergingNorm(vcenter=20)
norm.autoscale([10, 20, 30, 40])
assert norm.vmin == 10.
assert norm.vmax == 40.
def test_DivergingNorm_autoscale_None_vmin():
norm = mcolors.DivergingNorm(2, vmin=0, vmax=None)
norm.autoscale_None([1, 2, 3, 4, 5])
assert norm(5) == 1
assert norm.vmax == 5
def test_DivergingNorm_autoscale_None_vmax():
norm = mcolors.DivergingNorm(2, vmin=None, vmax=10)
norm.autoscale_None([1, 2, 3, 4, 5])
assert norm(1) == 0
assert norm.vmin == 1
def test_DivergingNorm_scale():
norm = mcolors.DivergingNorm(2)
assert norm.scaled() is False
norm([1, 2, 3, 4])
assert norm.scaled() is True
def test_DivergingNorm_scaleout_center():
# test the vmin never goes above vcenter
norm = mcolors.DivergingNorm(vcenter=0)
x = norm([1, 2, 3, 5])
assert norm.vmin == 0
assert norm.vmax == 5
def test_DivergingNorm_scaleout_center_max():
# test the vmax never goes below vcenter
norm = mcolors.DivergingNorm(vcenter=0)
x = norm([-1, -2, -3, -5])
assert norm.vmax == 0
assert norm.vmin == -5
def test_DivergingNorm_Even():
norm = mcolors.DivergingNorm(vmin=-1, vcenter=0, vmax=4)
vals = np.array([-1.0, -0.5, 0.0, 1.0, 2.0, 3.0, 4.0])
expected = np.array([0.0, 0.25, 0.5, 0.625, 0.75, 0.875, 1.0])
assert_array_equal(norm(vals), expected)
def test_DivergingNorm_Odd():
norm = mcolors.DivergingNorm(vmin=-2, vcenter=0, vmax=5)
vals = np.array([-2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
expected = np.array([0.0, 0.25, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
assert_array_equal(norm(vals), expected)
def test_DivergingNorm_VminEqualsVcenter():
with pytest.raises(ValueError):
norm = mcolors.DivergingNorm(vmin=-2, vcenter=-2, vmax=2)
def test_DivergingNorm_VmaxEqualsVcenter():
with pytest.raises(ValueError):
norm = mcolors.DivergingNorm(vmin=-2, vcenter=2, vmax=2)
def test_DivergingNorm_VminGTVcenter():
with pytest.raises(ValueError):
norm = mcolors.DivergingNorm(vmin=10, vcenter=0, vmax=20)
def test_DivergingNorm_DivergingNorm_VminGTVmax():
with pytest.raises(ValueError):
norm = mcolors.DivergingNorm(vmin=10, vcenter=0, vmax=5)
def test_DivergingNorm_VcenterGTVmax():
vals = np.arange(50)
with pytest.raises(ValueError):
norm = mcolors.DivergingNorm(vmin=10, vcenter=25, vmax=20)
def test_DivergingNorm_premature_scaling():
norm = mcolors.DivergingNorm(vcenter=2)
with pytest.raises(ValueError):
norm.inverse(np.array([0.1, 0.5, 0.9]))
def test_SymLogNorm():
"""
Test SymLogNorm behavior
"""
norm = mcolors.SymLogNorm(3, vmax=5, linscale=1.2)
vals = np.array([-30, -1, 2, 6], dtype=float)
normed_vals = norm(vals)
expected = [0., 0.53980074, 0.826991, 1.02758204]
assert_array_almost_equal(normed_vals, expected)
_inverse_tester(norm, vals)
_scalar_tester(norm, vals)
_mask_tester(norm, vals)
# Ensure that specifying vmin returns the same result as above
norm = mcolors.SymLogNorm(3, vmin=-30, vmax=5, linscale=1.2)
normed_vals = norm(vals)
assert_array_almost_equal(normed_vals, expected)
def test_SymLogNorm_colorbar():
"""
Test un-called SymLogNorm in a colorbar.
"""
norm = mcolors.SymLogNorm(0.1, vmin=-1, vmax=1, linscale=1)
fig = plt.figure()
cbar = mcolorbar.ColorbarBase(fig.add_subplot(111), norm=norm)
plt.close(fig)
def test_SymLogNorm_single_zero():
"""
    Test SymLogNorm to ensure it does not add sub-ticks to the zero label.
"""
fig = plt.figure()
norm = mcolors.SymLogNorm(1e-5, vmin=-1, vmax=1)
cbar = mcolorbar.ColorbarBase(fig.add_subplot(111), norm=norm)
ticks = cbar.get_ticks()
assert sum(ticks == 0) == 1
plt.close(fig)
def _inverse_tester(norm_instance, vals):
"""
Checks if the inverse of the given normalization is working.
"""
assert_array_almost_equal(norm_instance.inverse(norm_instance(vals)), vals)
def _scalar_tester(norm_instance, vals):
"""
Checks if scalars and arrays are handled the same way.
Tests only for float.
"""
scalar_result = [norm_instance(float(v)) for v in vals]
assert_array_almost_equal(scalar_result, norm_instance(vals))
def _mask_tester(norm_instance, vals):
"""
Checks mask handling
"""
masked_array = np.ma.array(vals)
masked_array[0] = np.ma.masked
assert_array_equal(masked_array.mask, norm_instance(masked_array).mask)
@image_comparison(baseline_images=['levels_and_colors'],
extensions=['png'])
def test_cmap_and_norm_from_levels_and_colors():
data = np.linspace(-2, 4, 49).reshape(7, 7)
levels = [-1, 2, 2.5, 3]
colors = ['red', 'green', 'blue', 'yellow', 'black']
extend = 'both'
cmap, norm = mcolors.from_levels_and_colors(levels, colors, extend=extend)
ax = plt.axes()
m = plt.pcolormesh(data, cmap=cmap, norm=norm)
plt.colorbar(m)
# Hide the axes labels (but not the colorbar ones, as they are useful)
ax.tick_params(labelleft=False, labelbottom=False)
def test_cmap_and_norm_from_levels_and_colors2():
levels = [-1, 2, 2.5, 3]
colors = ['red', (0, 1, 0), 'blue', (0.5, 0.5, 0.5), (0.0, 0.0, 0.0, 1.0)]
clr = mcolors.to_rgba_array(colors)
bad = (0.1, 0.1, 0.1, 0.1)
no_color = (0.0, 0.0, 0.0, 0.0)
masked_value = 'masked_value'
# Define the test values which are of interest.
# Note: levels are lev[i] <= v < lev[i+1]
tests = [('both', None, {-2: clr[0],
-1: clr[1],
2: clr[2],
2.25: clr[2],
3: clr[4],
3.5: clr[4],
masked_value: bad}),
('min', -1, {-2: clr[0],
-1: clr[1],
2: clr[2],
2.25: clr[2],
3: no_color,
3.5: no_color,
masked_value: bad}),
('max', -1, {-2: no_color,
-1: clr[0],
2: clr[1],
2.25: clr[1],
3: clr[3],
3.5: clr[3],
masked_value: bad}),
('neither', -2, {-2: no_color,
-1: clr[0],
2: clr[1],
2.25: clr[1],
3: no_color,
3.5: no_color,
masked_value: bad}),
]
for extend, i1, cases in tests:
cmap, norm = mcolors.from_levels_and_colors(levels, colors[0:i1],
extend=extend)
cmap.set_bad(bad)
for d_val, expected_color in cases.items():
if d_val == masked_value:
d_val = np.ma.array([1], mask=True)
else:
d_val = [d_val]
assert_array_equal(expected_color, cmap(norm(d_val))[0],
                               'With extend={0!r} and data '
'value={1!r}'.format(extend, d_val))
with pytest.raises(ValueError):
mcolors.from_levels_and_colors(levels, colors)
def test_rgb_hsv_round_trip():
for a_shape in [(500, 500, 3), (500, 3), (1, 3), (3,)]:
np.random.seed(0)
tt = np.random.random(a_shape)
assert_array_almost_equal(tt,
mcolors.hsv_to_rgb(mcolors.rgb_to_hsv(tt)))
assert_array_almost_equal(tt,
mcolors.rgb_to_hsv(mcolors.hsv_to_rgb(tt)))
def test_autoscale_masked():
# Test for #2336. Previously fully masked data would trigger a ValueError.
data = np.ma.masked_all((12, 20))
plt.pcolor(data)
plt.draw()
@image_comparison(baseline_images=['light_source_shading_topo'],
extensions=['png'])
def test_light_source_topo_surface():
"""Shades a DEM using different v.e.'s and blend modes."""
with cbook.get_sample_data('jacksboro_fault_dem.npz') as file, \
np.load(file) as dem:
elev = dem['elevation']
dx, dy = dem['dx'], dem['dy']
# Get the true cellsize in meters for accurate vertical exaggeration
# Convert from decimal degrees to meters
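        # (one degree of latitude spans roughly 111320 m; the longitudinal
        # spacing additionally shrinks with the cosine of the latitude)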
dx = 111320.0 * dx * np.cos(dem['ymin'])
dy = 111320.0 * dy
ls = mcolors.LightSource(315, 45)
cmap = cm.gist_earth
fig, axes = plt.subplots(nrows=3, ncols=3)
for row, mode in zip(axes, ['hsv', 'overlay', 'soft']):
for ax, ve in zip(row, [0.1, 1, 10]):
rgb = ls.shade(elev, cmap, vert_exag=ve, dx=dx, dy=dy,
blend_mode=mode)
ax.imshow(rgb)
ax.set(xticks=[], yticks=[])
def test_light_source_shading_default():
"""Array comparison test for the default "hsv" blend mode. Ensure the
default result doesn't change without warning."""
y, x = np.mgrid[-1.2:1.2:8j, -1.2:1.2:8j]
z = 10 * np.cos(x**2 + y**2)
cmap = plt.cm.copper
ls = mcolors.LightSource(315, 45)
rgb = ls.shade(z, cmap)
# Result stored transposed and rounded for more compact display...
expect = np.array(
[[[0.00, 0.45, 0.90, 0.90, 0.82, 0.62, 0.28, 0.00],
[0.45, 0.94, 0.99, 1.00, 1.00, 0.96, 0.65, 0.17],
[0.90, 0.99, 1.00, 1.00, 1.00, 1.00, 0.94, 0.35],
[0.90, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 0.49],
[0.82, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 0.41],
[0.62, 0.96, 1.00, 1.00, 1.00, 1.00, 0.90, 0.07],
[0.28, 0.65, 0.94, 1.00, 1.00, 0.90, 0.35, 0.01],
[0.00, 0.17, 0.35, 0.49, 0.41, 0.07, 0.01, 0.00]],
[[0.00, 0.28, 0.59, 0.72, 0.62, 0.40, 0.18, 0.00],
[0.28, 0.78, 0.93, 0.92, 0.83, 0.66, 0.39, 0.11],
[0.59, 0.93, 0.99, 1.00, 0.92, 0.75, 0.50, 0.21],
[0.72, 0.92, 1.00, 0.99, 0.93, 0.76, 0.51, 0.18],
[0.62, 0.83, 0.92, 0.93, 0.87, 0.68, 0.42, 0.08],
[0.40, 0.66, 0.75, 0.76, 0.68, 0.52, 0.23, 0.02],
[0.18, 0.39, 0.50, 0.51, 0.42, 0.23, 0.00, 0.00],
[0.00, 0.11, 0.21, 0.18, 0.08, 0.02, 0.00, 0.00]],
[[0.00, 0.18, 0.38, 0.46, 0.39, 0.26, 0.11, 0.00],
[0.18, 0.50, 0.70, 0.75, 0.64, 0.44, 0.25, 0.07],
[0.38, 0.70, 0.91, 0.98, 0.81, 0.51, 0.29, 0.13],
[0.46, 0.75, 0.98, 0.96, 0.84, 0.48, 0.22, 0.12],
[0.39, 0.64, 0.81, 0.84, 0.71, 0.31, 0.11, 0.05],
[0.26, 0.44, 0.51, 0.48, 0.31, 0.10, 0.03, 0.01],
[0.11, 0.25, 0.29, 0.22, 0.11, 0.03, 0.00, 0.00],
[0.00, 0.07, 0.13, 0.12, 0.05, 0.01, 0.00, 0.00]],
[[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00]]
]).T
assert_array_almost_equal(rgb, expect, decimal=2)
# Numpy 1.9.1 fixed a bug in masked arrays which resulted in additional
# elements being masked when calculating the gradient; thus the output
# differs with earlier numpy versions.
def test_light_source_masked_shading():
"""Array comparison test for a surface with a masked portion. Ensures that
we don't wind up with "fringes" of odd colors around masked regions."""
y, x = np.mgrid[-1.2:1.2:8j, -1.2:1.2:8j]
z = 10 * np.cos(x**2 + y**2)
z = np.ma.masked_greater(z, 9.9)
cmap = plt.cm.copper
ls = mcolors.LightSource(315, 45)
rgb = ls.shade(z, cmap)
# Result stored transposed and rounded for more compact display...
expect = np.array(
[[[0.00, 0.46, 0.91, 0.91, 0.84, 0.64, 0.29, 0.00],
[0.46, 0.96, 1.00, 1.00, 1.00, 0.97, 0.67, 0.18],
[0.91, 1.00, 1.00, 1.00, 1.00, 1.00, 0.96, 0.36],
[0.91, 1.00, 1.00, 0.00, 0.00, 1.00, 1.00, 0.51],
[0.84, 1.00, 1.00, 0.00, 0.00, 1.00, 1.00, 0.44],
[0.64, 0.97, 1.00, 1.00, 1.00, 1.00, 0.94, 0.09],
[0.29, 0.67, 0.96, 1.00, 1.00, 0.94, 0.38, 0.01],
[0.00, 0.18, 0.36, 0.51, 0.44, 0.09, 0.01, 0.00]],
[[0.00, 0.29, 0.61, 0.75, 0.64, 0.41, 0.18, 0.00],
[0.29, 0.81, 0.95, 0.93, 0.85, 0.68, 0.40, 0.11],
[0.61, 0.95, 1.00, 0.78, 0.78, 0.77, 0.52, 0.22],
[0.75, 0.93, 0.78, 0.00, 0.00, 0.78, 0.54, 0.19],
[0.64, 0.85, 0.78, 0.00, 0.00, 0.78, 0.45, 0.08],
[0.41, 0.68, 0.77, 0.78, 0.78, 0.55, 0.25, 0.02],
[0.18, 0.40, 0.52, 0.54, 0.45, 0.25, 0.00, 0.00],
[0.00, 0.11, 0.22, 0.19, 0.08, 0.02, 0.00, 0.00]],
[[0.00, 0.19, 0.39, 0.48, 0.41, 0.26, 0.12, 0.00],
[0.19, 0.52, 0.73, 0.78, 0.66, 0.46, 0.26, 0.07],
[0.39, 0.73, 0.95, 0.50, 0.50, 0.53, 0.30, 0.14],
[0.48, 0.78, 0.50, 0.00, 0.00, 0.50, 0.23, 0.12],
[0.41, 0.66, 0.50, 0.00, 0.00, 0.50, 0.11, 0.05],
[0.26, 0.46, 0.53, 0.50, 0.50, 0.11, 0.03, 0.01],
[0.12, 0.26, 0.30, 0.23, 0.11, 0.03, 0.00, 0.00],
[0.00, 0.07, 0.14, 0.12, 0.05, 0.01, 0.00, 0.00]],
[[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 0.00, 0.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 0.00, 0.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00]],
]).T
assert_array_almost_equal(rgb, expect, decimal=2)
def test_light_source_hillshading():
"""Compare the current hillshading method against one that should be
mathematically equivalent. Illuminates a cone from a range of angles."""
def alternative_hillshade(azimuth, elev, z):
illum = _sph2cart(*_azimuth2math(azimuth, elev))
illum = np.array(illum)
dy, dx = np.gradient(-z)
dy = -dy
dz = np.ones_like(dy)
normals = np.dstack([dx, dy, dz])
normals /= np.linalg.norm(normals, axis=2)[..., None]
intensity = np.tensordot(normals, illum, axes=(2, 0))
intensity -= intensity.min()
intensity /= intensity.ptp()
return intensity
y, x = np.mgrid[5:0:-1, :5]
z = -np.hypot(x - x.mean(), y - y.mean())
for az, elev in itertools.product(range(0, 390, 30), range(0, 105, 15)):
ls = mcolors.LightSource(az, elev)
h1 = ls.hillshade(z)
h2 = alternative_hillshade(az, elev, z)
assert_array_almost_equal(h1, h2)
def test_light_source_planar_hillshading():
"""Ensure that the illumination intensity is correct for planar
surfaces."""
def plane(azimuth, elevation, x, y):
"""Create a plane whose normal vector is at the given azimuth and
elevation."""
theta, phi = _azimuth2math(azimuth, elevation)
a, b, c = _sph2cart(theta, phi)
z = -(a*x + b*y) / c
return z
def angled_plane(azimuth, elevation, angle, x, y):
"""Create a plane whose normal vector is at an angle from the given
azimuth and elevation."""
elevation = elevation + angle
if elevation > 90:
azimuth = (azimuth + 180) % 360
elevation = (90 - elevation) % 90
return plane(azimuth, elevation, x, y)
y, x = np.mgrid[5:0:-1, :5]
for az, elev in itertools.product(range(0, 390, 30), range(0, 105, 15)):
ls = mcolors.LightSource(az, elev)
# Make a plane at a range of angles to the illumination
for angle in range(0, 105, 15):
z = angled_plane(az, elev, angle, x, y)
h = ls.hillshade(z)
assert_array_almost_equal(h, np.cos(np.radians(angle)))
def test_color_names():
assert mcolors.to_hex("blue") == "#0000ff"
assert mcolors.to_hex("xkcd:blue") == "#0343df"
assert mcolors.to_hex("tab:blue") == "#1f77b4"
def _sph2cart(theta, phi):
x = np.cos(theta) * np.sin(phi)
y = np.sin(theta) * np.sin(phi)
z = np.cos(phi)
return x, y, z
def _azimuth2math(azimuth, elevation):
"""Converts from clockwise-from-north and up-from-horizontal to
mathematical conventions."""
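    # theta is the counterclockwise angle from +x (east) in the xy-plane and
    # phi the polar angle measured from the zenith; e.g. azimuth=90 (east),
    # elevation=0 gives theta=0, phi=pi/2.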
theta = np.radians((90 - azimuth) % 360)
phi = np.radians(90 - elevation)
return theta, phi
def test_pandas_iterable(pd):
    # Using a list or a Series yields equivalent colormaps, i.e. the Series
    # isn't seen as a single color.
lst = ['red', 'blue', 'green']
s = pd.Series(lst)
cm1 = mcolors.ListedColormap(lst, N=5)
cm2 = mcolors.ListedColormap(s, N=5)
assert_array_equal(cm1.colors, cm2.colors)
@pytest.mark.parametrize('name', sorted(cm.cmap_d))
def test_colormap_reversing(name):
"""Check the generated _lut data of a colormap and corresponding
reversed colormap if they are almost the same."""
cmap = plt.get_cmap(name)
cmap_r = cmap.reversed()
if not cmap_r._isinit:
cmap._init()
cmap_r._init()
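    # The last three _lut entries hold the under/over/bad colors, so only
    # the reversed color table proper is compared.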
assert_array_almost_equal(cmap._lut[:-3], cmap_r._lut[-4::-1])
def test_cn():
matplotlib.rcParams['axes.prop_cycle'] = cycler('color',
['blue', 'r'])
assert mcolors.to_hex("C0") == '#0000ff'
assert mcolors.to_hex("C1") == '#ff0000'
matplotlib.rcParams['axes.prop_cycle'] = cycler('color',
['xkcd:blue', 'r'])
assert mcolors.to_hex("C0") == '#0343df'
assert mcolors.to_hex("C1") == '#ff0000'
assert mcolors.to_hex("C10") == '#0343df'
assert mcolors.to_hex("C11") == '#ff0000'
matplotlib.rcParams['axes.prop_cycle'] = cycler('color', ['8e4585', 'r'])
assert mcolors.to_hex("C0") == '#8e4585'
    # If '8e4585' were parsed as a float before being detected as a hex
    # color, it would be interpreted as a very large number; this must not
    # happen.
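    # (float('8e4585') overflows to inf, hence the comparison against np.inf
    # below.)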
assert mcolors.to_rgb("C0")[0] != np.inf
def test_conversions():
# to_rgba_array("none") returns a (0, 4) array.
assert_array_equal(mcolors.to_rgba_array("none"), np.zeros((0, 4)))
# a list of grayscale levels, not a single color.
assert_array_equal(
mcolors.to_rgba_array([".2", ".5", ".8"]),
np.vstack([mcolors.to_rgba(c) for c in [".2", ".5", ".8"]]))
# alpha is properly set.
assert mcolors.to_rgba((1, 1, 1), .5) == (1, 1, 1, .5)
assert mcolors.to_rgba(".1", .5) == (.1, .1, .1, .5)
# builtin round differs between py2 and py3.
assert mcolors.to_hex((.7, .7, .7)) == "#b2b2b2"
# hex roundtrip.
hex_color = "#1234abcd"
assert mcolors.to_hex(mcolors.to_rgba(hex_color), keep_alpha=True) == \
hex_color
def test_failed_conversions():
with pytest.raises(ValueError):
mcolors.to_rgba('5')
with pytest.raises(ValueError):
mcolors.to_rgba('-1')
with pytest.raises(ValueError):
mcolors.to_rgba('nan')
with pytest.raises(ValueError):
# Gray must be a string to distinguish 3-4 grays from RGB or RGBA.
mcolors.to_rgba(0.4)
def test_grey_gray():
color_mapping = mcolors._colors_full_map
for k in color_mapping.keys():
if 'grey' in k:
assert color_mapping[k] == color_mapping[k.replace('grey', 'gray')]
if 'gray' in k:
assert color_mapping[k] == color_mapping[k.replace('gray', 'grey')]
def test_tableau_order():
dflt_cycle = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
'#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
'#bcbd22', '#17becf']
assert list(mcolors.TABLEAU_COLORS.values()) == dflt_cycle
def test_ndarray_subclass_norm(recwarn):
# Emulate an ndarray subclass that handles units
# which objects when adding or subtracting with other
# arrays. See #6622 and #8696
class MyArray(np.ndarray):
def __isub__(self, other):
raise RuntimeError
def __add__(self, other):
raise RuntimeError
data = np.arange(-10, 10, 1, dtype=float).reshape((10, 2))
mydata = data.view(MyArray)
for norm in [mcolors.Normalize(), mcolors.LogNorm(),
mcolors.SymLogNorm(3, vmax=5, linscale=1),
mcolors.Normalize(vmin=mydata.min(), vmax=mydata.max()),
mcolors.SymLogNorm(3, vmin=mydata.min(), vmax=mydata.max()),
mcolors.PowerNorm(1)]:
assert_array_equal(norm(mydata), norm(data))
fig, ax = plt.subplots()
ax.imshow(mydata, norm=norm)
fig.canvas.draw()
assert len(recwarn) == 0
recwarn.clear()
def test_same_color():
assert mcolors.same_color('k', (0, 0, 0))
assert not mcolors.same_color('w', (1, 1, 0))
|
2373a2aa4e859be7dcb63f9a3b1ab902ff0aa9edd02e3958fa84f04db74ee9dc
|
import re
from matplotlib.backend_bases import (
FigureCanvasBase, LocationEvent, RendererBase)
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
import matplotlib.path as path
import os
import numpy as np
import pytest
def test_uses_per_path():
id = transforms.Affine2D()
paths = [path.Path.unit_regular_polygon(i) for i in range(3, 7)]
tforms = [id.rotate(i) for i in range(1, 5)]
offsets = np.arange(20).reshape((10, 2))
facecolors = ['red', 'green']
edgecolors = ['red', 'green']
def check(master_transform, paths, all_transforms,
offsets, facecolors, edgecolors):
rb = RendererBase()
raw_paths = list(rb._iter_collection_raw_paths(
master_transform, paths, all_transforms))
gc = rb.new_gc()
ids = [path_id for xo, yo, path_id, gc0, rgbFace in
rb._iter_collection(gc, master_transform, all_transforms,
range(len(raw_paths)), offsets,
transforms.IdentityTransform(),
facecolors, edgecolors, [], [], [False],
[], 'data')]
uses = rb._iter_collection_uses_per_path(
paths, all_transforms, offsets, facecolors, edgecolors)
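        # Every raw path should be emitted either `uses` or `uses - 1` times.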
if raw_paths:
seen = np.bincount(ids, minlength=len(raw_paths))
assert set(seen).issubset([uses - 1, uses])
check(id, paths, tforms, offsets, facecolors, edgecolors)
check(id, paths[0:1], tforms, offsets, facecolors, edgecolors)
check(id, [], tforms, offsets, facecolors, edgecolors)
check(id, paths, tforms[0:1], offsets, facecolors, edgecolors)
check(id, paths, [], offsets, facecolors, edgecolors)
for n in range(0, offsets.shape[0]):
check(id, paths, tforms, offsets[0:n, :], facecolors, edgecolors)
check(id, paths, tforms, offsets, [], edgecolors)
check(id, paths, tforms, offsets, facecolors, [])
check(id, paths, tforms, offsets, [], [])
check(id, paths, tforms, offsets, facecolors[0:1], edgecolors)
def test_get_default_filename(tmpdir):
plt.rcParams['savefig.directory'] = str(tmpdir)
fig = plt.figure()
canvas = FigureCanvasBase(fig)
filename = canvas.get_default_filename()
assert filename == 'image.png'
@pytest.mark.backend('pdf')
def test_non_gui_warning(monkeypatch):
plt.subplots()
monkeypatch.setitem(os.environ, "DISPLAY", ":999")
with pytest.warns(UserWarning) as rec:
plt.show()
assert len(rec) == 1
assert ('Matplotlib is currently using pdf, which is a non-GUI backend'
in str(rec[0].message))
with pytest.warns(UserWarning) as rec:
plt.gcf().show()
assert len(rec) == 1
assert ('Matplotlib is currently using pdf, which is a non-GUI backend'
in str(rec[0].message))
@pytest.mark.parametrize(
"x, y", [(42, 24), (None, 42), (None, None), (200, 100.01), (205.75, 2.0)])
def test_location_event_position(x, y):
    # LocationEvent should cast its x and y arguments to int unless they
    # are None.
fig, ax = plt.subplots()
canvas = FigureCanvasBase(fig)
event = LocationEvent("test_event", canvas, x, y)
if x is None:
assert event.x is None
else:
assert event.x == int(x)
assert isinstance(event.x, int)
if y is None:
assert event.y is None
else:
assert event.y == int(y)
assert isinstance(event.y, int)
if x is not None and y is not None:
assert re.match(
"x={} +y={}".format(ax.format_xdata(x), ax.format_ydata(y)),
ax.format_coord(x, y))
ax.fmt_xdata = ax.fmt_ydata = lambda x: "foo"
assert re.match("x=foo +y=foo", ax.format_coord(x, y))
|
7e38ba01b9b4b09eb8ac38689ebcd82895fd9f62d8b40f491c12029c72bfcfba
|
import warnings
import numpy as np
import pytest
import sys
from matplotlib import pyplot as plt
from matplotlib.testing.decorators import image_comparison
def draw_quiver(ax, **kw):
X, Y = np.meshgrid(np.arange(0, 2 * np.pi, 1),
np.arange(0, 2 * np.pi, 1))
U = np.cos(X)
V = np.sin(Y)
Q = ax.quiver(U, V, **kw)
return Q
def test_quiver_memory_leak():
fig, ax = plt.subplots()
Q = draw_quiver(ax)
ttX = Q.X
Q.remove()
del Q
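    # Only the local name and getrefcount's own argument reference should
    # remain once the quiver has been removed.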
assert sys.getrefcount(ttX) == 2
def test_quiver_key_memory_leak():
fig, ax = plt.subplots()
Q = draw_quiver(ax)
qk = ax.quiverkey(Q, 0.5, 0.92, 2, r'$2 \frac{m}{s}$',
labelpos='W',
fontproperties={'weight': 'bold'})
assert sys.getrefcount(qk) == 3
qk.remove()
assert sys.getrefcount(qk) == 2
def test_no_warnings():
fig, ax = plt.subplots()
X, Y = np.meshgrid(np.arange(15), np.arange(10))
U = V = np.ones_like(X)
phi = (np.random.rand(15, 10) - .5) * 150
with warnings.catch_warnings(record=True) as w:
ax.quiver(X, Y, U, V, angles=phi)
fig.canvas.draw()
assert len(w) == 0
def test_zero_headlength():
# Based on report by Doug McNeil:
# http://matplotlib.1069221.n5.nabble.com/quiver-warnings-td28107.html
fig, ax = plt.subplots()
X, Y = np.meshgrid(np.arange(10), np.arange(10))
U, V = np.cos(X), np.sin(Y)
with warnings.catch_warnings(record=True) as w:
ax.quiver(U, V, headlength=0, headaxislength=0)
fig.canvas.draw()
assert len(w) == 0
@image_comparison(baseline_images=['quiver_animated_test_image'],
extensions=['png'])
def test_quiver_animate():
# Tests fix for #2616
fig, ax = plt.subplots()
Q = draw_quiver(ax, animated=True)
qk = ax.quiverkey(Q, 0.5, 0.92, 2, r'$2 \frac{m}{s}$',
labelpos='W',
fontproperties={'weight': 'bold'})
@image_comparison(baseline_images=['quiver_with_key_test_image'],
extensions=['png'])
def test_quiver_with_key():
fig, ax = plt.subplots()
ax.margins(0.1)
Q = draw_quiver(ax)
qk = ax.quiverkey(Q, 0.5, 0.95, 2,
r'$2\, \mathrm{m}\, \mathrm{s}^{-1}$',
angle=-10,
coordinates='figure',
labelpos='W',
fontproperties={'weight': 'bold',
'size': 'large'})
@image_comparison(baseline_images=['quiver_single_test_image'],
extensions=['png'], remove_text=True)
def test_quiver_single():
fig, ax = plt.subplots()
ax.margins(0.1)
ax.quiver([1], [1], [2], [2])
def test_quiver_copy():
fig, ax = plt.subplots()
uv = dict(u=np.array([1.1]), v=np.array([2.0]))
q0 = ax.quiver([1], [1], uv['u'], uv['v'])
uv['v'][0] = 0
assert q0.V[0] == 2.0
@image_comparison(baseline_images=['quiver_key_pivot'],
extensions=['png'], remove_text=True)
def test_quiver_key_pivot():
fig, ax = plt.subplots()
u, v = np.mgrid[0:2*np.pi:10j, 0:2*np.pi:10j]
q = ax.quiver(np.sin(u), np.cos(v))
ax.set_xlim(-2, 11)
ax.set_ylim(-2, 11)
ax.quiverkey(q, 0.5, 1, 1, 'N', labelpos='N')
ax.quiverkey(q, 1, 0.5, 1, 'E', labelpos='E')
ax.quiverkey(q, 0.5, 0, 1, 'S', labelpos='S')
ax.quiverkey(q, 0, 0.5, 1, 'W', labelpos='W')
@image_comparison(baseline_images=['quiver_key_xy'],
extensions=['png'], remove_text=True)
def test_quiver_key_xy():
# With scale_units='xy', ensure quiverkey still matches its quiver.
# Note that the quiver and quiverkey lengths depend on the axes aspect
# ratio, and that with angles='xy' their angles also depend on the axes
# aspect ratio.
X = np.arange(8)
Y = np.zeros(8)
angles = X * (np.pi / 4)
uv = np.exp(1j * angles)
U = uv.real
V = uv.imag
fig, axs = plt.subplots(2)
for ax, angle_str in zip(axs, ('uv', 'xy')):
ax.set_xlim(-1, 8)
ax.set_ylim(-0.2, 0.2)
q = ax.quiver(X, Y, U, V, pivot='middle',
units='xy', width=0.05,
scale=2, scale_units='xy',
angles=angle_str)
for x, angle in zip((0.2, 0.5, 0.8), (0, 45, 90)):
ax.quiverkey(q, X=x, Y=0.8, U=1, angle=angle, label='', color='b')
@image_comparison(baseline_images=['barbs_test_image'],
extensions=['png'], remove_text=True)
def test_barbs():
x = np.linspace(-5, 5, 5)
X, Y = np.meshgrid(x, x)
U, V = 12*X, 12*Y
fig, ax = plt.subplots()
ax.barbs(X, Y, U, V, np.hypot(U, V), fill_empty=True, rounding=False,
sizes=dict(emptybarb=0.25, spacing=0.2, height=0.3),
cmap='viridis')
@image_comparison(baseline_images=['barbs_pivot_test_image'],
extensions=['png'], remove_text=True)
def test_barbs_pivot():
x = np.linspace(-5, 5, 5)
X, Y = np.meshgrid(x, x)
U, V = 12*X, 12*Y
fig, ax = plt.subplots()
ax.barbs(X, Y, U, V, fill_empty=True, rounding=False, pivot=1.7,
sizes=dict(emptybarb=0.25, spacing=0.2, height=0.3))
ax.scatter(X, Y, s=49, c='black')
def test_bad_masked_sizes():
    """Test error handling when given differently sized masked arrays."""
x = np.arange(3)
y = np.arange(3)
u = np.ma.array(15. * np.ones((4,)))
v = np.ma.array(15. * np.ones_like(u))
u[1] = np.ma.masked
v[1] = np.ma.masked
fig, ax = plt.subplots()
with pytest.raises(ValueError):
ax.barbs(x, y, u, v)
def test_angles_and_scale():
# angles array + scale_units kwarg
fig, ax = plt.subplots()
X, Y = np.meshgrid(np.arange(15), np.arange(10))
U = V = np.ones_like(X)
phi = (np.random.rand(15, 10) - .5) * 150
ax.quiver(X, Y, U, V, angles=phi, scale_units='xy')
@image_comparison(baseline_images=['quiver_xy'],
extensions=['png'], remove_text=True)
def test_quiver_xy():
# simple arrow pointing from SW to NE
fig, ax = plt.subplots(subplot_kw=dict(aspect='equal'))
ax.quiver(0, 0, 1, 1, angles='xy', scale_units='xy', scale=1)
ax.set_xlim(0, 1.1)
ax.set_ylim(0, 1.1)
ax.grid()
def test_quiverkey_angles():
# Check that only a single arrow is plotted for a quiverkey when an array
# of angles is given to the original quiver plot
fig, ax = plt.subplots()
X, Y = np.meshgrid(np.arange(2), np.arange(2))
U = V = angles = np.ones_like(X)
q = ax.quiver(X, Y, U, V, angles=angles)
qk = ax.quiverkey(q, 1, 1, 2, 'Label')
# The arrows are only created when the key is drawn
fig.canvas.draw()
assert len(qk.verts) == 1
|
cda863e51778a41d31b4073fdca5fb7d05b2072b343fe21093e0aad1cb3c5606
|
import matplotlib.gridspec as gridspec
import pytest
def test_equal():
gs = gridspec.GridSpec(2, 1)
assert gs[0, 0] == gs[0, 0]
assert gs[:, 0] == gs[:, 0]
def test_width_ratios():
"""
Addresses issue #5835.
    See https://github.com/matplotlib/matplotlib/issues/5835.
"""
with pytest.raises(ValueError):
gridspec.GridSpec(1, 1, width_ratios=[2, 1, 3])
def test_height_ratios():
"""
Addresses issue #5835.
    See https://github.com/matplotlib/matplotlib/issues/5835.
"""
with pytest.raises(ValueError):
gridspec.GridSpec(1, 1, height_ratios=[2, 1, 3])
|
0c806a212cd42186a74b135624bf67da8a1bd5d44b38e02be93233d814d62b52
|
import matplotlib
import matplotlib.rcsetup
def test_use_doc_standard_backends():
"""
Test that the standard backends mentioned in the docstring of
matplotlib.use() are the same as in matplotlib.rcsetup.
"""
def parse(key):
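        # Collect the comma-separated backend names listed after `key`,
        # stopping at the first blank line.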
backends = []
for line in matplotlib.use.__doc__.split(key)[1].split('\n'):
if not line.strip():
break
backends += [e.strip() for e in line.split(',') if e]
return backends
assert (set(parse('- interactive backends:\n')) ==
set(matplotlib.rcsetup.interactive_bk))
assert (set(parse('- non-interactive backends:\n')) ==
set(matplotlib.rcsetup.non_interactive_bk))
|
0b0d1db83fa08d7fcbfe00833ff01929c984ef58b4e803cf1ff2e15b1de28eef
|
from io import BytesIO
import os
from pathlib import Path
import shutil
import sys
import warnings
import numpy as np
import pytest
from matplotlib import font_manager as fm
from matplotlib.font_manager import (
findfont, findSystemFonts, FontProperties, fontManager, json_dump,
json_load, get_font, get_fontconfig_fonts, is_opentype_cff_font,
MSUserFontDirectories, _call_fc_list)
from matplotlib import pyplot as plt, rc_context
has_fclist = shutil.which('fc-list') is not None
def test_font_priority():
with rc_context(rc={
'font.sans-serif':
['cmmi10', 'Bitstream Vera Sans']}):
font = findfont(FontProperties(family=["sans-serif"]))
assert Path(font).name == 'cmmi10.ttf'
# Smoketest get_charmap, which isn't used internally anymore
font = get_font(font)
cmap = font.get_charmap()
assert len(cmap) == 131
assert cmap[8729] == 30
def test_score_weight():
assert 0 == fontManager.score_weight("regular", "regular")
assert 0 == fontManager.score_weight("bold", "bold")
assert (0 < fontManager.score_weight(400, 400) <
fontManager.score_weight("normal", "bold"))
assert (0 < fontManager.score_weight("normal", "regular") <
fontManager.score_weight("normal", "bold"))
assert (fontManager.score_weight("normal", "regular") ==
fontManager.score_weight(400, 400))
def test_json_serialization(tmpdir):
# Can't open a NamedTemporaryFile twice on Windows, so use a temporary
# directory instead.
path = Path(tmpdir, "fontlist.json")
json_dump(fontManager, path)
copy = json_load(path)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'findfont: Font family.*not found')
for prop in ({'family': 'STIXGeneral'},
{'family': 'Bitstream Vera Sans', 'weight': 700},
{'family': 'no such font family'}):
fp = FontProperties(**prop)
assert (fontManager.findfont(fp, rebuild_if_missing=False) ==
copy.findfont(fp, rebuild_if_missing=False))
def test_otf():
fname = '/usr/share/fonts/opentype/freefont/FreeMono.otf'
if Path(fname).exists():
assert is_opentype_cff_font(fname)
for f in fontManager.ttflist:
if 'otf' in f.fname:
with open(f.fname, 'rb') as fd:
res = fd.read(4) == b'OTTO'
assert res == is_opentype_cff_font(f.fname)
@pytest.mark.skipif(not has_fclist, reason='no fontconfig installed')
def test_get_fontconfig_fonts():
assert len(get_fontconfig_fonts()) > 1
@pytest.mark.parametrize('factor', [2, 4, 6, 8])
def test_hinting_factor(factor):
font = findfont(FontProperties(family=["sans-serif"]))
font1 = get_font(font, hinting_factor=1)
font1.clear()
font1.set_size(12, 100)
font1.set_text('abc')
expected = font1.get_width_height()
hinted_font = get_font(font, hinting_factor=factor)
hinted_font.clear()
hinted_font.set_size(12, 100)
hinted_font.set_text('abc')
# Check that hinting only changes text layout by a small (10%) amount.
np.testing.assert_allclose(hinted_font.get_width_height(), expected,
rtol=0.1)
@pytest.mark.skipif(sys.platform != "win32",
reason="Need Windows font to test against")
def test_utf16m_sfnt():
segoe_ui_semibold = None
for f in fontManager.ttflist:
# seguisbi = Microsoft Segoe UI Semibold
if f.fname[-12:] == "seguisbi.ttf":
segoe_ui_semibold = f
break
else:
pytest.xfail(reason="Couldn't find font to test against.")
# Check that we successfully read the "semibold" from the font's
# sfnt table and set its weight accordingly
assert segoe_ui_semibold.weight == "semibold"
@pytest.mark.xfail(not (os.environ.get("TRAVIS") and sys.platform == "linux"),
reason="Font may be missing.")
def test_find_ttc():
fp = FontProperties(family=["WenQuanYi Zen Hei"])
if Path(findfont(fp)).name != "wqy-zenhei.ttc":
# Travis appears to fail to pick up the ttc file sometimes. Try to
# rebuild the cache and try again.
fm._rebuild()
assert Path(findfont(fp)).name == "wqy-zenhei.ttc"
fig, ax = plt.subplots()
ax.text(.5, .5, "\N{KANGXI RADICAL DRAGON}", fontproperties=fp)
fig.savefig(BytesIO(), format="raw")
fig.savefig(BytesIO(), format="svg")
with pytest.raises(RuntimeError):
fig.savefig(BytesIO(), format="pdf")
with pytest.raises(RuntimeError):
fig.savefig(BytesIO(), format="ps")
@pytest.mark.skipif(sys.platform != 'linux', reason='Linux only')
def test_user_fonts_linux(tmpdir, monkeypatch):
font_test_file = 'mpltest.ttf'
# Precondition: the test font should not be available
fonts = findSystemFonts()
if any(font_test_file in font for font in fonts):
pytest.skip(f'{font_test_file} already exists in system fonts')
# Prepare a temporary user font directory
user_fonts_dir = tmpdir.join('fonts')
user_fonts_dir.ensure(dir=True)
shutil.copyfile(Path(__file__).parent / font_test_file,
user_fonts_dir.join(font_test_file))
with monkeypatch.context() as m:
m.setenv('XDG_DATA_HOME', str(tmpdir))
_call_fc_list.cache_clear()
# Now, the font should be available
fonts = findSystemFonts()
assert any(font_test_file in font for font in fonts)
# Make sure the temporary directory is no longer cached.
_call_fc_list.cache_clear()
@pytest.mark.skipif(sys.platform != 'win32', reason='Windows only')
def test_user_fonts_win32():
if not os.environ.get('APPVEYOR', False):
        pytest.xfail('This test only works on AppVeyor, since user fonts '
                     'are Windows specific and the developer\'s font '
                     'directory should remain unchanged')
font_test_file = 'mpltest.ttf'
# Precondition: the test font should not be available
fonts = findSystemFonts()
if any(font_test_file in font for font in fonts):
pytest.skip(f'{font_test_file} already exists in system fonts')
user_fonts_dir = MSUserFontDirectories[0]
# Make sure that the user font directory exists (this is probably not the
# case on Windows versions < 1809)
os.makedirs(user_fonts_dir)
# Copy the test font to the user font directory
shutil.copyfile(os.path.join(os.path.dirname(__file__), font_test_file),
os.path.join(user_fonts_dir, font_test_file))
# Now, the font should be available
fonts = findSystemFonts()
assert any(font_test_file in font for font in fonts)
|
06868ea9aca33368a6177bac76d8ab0f4c2bab4fd7ef94a19081652d5d380f93
|
from unittest.mock import Mock
import matplotlib.widgets as widgets
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison
from numpy.testing import assert_allclose
import pytest
def get_ax():
fig, ax = plt.subplots(1, 1)
ax.plot([0, 200], [0, 200])
ax.set_aspect(1.0)
ax.figure.canvas.draw()
return ax
def do_event(tool, etype, button=1, xdata=0, ydata=0, key=None, step=1):
"""
*name*
the event name
*canvas*
the FigureCanvas instance generating the event
*guiEvent*
the GUI event that triggered the matplotlib event
*x*
x position - pixels from left of canvas
*y*
y position - pixels from bottom of canvas
*inaxes*
the :class:`~matplotlib.axes.Axes` instance if mouse is over axes
*xdata*
x coord of mouse in data coords
*ydata*
y coord of mouse in data coords
*button*
button pressed None, 1, 2, 3, 'up', 'down' (up and down are used
for scroll events)
*key*
the key depressed when the mouse event triggered (see
:class:`KeyEvent`)
*step*
number of scroll steps (positive for 'up', negative for 'down')
"""
event = Mock()
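    # Build a synthetic event (no GUI backend involved) carrying the
    # attributes documented above.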
event.button = button
ax = tool.ax
event.x, event.y = ax.transData.transform([(xdata, ydata),
                                               (xdata, ydata)])[0]
event.xdata, event.ydata = xdata, ydata
event.inaxes = ax
event.canvas = ax.figure.canvas
event.key = key
event.step = step
event.guiEvent = None
event.name = 'Custom'
func = getattr(tool, etype)
func(event)
def check_rectangle(**kwargs):
ax = get_ax()
def onselect(epress, erelease):
ax._got_onselect = True
assert epress.xdata == 100
assert epress.ydata == 100
assert erelease.xdata == 199
assert erelease.ydata == 199
tool = widgets.RectangleSelector(ax, onselect, **kwargs)
do_event(tool, 'press', xdata=100, ydata=100, button=1)
do_event(tool, 'onmove', xdata=199, ydata=199, button=1)
# purposely drag outside of axis for release
do_event(tool, 'release', xdata=250, ydata=250, button=1)
if kwargs.get('drawtype', None) not in ['line', 'none']:
assert_allclose(tool.geometry,
[[100., 100, 199, 199, 100],
[100, 199, 199, 100, 100]],
err_msg=tool.geometry)
assert ax._got_onselect
def test_rectangle_selector():
check_rectangle()
check_rectangle(drawtype='line', useblit=False)
check_rectangle(useblit=True, button=1)
check_rectangle(drawtype='none', minspanx=10, minspany=10)
check_rectangle(minspanx=10, minspany=10, spancoords='pixels')
check_rectangle(rectprops=dict(fill=True))
def test_ellipse():
"""For ellipse, test out the key modifiers"""
ax = get_ax()
def onselect(epress, erelease):
pass
tool = widgets.EllipseSelector(ax, onselect=onselect,
maxdist=10, interactive=True)
tool.extents = (100, 150, 100, 150)
# drag the rectangle
do_event(tool, 'press', xdata=10, ydata=10, button=1,
key=' ')
do_event(tool, 'onmove', xdata=30, ydata=30, button=1)
do_event(tool, 'release', xdata=30, ydata=30, button=1)
assert tool.extents == (120, 170, 120, 170)
# create from center
do_event(tool, 'on_key_press', xdata=100, ydata=100, button=1,
key='control')
do_event(tool, 'press', xdata=100, ydata=100, button=1)
do_event(tool, 'onmove', xdata=125, ydata=125, button=1)
do_event(tool, 'release', xdata=125, ydata=125, button=1)
do_event(tool, 'on_key_release', xdata=100, ydata=100, button=1,
key='control')
assert tool.extents == (75, 125, 75, 125)
# create a square
do_event(tool, 'on_key_press', xdata=10, ydata=10, button=1,
key='shift')
do_event(tool, 'press', xdata=10, ydata=10, button=1)
do_event(tool, 'onmove', xdata=35, ydata=30, button=1)
do_event(tool, 'release', xdata=35, ydata=30, button=1)
do_event(tool, 'on_key_release', xdata=10, ydata=10, button=1,
key='shift')
extents = [int(e) for e in tool.extents]
assert extents == [10, 35, 10, 34]
# create a square from center
do_event(tool, 'on_key_press', xdata=100, ydata=100, button=1,
key='ctrl+shift')
do_event(tool, 'press', xdata=100, ydata=100, button=1)
do_event(tool, 'onmove', xdata=125, ydata=130, button=1)
do_event(tool, 'release', xdata=125, ydata=130, button=1)
do_event(tool, 'on_key_release', xdata=100, ydata=100, button=1,
key='ctrl+shift')
extents = [int(e) for e in tool.extents]
assert extents == [70, 129, 70, 130]
assert tool.geometry.shape == (2, 73)
assert_allclose(tool.geometry[:, 0], [70., 100])
def test_rectangle_handles():
ax = get_ax()
def onselect(epress, erelease):
pass
tool = widgets.RectangleSelector(ax, onselect=onselect,
maxdist=10, interactive=True)
tool.extents = (100, 150, 100, 150)
assert tool.corners == (
(100, 150, 150, 100), (100, 100, 150, 150))
assert tool.extents == (100, 150, 100, 150)
assert tool.edge_centers == (
(100, 125.0, 150, 125.0), (125.0, 100, 125.0, 150))
assert tool.extents == (100, 150, 100, 150)
# grab a corner and move it
do_event(tool, 'press', xdata=100, ydata=100)
do_event(tool, 'onmove', xdata=120, ydata=120)
do_event(tool, 'release', xdata=120, ydata=120)
assert tool.extents == (120, 150, 120, 150)
# grab the center and move it
do_event(tool, 'press', xdata=132, ydata=132)
do_event(tool, 'onmove', xdata=120, ydata=120)
do_event(tool, 'release', xdata=120, ydata=120)
assert tool.extents == (108, 138, 108, 138)
# create a new rectangle
do_event(tool, 'press', xdata=10, ydata=10)
do_event(tool, 'onmove', xdata=100, ydata=100)
do_event(tool, 'release', xdata=100, ydata=100)
assert tool.extents == (10, 100, 10, 100)
def check_span(*args, **kwargs):
ax = get_ax()
def onselect(vmin, vmax):
ax._got_onselect = True
assert vmin == 100
assert vmax == 150
def onmove(vmin, vmax):
assert vmin == 100
assert vmax == 125
ax._got_on_move = True
if 'onmove_callback' in kwargs:
kwargs['onmove_callback'] = onmove
tool = widgets.SpanSelector(ax, onselect, *args, **kwargs)
do_event(tool, 'press', xdata=100, ydata=100, button=1)
do_event(tool, 'onmove', xdata=125, ydata=125, button=1)
do_event(tool, 'release', xdata=150, ydata=150, button=1)
assert ax._got_onselect
if 'onmove_callback' in kwargs:
assert ax._got_on_move
def test_span_selector():
check_span('horizontal', minspan=10, useblit=True)
check_span('vertical', onmove_callback=True, button=1)
check_span('horizontal', rectprops=dict(fill=True))
def check_lasso_selector(**kwargs):
ax = get_ax()
def onselect(verts):
ax._got_onselect = True
assert verts == [(100, 100), (125, 125), (150, 150)]
tool = widgets.LassoSelector(ax, onselect, **kwargs)
do_event(tool, 'press', xdata=100, ydata=100, button=1)
do_event(tool, 'onmove', xdata=125, ydata=125, button=1)
do_event(tool, 'release', xdata=150, ydata=150, button=1)
assert ax._got_onselect
def test_lasso_selector():
check_lasso_selector()
check_lasso_selector(useblit=False, lineprops=dict(color='red'))
check_lasso_selector(useblit=True, button=1)
def test_CheckButtons():
ax = get_ax()
check = widgets.CheckButtons(ax, ('a', 'b', 'c'), (True, False, True))
assert check.get_status() == [True, False, True]
check.set_active(0)
assert check.get_status() == [False, False, True]
cid = check.on_clicked(lambda: None)
check.disconnect(cid)
@image_comparison(baseline_images=['check_radio_buttons'], extensions=['png'],
style='mpl20', remove_text=True)
def test_check_radio_buttons_image():
get_ax()
plt.subplots_adjust(left=0.3)
rax1 = plt.axes([0.05, 0.7, 0.15, 0.15])
rax2 = plt.axes([0.05, 0.2, 0.15, 0.15])
widgets.RadioButtons(rax1, ('Radio 1', 'Radio 2', 'Radio 3'))
widgets.CheckButtons(rax2, ('Check 1', 'Check 2', 'Check 3'),
(False, True, True))
@image_comparison(baseline_images=['check_bunch_of_radio_buttons'],
style='mpl20', extensions=['png'], remove_text=True)
def test_check_bunch_of_radio_buttons():
rax = plt.axes([0.05, 0.1, 0.15, 0.7])
widgets.RadioButtons(rax, ('B1', 'B2', 'B3', 'B4', 'B5', 'B6',
'B7', 'B8', 'B9', 'B10', 'B11', 'B12',
'B13', 'B14', 'B15'))
def test_slider_slidermin_slidermax_invalid():
fig, ax = plt.subplots()
# test min/max with floats
with pytest.raises(ValueError):
widgets.Slider(ax=ax, label='', valmin=0.0, valmax=24.0,
slidermin=10.0)
with pytest.raises(ValueError):
widgets.Slider(ax=ax, label='', valmin=0.0, valmax=24.0,
slidermax=10.0)
def test_slider_slidermin_slidermax():
fig, ax = plt.subplots()
slider_ = widgets.Slider(ax=ax, label='', valmin=0.0, valmax=24.0,
valinit=5.0)
slider = widgets.Slider(ax=ax, label='', valmin=0.0, valmax=24.0,
valinit=1.0, slidermin=slider_)
assert slider.val == slider_.val
slider = widgets.Slider(ax=ax, label='', valmin=0.0, valmax=24.0,
valinit=10.0, slidermax=slider_)
assert slider.val == slider_.val
def test_slider_valmin_valmax():
fig, ax = plt.subplots()
slider = widgets.Slider(ax=ax, label='', valmin=0.0, valmax=24.0,
valinit=-10.0)
assert slider.val == slider.valmin
slider = widgets.Slider(ax=ax, label='', valmin=0.0, valmax=24.0,
valinit=25.0)
assert slider.val == slider.valmax
def test_slider_horizontal_vertical():
fig, ax = plt.subplots()
slider = widgets.Slider(ax=ax, label='', valmin=0, valmax=24,
valinit=12, orientation='horizontal')
slider.set_val(10)
assert slider.val == 10
# check the dimension of the slider patch in axes units
box = slider.poly.get_extents().transformed(ax.transAxes.inverted())
assert_allclose(box.bounds, [0, 0, 10/24, 1])
fig, ax = plt.subplots()
slider = widgets.Slider(ax=ax, label='', valmin=0, valmax=24,
valinit=12, orientation='vertical')
slider.set_val(10)
assert slider.val == 10
# check the dimension of the slider patch in axes units
box = slider.poly.get_extents().transformed(ax.transAxes.inverted())
assert_allclose(box.bounds, [0, 0, 1, 10/24])
def check_polygon_selector(event_sequence, expected_result, selections_count):
"""Helper function to test Polygon Selector
Parameters
----------
event_sequence : list of tuples (etype, dict())
A sequence of events to perform. The sequence is a list of tuples
where the first element of the tuple is an etype (e.g., 'onmove',
'press', etc.), and the second element of the tuple is a dictionary of
the arguments for the event (e.g., xdata=5, key='shift', etc.).
expected_result : list of vertices (xdata, ydata)
The list of vertices that are expected to result from the event
sequence.
    selections_count : int
        The number of times the tool is expected to have called its
        `onselect` function once the event sequence has been processed,
        checked before comparing the result to the `expected_result`.
"""
ax = get_ax()
ax._selections_count = 0
def onselect(vertices):
ax._selections_count += 1
ax._current_result = vertices
tool = widgets.PolygonSelector(ax, onselect)
for (etype, event_args) in event_sequence:
do_event(tool, etype, **event_args)
assert ax._selections_count == selections_count
assert ax._current_result == expected_result
def polygon_place_vertex(xdata, ydata):
return [('onmove', dict(xdata=xdata, ydata=ydata)),
('press', dict(xdata=xdata, ydata=ydata)),
('release', dict(xdata=xdata, ydata=ydata))]
def test_polygon_selector():
# Simple polygon
expected_result = [(50, 50), (150, 50), (50, 150)]
event_sequence = (polygon_place_vertex(50, 50)
+ polygon_place_vertex(150, 50)
+ polygon_place_vertex(50, 150)
+ polygon_place_vertex(50, 50))
check_polygon_selector(event_sequence, expected_result, 1)
# Move first vertex before completing the polygon.
expected_result = [(75, 50), (150, 50), (50, 150)]
event_sequence = (polygon_place_vertex(50, 50)
+ polygon_place_vertex(150, 50)
+ [('on_key_press', dict(key='control')),
('onmove', dict(xdata=50, ydata=50)),
('press', dict(xdata=50, ydata=50)),
('onmove', dict(xdata=75, ydata=50)),
('release', dict(xdata=75, ydata=50)),
('on_key_release', dict(key='control'))]
+ polygon_place_vertex(50, 150)
+ polygon_place_vertex(75, 50))
check_polygon_selector(event_sequence, expected_result, 1)
# Move first two vertices at once before completing the polygon.
expected_result = [(50, 75), (150, 75), (50, 150)]
event_sequence = (polygon_place_vertex(50, 50)
+ polygon_place_vertex(150, 50)
+ [('on_key_press', dict(key='shift')),
('onmove', dict(xdata=100, ydata=100)),
('press', dict(xdata=100, ydata=100)),
('onmove', dict(xdata=100, ydata=125)),
('release', dict(xdata=100, ydata=125)),
('on_key_release', dict(key='shift'))]
+ polygon_place_vertex(50, 150)
+ polygon_place_vertex(50, 75))
check_polygon_selector(event_sequence, expected_result, 1)
# Move first vertex after completing the polygon.
expected_result = [(75, 50), (150, 50), (50, 150)]
event_sequence = (polygon_place_vertex(50, 50)
+ polygon_place_vertex(150, 50)
+ polygon_place_vertex(50, 150)
+ polygon_place_vertex(50, 50)
+ [('onmove', dict(xdata=50, ydata=50)),
('press', dict(xdata=50, ydata=50)),
('onmove', dict(xdata=75, ydata=50)),
('release', dict(xdata=75, ydata=50))])
check_polygon_selector(event_sequence, expected_result, 2)
# Move all vertices after completing the polygon.
expected_result = [(75, 75), (175, 75), (75, 175)]
event_sequence = (polygon_place_vertex(50, 50)
+ polygon_place_vertex(150, 50)
+ polygon_place_vertex(50, 150)
+ polygon_place_vertex(50, 50)
+ [('on_key_press', dict(key='shift')),
('onmove', dict(xdata=100, ydata=100)),
('press', dict(xdata=100, ydata=100)),
('onmove', dict(xdata=125, ydata=125)),
('release', dict(xdata=125, ydata=125)),
('on_key_release', dict(key='shift'))])
check_polygon_selector(event_sequence, expected_result, 2)
# Try to move a vertex and move all before placing any vertices.
expected_result = [(50, 50), (150, 50), (50, 150)]
event_sequence = ([('on_key_press', dict(key='control')),
('onmove', dict(xdata=100, ydata=100)),
('press', dict(xdata=100, ydata=100)),
('onmove', dict(xdata=125, ydata=125)),
('release', dict(xdata=125, ydata=125)),
('on_key_release', dict(key='control')),
('on_key_press', dict(key='shift')),
('onmove', dict(xdata=100, ydata=100)),
('press', dict(xdata=100, ydata=100)),
('onmove', dict(xdata=125, ydata=125)),
('release', dict(xdata=125, ydata=125)),
('on_key_release', dict(key='shift'))]
+ polygon_place_vertex(50, 50)
+ polygon_place_vertex(150, 50)
+ polygon_place_vertex(50, 150)
+ polygon_place_vertex(50, 50))
check_polygon_selector(event_sequence, expected_result, 1)
# Try to place vertex out-of-bounds, then reset, and start a new polygon.
expected_result = [(50, 50), (150, 50), (50, 150)]
event_sequence = (polygon_place_vertex(50, 50)
+ polygon_place_vertex(250, 50)
+ [('on_key_press', dict(key='escape')),
('on_key_release', dict(key='escape'))]
+ polygon_place_vertex(50, 50)
+ polygon_place_vertex(150, 50)
+ polygon_place_vertex(50, 150)
+ polygon_place_vertex(50, 50))
check_polygon_selector(event_sequence, expected_result, 1)
|
77c1572ae6a21676192892b33115b3b189230f9ff79cb10cc027193c01005189
|
import base64
import io
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
import pytest
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
from matplotlib import patches, transforms
from matplotlib.path import Path
# NOTE: All of these tests assume that path.simplify is set to True
# (the default)
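# Hedged aside (added for illustration; not part of the original tests): the
# rcParam the NOTE above refers to, and how it could be flipped locally.
# Only the helper name _demo_simplify_rcparam is invented here.
def _demo_simplify_rcparam():
    assert plt.rcParams['path.simplify']  # default: simplification enabled
    with plt.rc_context({'path.simplify': False}):
        # with it disabled, the vertex-count assertions below would not hold
        assert not plt.rcParams['path.simplify']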
@image_comparison(baseline_images=['clipping'], remove_text=True)
def test_clipping():
t = np.arange(0.0, 2.0, 0.01)
s = np.sin(2*np.pi*t)
fig, ax = plt.subplots()
ax.plot(t, s, linewidth=1.0)
ax.set_ylim((-0.20, -0.28))
@image_comparison(baseline_images=['overflow'], remove_text=True)
def test_overflow():
x = np.array([1.0, 2.0, 3.0, 2.0e5])
y = np.arange(len(x))
fig, ax = plt.subplots()
ax.plot(x, y)
ax.set_xlim(2, 6)
@image_comparison(baseline_images=['clipping_diamond'], remove_text=True)
def test_diamond():
x = np.array([0.0, 1.0, 0.0, -1.0, 0.0])
y = np.array([1.0, 0.0, -1.0, 0.0, 1.0])
fig, ax = plt.subplots()
ax.plot(x, y)
ax.set_xlim(-0.6, 0.6)
ax.set_ylim(-0.6, 0.6)
def test_noise():
np.random.seed(0)
x = np.random.uniform(size=50000) * 50
fig, ax = plt.subplots()
p1 = ax.plot(x, solid_joinstyle='round', linewidth=2.0)
path = p1[0].get_path()
transform = p1[0].get_transform()
path = transform.transform_path(path)
simplified = path.cleaned(simplify=True)
assert simplified.vertices.size == 25512
def test_antiparallel_simplification():
def _get_simplified(x, y):
fig, ax = plt.subplots()
p1 = ax.plot(x, y)
path = p1[0].get_path()
transform = p1[0].get_transform()
path = transform.transform_path(path)
simplified = path.cleaned(simplify=True)
simplified = transform.inverted().transform_path(simplified)
return simplified
# test ending on a maximum
x = [0, 0, 0, 0, 0, 1]
y = [.5, 1, -1, 1, 2, .5]
simplified = _get_simplified(x, y)
assert_array_almost_equal([[0., 0.5],
[0., -1.],
[0., 2.],
[1., 0.5]],
simplified.vertices[:-2, :])
# test ending on a minimum
x = [0, 0, 0, 0, 0, 1]
y = [.5, 1, -1, 1, -2, .5]
simplified = _get_simplified(x, y)
assert_array_almost_equal([[0., 0.5],
[0., 1.],
[0., -2.],
[1., 0.5]],
simplified.vertices[:-2, :])
# test ending in between
x = [0, 0, 0, 0, 0, 1]
y = [.5, 1, -1, 1, 0, .5]
simplified = _get_simplified(x, y)
assert_array_almost_equal([[0., 0.5],
[0., 1.],
[0., -1.],
[0., 0.],
[1., 0.5]],
simplified.vertices[:-2, :])
# test no anti-parallel ending at max
x = [0, 0, 0, 0, 0, 1]
y = [.5, 1, 2, 1, 3, .5]
simplified = _get_simplified(x, y)
assert_array_almost_equal([[0., 0.5],
[0., 3.],
[1., 0.5]],
simplified.vertices[:-2, :])
# test no anti-parallel ending in middle
x = [0, 0, 0, 0, 0, 1]
y = [.5, 1, 2, 1, 1, .5]
simplified = _get_simplified(x, y)
assert_array_almost_equal([[0., 0.5],
[0., 2.],
[0., 1.],
[1., 0.5]],
simplified.vertices[:-2, :])
# Only consider angles in 0 <= angle <= pi/2; otherwise the coordinate-wise
# min/max used to build the expected path pair up in the wrong order.  The
# simplification code tracks extrema along the original vector, and once the
# angle leaves that range those extrema are the opposite of the plain
# min/max (see the short demo below).
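# Hedged numeric aside (added for illustration; not part of the original
# tests): for an angle just above pi/2, cos(angle) < 0, so the offset that
# maximizes x minimizes y, and the (x_max, y_max) / (x_min, y_min) pairing
# used for p_expected below would no longer hold.  Only the helper name is
# invented; everything else is plain numpy.
def _demo_angle_beyond_pi_over_2():
    angle = 3 * np.pi / 4
    offsets = np.array([0., 1., -2., 3.])
    x = np.sin(angle) * offsets
    y = np.cos(angle) * offsets
    assert x.argmax() == y.argmin()  # the extremes pair up "crosswise"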
@pytest.mark.parametrize('angle', [0, np.pi/4, np.pi/3, np.pi/2])
@pytest.mark.parametrize('offset', [0, .5])
def test_angled_antiparallel(angle, offset):
scale = 5
np.random.seed(19680801)
# get 15 random offsets
# TODO: guarantee offset > 0 results in some offsets < 0
vert_offsets = (np.random.rand(15) - offset) * scale
# always start at 0 so rotation makes sense
vert_offsets[0] = 0
# always take the first step the same direction
vert_offsets[1] = 1
# compute points along a diagonal line
x = np.sin(angle) * vert_offsets
y = np.cos(angle) * vert_offsets
# will check these later
x_max = x[1:].max()
x_min = x[1:].min()
y_max = y[1:].max()
y_min = y[1:].min()
if offset > 0:
p_expected = Path([[0, 0],
[x_max, y_max],
[x_min, y_min],
[x[-1], y[-1]],
[0, 0]],
codes=[1, 2, 2, 2, 0])
else:
p_expected = Path([[0, 0],
[x_max, y_max],
[x[-1], y[-1]],
[0, 0]],
codes=[1, 2, 2, 0])
p = Path(np.vstack([x, y]).T)
p2 = p.cleaned(simplify=True)
assert_array_almost_equal(p_expected.vertices,
p2.vertices)
assert_array_equal(p_expected.codes, p2.codes)
def test_sine_plus_noise():
np.random.seed(0)
x = (np.sin(np.linspace(0, np.pi * 2.0, 50000)) +
np.random.uniform(size=50000) * 0.01)
fig, ax = plt.subplots()
p1 = ax.plot(x, solid_joinstyle='round', linewidth=2.0)
path = p1[0].get_path()
transform = p1[0].get_transform()
path = transform.transform_path(path)
simplified = path.cleaned(simplify=True)
assert simplified.vertices.size == 25240
@image_comparison(baseline_images=['simplify_curve'], remove_text=True)
def test_simplify_curve():
pp1 = patches.PathPatch(
Path([(0, 0), (1, 0), (1, 1), (np.nan, 1), (0, 0), (2, 0), (2, 2),
(0, 0)],
[Path.MOVETO, Path.CURVE3, Path.CURVE3, Path.CURVE3, Path.CURVE3,
Path.CURVE3, Path.CURVE3, Path.CLOSEPOLY]),
fc="none")
fig, ax = plt.subplots()
ax.add_patch(pp1)
ax.set_xlim((0, 2))
ax.set_ylim((0, 2))
@image_comparison(baseline_images=['hatch_simplify'], remove_text=True)
def test_hatch():
fig, ax = plt.subplots()
ax.add_patch(plt.Rectangle((0, 0), 1, 1, fill=False, hatch="/"))
ax.set_xlim((0.45, 0.55))
ax.set_ylim((0.45, 0.55))
@image_comparison(baseline_images=['fft_peaks'], remove_text=True)
def test_fft_peaks():
fig, ax = plt.subplots()
t = np.arange(65536)
p1 = ax.plot(abs(np.fft.fft(np.sin(2*np.pi*.01*t)*np.blackman(len(t)))))
path = p1[0].get_path()
transform = p1[0].get_transform()
path = transform.transform_path(path)
simplified = path.cleaned(simplify=True)
assert simplified.vertices.size == 36
def test_start_with_moveto():
# Should be entirely clipped away to a single MOVETO
data = b"""
ZwAAAAku+v9UAQAA+Tj6/z8CAADpQ/r/KAMAANlO+v8QBAAAyVn6//UEAAC6ZPr/2gUAAKpv+v+8
BgAAm3r6/50HAACLhfr/ewgAAHyQ+v9ZCQAAbZv6/zQKAABepvr/DgsAAE+x+v/lCwAAQLz6/7wM
AAAxx/r/kA0AACPS+v9jDgAAFN36/zQPAAAF6Pr/AxAAAPfy+v/QEAAA6f36/5wRAADbCPv/ZhIA
AMwT+/8uEwAAvh77//UTAACwKfv/uRQAAKM0+/98FQAAlT/7/z0WAACHSvv//RYAAHlV+/+7FwAA
bGD7/3cYAABea/v/MRkAAFF2+//pGQAARIH7/6AaAAA3jPv/VRsAACmX+/8JHAAAHKL7/7ocAAAP
rfv/ah0AAAO4+/8YHgAA9sL7/8QeAADpzfv/bx8AANzY+/8YIAAA0OP7/78gAADD7vv/ZCEAALf5
+/8IIgAAqwT8/6kiAACeD/z/SiMAAJIa/P/oIwAAhiX8/4QkAAB6MPz/HyUAAG47/P+4JQAAYkb8
/1AmAABWUfz/5SYAAEpc/P95JwAAPmf8/wsoAAAzcvz/nCgAACd9/P8qKQAAHIj8/7cpAAAQk/z/
QyoAAAWe/P/MKgAA+aj8/1QrAADus/z/2isAAOO+/P9eLAAA2Mn8/+AsAADM1Pz/YS0AAMHf/P/g
LQAAtur8/10uAACr9fz/2C4AAKEA/f9SLwAAlgv9/8ovAACLFv3/QDAAAIAh/f+1MAAAdSz9/ycx
AABrN/3/mDEAAGBC/f8IMgAAVk39/3UyAABLWP3/4TIAAEFj/f9LMwAANm79/7MzAAAsef3/GjQA
ACKE/f9+NAAAF4/9/+E0AAANmv3/QzUAAAOl/f+iNQAA+a/9/wA2AADvuv3/XDYAAOXF/f+2NgAA
29D9/w83AADR2/3/ZjcAAMfm/f+7NwAAvfH9/w44AACz/P3/XzgAAKkH/v+vOAAAnxL+//04AACW
Hf7/SjkAAIwo/v+UOQAAgjP+/905AAB5Pv7/JDoAAG9J/v9pOgAAZVT+/606AABcX/7/7zoAAFJq
/v8vOwAASXX+/207AAA/gP7/qjsAADaL/v/lOwAALZb+/x48AAAjof7/VTwAABqs/v+LPAAAELf+
/788AAAHwv7/8TwAAP7M/v8hPQAA9df+/1A9AADr4v7/fT0AAOLt/v+oPQAA2fj+/9E9AADQA///
+T0AAMYO//8fPgAAvRn//0M+AAC0JP//ZT4AAKsv//+GPgAAojr//6U+AACZRf//wj4AAJBQ///d
PgAAh1v///c+AAB+Zv//Dz8AAHRx//8lPwAAa3z//zk/AABih///TD8AAFmS//9dPwAAUJ3//2w/
AABHqP//ej8AAD6z//+FPwAANb7//48/AAAsyf//lz8AACPU//+ePwAAGt///6M/AAAR6v//pj8A
AAj1//+nPwAA/////w=="""
verts = np.frombuffer(base64.decodebytes(data), dtype='<i4')
verts = verts.reshape((len(verts) // 2, 2))
path = Path(verts)
segs = path.iter_segments(transforms.IdentityTransform(),
clip=(0.0, 0.0, 100.0, 100.0))
segs = list(segs)
assert len(segs) == 1
assert segs[0][1] == Path.MOVETO
def test_throw_rendering_complexity_exceeded():
plt.rcParams['path.simplify'] = False
xx = np.arange(200000)
yy = np.random.rand(200000)
yy[1000] = np.nan
fig, ax = plt.subplots()
ax.plot(xx, yy)
with pytest.raises(OverflowError):
fig.savefig(io.BytesIO())
@image_comparison(baseline_images=['clipper_edge'], remove_text=True)
def test_clipper():
dat = (0, 1, 0, 2, 0, 3, 0, 4, 0, 5)
fig = plt.figure(figsize=(2, 1))
fig.subplots_adjust(left=0, bottom=0, wspace=0, hspace=0)
ax = fig.add_axes((0, 0, 1.0, 1.0), ylim=(0, 5), autoscale_on=False)
ax.plot(dat)
ax.xaxis.set_major_locator(plt.MultipleLocator(1))
ax.yaxis.set_major_locator(plt.MultipleLocator(1))
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.set_xlim(5, 9)
@image_comparison(baseline_images=['para_equal_perp'], remove_text=True)
def test_para_equal_perp():
x = np.array([0, 1, 2, 1, 0, -1, 0, 1] + [1] * 128)
y = np.array([1, 1, 2, 1, 0, -1, 0, 0] + [0] * 128)
fig, ax = plt.subplots()
ax.plot(x + 1, y + 1)
ax.plot(x + 1, y + 1, 'ro')
@image_comparison(baseline_images=['clipping_with_nans'])
def test_clipping_with_nans():
x = np.linspace(0, 3.14 * 2, 3000)
y = np.sin(x)
x[::100] = np.nan
fig, ax = plt.subplots()
ax.plot(x, y)
ax.set_ylim(-0.25, 0.25)
def test_clipping_full():
p = Path([[1e30, 1e30]] * 5)
simplified = list(p.iter_segments(clip=[0, 0, 100, 100]))
assert simplified == []
p = Path([[50, 40], [75, 65]], [1, 2])
simplified = list(p.iter_segments(clip=[0, 0, 100, 100]))
assert ([(list(x), y) for x, y in simplified] ==
[([50, 40], 1), ([75, 65], 2)])
p = Path([[50, 40]], [1])
simplified = list(p.iter_segments(clip=[0, 0, 100, 100]))
assert ([(list(x), y) for x, y in simplified] ==
[([50, 40], 1)])
|
0b25f5eb3be88c0429acd3148c27ed34e973cb1f9d5745e7413d46bed17d0ace
|
import io
import os
from pathlib import Path
import re
import tempfile
import warnings
import pytest
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import cbook, patheffects
from matplotlib.testing.decorators import image_comparison
from matplotlib.testing.determinism import (_determinism_source_date_epoch,
_determinism_check)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
needs_ghostscript = pytest.mark.skipif(
"eps" not in mpl.testing.compare.converter,
reason="This test needs a ghostscript installation")
needs_usetex = pytest.mark.skipif(
not mpl.checkdep_usetex(True),
reason="This test needs a TeX installation")
# This test tends to hit a TeX cache lock on AppVeyor.
@pytest.mark.flaky(reruns=3)
@pytest.mark.parametrize('format, use_log, rcParams', [
('ps', False, {}),
pytest.param('ps', False, {'ps.usedistiller': 'ghostscript'},
marks=needs_ghostscript),
pytest.param('ps', False, {'text.usetex': True},
marks=[needs_ghostscript, needs_usetex]),
('eps', False, {}),
('eps', True, {'ps.useafm': True}),
pytest.param('eps', False, {'text.usetex': True},
marks=[needs_ghostscript, needs_usetex]),
], ids=[
'ps',
'ps with distiller',
'ps with usetex',
'eps',
'eps afm',
'eps with usetex'
])
def test_savefig_to_stringio(format, use_log, rcParams):
mpl.rcParams.update(rcParams)
fig, ax = plt.subplots()
with io.StringIO() as s_buf, io.BytesIO() as b_buf:
if use_log:
ax.set_yscale('log')
ax.plot([1, 2], [1, 2])
ax.set_title("Déjà vu")
fig.savefig(s_buf, format=format)
fig.savefig(b_buf, format=format)
s_val = s_buf.getvalue().encode('ascii')
b_val = b_buf.getvalue()
# Remove comments from the output. This includes things that could
# change from run to run, such as the time.
s_val, b_val = [re.sub(b'%%.*?\n', b'', x) for x in [s_val, b_val]]
assert s_val == b_val.replace(b'\r\n', b'\n')
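# Hedged illustration (added; not part of the original tests): what the
# comment-stripping regex in test_savefig_to_stringio does to a minimal
# PS-like byte string.  Only the helper name is invented.
def _demo_strip_ps_comments():
    src = b"%%CreationDate: today\n/showpage\n%%EOF\n"
    assert re.sub(b'%%.*?\n', b'', src) == b"/showpage\n"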
def test_patheffects():
mpl.rcParams['path.effects'] = [
patheffects.withStroke(linewidth=4, foreground='w')]
fig, ax = plt.subplots()
ax.plot([1, 2, 3])
with io.BytesIO() as ps:
fig.savefig(ps, format='ps')
@needs_usetex
@needs_ghostscript
def test_tilde_in_tempfilename(tmpdir):
    # A tilde ~ in the tempdir path (e.g. TMPDIR, TMP or TEMP on Windows,
    # where a very long username makes Windows fall back to a short name)
    # broke LaTeX before https://github.com/matplotlib/matplotlib/pull/5928.
base_tempdir = Path(tmpdir, "short-1")
base_tempdir.mkdir()
# Change the path for new tempdirs, which is used internally by the ps
# backend to write a file.
with cbook._setattr_cm(tempfile, tempdir=str(base_tempdir)):
# usetex results in the latex call, which does not like the ~
mpl.rcParams['text.usetex'] = True
plt.plot([1, 2, 3, 4])
plt.xlabel(r'\textbf{time} (s)')
# use the PS backend to write the file...
plt.savefig(base_tempdir / 'tex_demo.eps', format="ps")
def test_source_date_epoch():
"""Test SOURCE_DATE_EPOCH support for PS output"""
# SOURCE_DATE_EPOCH support is not tested with text.usetex,
# because the produced timestamp comes from ghostscript:
# %%CreationDate: D:20000101000000Z00\'00\', and this could change
# with another ghostscript version.
_determinism_source_date_epoch(
"ps", b"%%CreationDate: Sat Jan 01 00:00:00 2000")
def test_determinism_all():
"""Test for reproducible PS output"""
_determinism_check(format="ps")
@needs_usetex
@needs_ghostscript
def test_determinism_all_tex():
"""Test for reproducible PS/tex output"""
_determinism_check(format="ps", usetex=True)
@image_comparison(baseline_images=["empty"], extensions=["eps"])
def test_transparency():
fig, ax = plt.subplots()
ax.set_axis_off()
ax.plot([0, 1], color="r", alpha=0)
ax.text(.5, .5, "foo", color="r", alpha=0)
@needs_usetex
def test_failing_latex(tmpdir):
"""Test failing latex subprocess call"""
mpl.rcParams['text.usetex'] = True
# This fails with "Double subscript"
plt.xlabel("$22_2_2$")
with pytest.raises(RuntimeError):
plt.savefig(Path(tmpdir, "tmpoutput.ps"))
|