repo_name
stringlengths 5
100
| path
stringlengths 4
375
| copies
stringclasses 991
values | size
stringlengths 4
7
| content
stringlengths 666
1M
| license
stringclasses 15
values |
---|---|---|---|---|---|
Szpadel/sonata2
|
sonata/main.py
|
2
|
149315
|
__license__ = """
Sonata, an elegant GTK+ client for the Music Player Daemon
Copyright 2006-2008 Scott Horowitz <[email protected]>
This file is part of Sonata.
Sonata is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
Sonata is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys, gettext, os, warnings
import urllib, urllib2, re, gc, shutil
import threading
import mpd
import gobject, gtk, pango
# Prevent deprecation warning for egg:
warnings.simplefilter('ignore', DeprecationWarning)
try:
import egg.trayicon
HAVE_EGG = True
HAVE_STATUS_ICON = False
except ImportError:
HAVE_EGG = False
HAVE_STATUS_ICON = True
# Reset so that we can see any other deprecation warnings
warnings.simplefilter('default', DeprecationWarning)
# Default to no sugar, then test...
HAVE_SUGAR = False
VOLUME_ICON_SIZE = 4
if 'SUGAR_BUNDLE_PATH' in os.environ:
try:
from sugar.activity import activity
HAVE_STATUS_ICON = False
HAVE_SUGAR = True
VOLUME_ICON_SIZE = 3
except:
pass
import mpdhelper as mpdh
import misc, ui, img, tray, formatting
from consts import consts
from pluginsystem import pluginsystem
from config import Config
import preferences, tagedit, artwork, about, scrobbler, info, library, streams, playlists, current
import lyricwiki # plug-ins
import rhapsodycovers
import dbus_plugin as dbus
from version import version
class Base(object):
### XXX Warning, a long __init__ ahead:
def __init__(self, args, window=None, _sugar=False):
# The following attributes were used but not defined here before:
self.album_current_artist = None
self.allow_art_search = None
self.choose_dialog = None
self.chooseimage_visible = None
self.imagelist = None
self.iterate_handler = None
self.local_dest_filename = None
self.notification_width = None
self.remote_albumentry = None
self.remote_artistentry = None
self.remote_dest_filename = None
self.remotefilelist = None
self.seekidle = None
self.statusicon = None
self.trayeventbox = None
self.trayicon = None
self.trayimage = None
self.artwork = None
self.client = mpd.MPDClient()
self.conn = False
# Constants
self.TAB_CURRENT = _("Current")
self.TAB_LIBRARY = _("Library")
self.TAB_PLAYLISTS = _("Playlists")
self.TAB_STREAMS = _("Streams")
self.TAB_INFO = _("Info")
# If the connection to MPD times out, this will cause the interface to freeze while
# the socket.connect() calls are repeatedly executed. Therefore, if we were not
# able to make a connection, slow down the iteration check to once every 15 seconds.
self.iterate_time_when_connected = 500
self.iterate_time_when_disconnected_or_stopped = 1000 # Slow down polling when disconnected stopped
self.trying_connection = False
self.traytips = tray.TrayIconTips()
# better keep a reference around
try:
self.dbus_service = dbus.SonataDBus(self.dbus_show, self.dbus_toggle, self.dbus_popup)
except Exception:
pass
dbus.start_dbus_interface()
self.gnome_session_management()
misc.create_dir('~/.covers/')
# Initialize vars for GUI
self.current_tab = self.TAB_CURRENT
self.prevconn = []
self.prevstatus = None
self.prevsonginfo = None
self.popuptimes = ['2', '3', '5', '10', '15', '30', _('Entire song')]
self.exit_now = False
self.ignore_toggle_signal = False
self.user_connect = False
self.sonata_loaded = False
self.call_gc_collect = False
self.album_reset_artist()
show_prefs = False
self.merge_id = None
self.actionGroupProfiles = None
self.skip_on_profiles_click = False
self.last_repeat = None
self.last_random = None
self.last_title = None
self.last_progress_frac = None
self.last_progress_text = None
self.last_status_text = ""
self.eggtrayfile = None
self.eggtrayheight = None
self.img_clicked = False
self.mpd_update_queued = False
# XXX get rid of all of these:
self.all_tab_names = [self.TAB_CURRENT, self.TAB_LIBRARY, self.TAB_PLAYLISTS, self.TAB_STREAMS, self.TAB_INFO]
all_tab_ids = "current library playlists streams info".split()
self.tabname2id = dict(zip(self.all_tab_names, all_tab_ids))
self.tabid2name = dict(zip(all_tab_ids, self.all_tab_names))
self.tabname2tab = dict()
self.tabname2focus = dict()
self.plugintabs = dict()
self.config = Config(_('Default Profile'), _("by") + " %A " + _("from") + " %B", library.library_set_data)
self.preferences = preferences.Preferences(self.config,
self.on_connectkey_pressed, self.on_currsong_notify,
self.update_infofile, self.settings_save,
self.populate_profiles_for_menu)
self.settings_load()
self.setup_prefs_callbacks()
if args.start_visibility is not None:
self.config.withdrawn = not args.start_visibility
if self.config.autoconnect:
self.user_connect = True
args.apply_profile_arg(self.config)
self.notebook_show_first_tab = not self.config.tabs_expanded or self.config.withdrawn
# Add some icons, assign pixbufs:
self.iconfactory = gtk.IconFactory()
ui.icon(self.iconfactory, 'sonata', self.find_path('sonata.png'))
ui.icon(self.iconfactory, 'artist', self.find_path('sonata-artist.png'))
ui.icon(self.iconfactory, 'album', self.find_path('sonata-album.png'))
icon_theme = gtk.icon_theme_get_default()
if HAVE_SUGAR:
activity_root = activity.get_bundle_path()
icon_theme.append_search_path(os.path.join(activity_root, 'share'))
img_width, _img_height = gtk.icon_size_lookup(VOLUME_ICON_SIZE)
for iconname in ('stock_volume-mute', 'stock_volume-min', 'stock_volume-med', 'stock_volume-max'):
try:
ui.icon(self.iconfactory, iconname, icon_theme.lookup_icon(iconname, img_width, gtk.ICON_LOOKUP_USE_BUILTIN).get_filename())
except:
# Fallback to Sonata-included icons:
ui.icon(self.iconfactory, iconname, self.find_path('sonata-%s.png' % iconname))
# Main window
if window is None:
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window_owner = True
else:
self.window = window
self.window_owner = False
if self.window_owner:
self.window.set_title('Sonata')
self.window.set_role('mainWindow')
self.window.set_resizable(True)
if self.config.ontop:
self.window.set_keep_above(True)
if self.config.sticky:
self.window.stick()
if not self.config.decorated:
self.window.set_decorated(False)
self.preferences.window = self.window
self.notebook = gtk.Notebook()
# Artwork
self.artwork = artwork.Artwork(self.config, self.find_path, misc.is_lang_rtl(self.window), lambda:self.info_imagebox.get_size_request(), self.schedule_gc_collect, self.target_image_filename, self.imagelist_append, self.remotefilelist_append, self.notebook.get_allocation, self.set_allow_art_search, self.status_is_play_or_pause, self.find_path('sonata-album.png'), self.get_current_song_text)
# Popup menus:
actions = [
('sortmenu', None, _('_Sort List')),
('plmenu', None, _('Sa_ve Selected to')),
('profilesmenu', None, _('_Connection')),
('playaftermenu', None, _('P_lay after')),
('updatemenu', None, _('_Update')),
('chooseimage_menu', gtk.STOCK_CONVERT, _('Use _Remote Image...'), None, None, self.image_remote),
('localimage_menu', gtk.STOCK_OPEN, _('Use _Local Image...'), None, None, self.image_local),
('fullscreencoverart_menu', gtk.STOCK_FULLSCREEN, _('_Fullscreen Mode'), 'F11', None, self.fullscreen_cover_art),
('resetimage_menu', gtk.STOCK_CLEAR, _('Reset Image'), None, None, self.artwork.on_reset_image),
('playmenu', gtk.STOCK_MEDIA_PLAY, _('_Play'), None, None, self.mpd_pp),
('pausemenu', gtk.STOCK_MEDIA_PAUSE, _('Pa_use'), None, None, self.mpd_pp),
('stopmenu', gtk.STOCK_MEDIA_STOP, _('_Stop'), None, None, self.mpd_stop),
('prevmenu', gtk.STOCK_MEDIA_PREVIOUS, _('Pre_vious'), None, None, self.mpd_prev),
('nextmenu', gtk.STOCK_MEDIA_NEXT, _('_Next'), None, None, self.mpd_next),
('quitmenu', gtk.STOCK_QUIT, _('_Quit'), None, None, self.on_delete_event_yes),
('removemenu', gtk.STOCK_REMOVE, _('_Remove'), None, None, self.on_remove),
('clearmenu', gtk.STOCK_CLEAR, _('_Clear'), '<Ctrl>Delete', None, self.mpd_clear),
('updatefullmenu', None, _('_Entire Library'), '<Ctrl><Shift>u', None, self.on_updatedb),
('updateselectedmenu', None, _('_Selected Items'), '<Ctrl>u', None, self.on_updatedb_shortcut),
('preferencemenu', gtk.STOCK_PREFERENCES, _('_Preferences...'), 'F5', None, self.on_prefs),
('aboutmenu', None, _('_About...'), 'F1', None, self.on_about),
('tagmenu', None, _('_Edit Tags...'), '<Ctrl>t', None, self.on_tags_edit),
('addmenu', gtk.STOCK_ADD, _('_Add'), '<Ctrl>d', None, self.on_add_item),
('replacemenu', gtk.STOCK_REDO, _('_Replace'), '<Ctrl>r', None, self.on_replace_item),
('add2menu', None, _('Add'), '<Shift><Ctrl>d', None, self.on_add_item_play),
('replace2menu', None, _('Replace'), '<Shift><Ctrl>r', None, self.on_replace_item_play),
('rmmenu', None, _('_Delete...'), None, None, self.on_remove),
('sortshuffle', None, _('Shuffle'), '<Alt>r', None, self.mpd_shuffle),
]
keyactions = [
('expandkey', None, 'Expand Key', '<Alt>Down', None, self.on_expand),
('collapsekey', None, 'Collapse Key', '<Alt>Up', None, self.on_collapse),
('ppkey', None, 'Play/Pause Key', '<Ctrl>p', None, self.mpd_pp),
('stopkey', None, 'Stop Key', '<Ctrl>s', None, self.mpd_stop),
('prevkey', None, 'Previous Key', '<Ctrl>Left', None, self.mpd_prev),
('nextkey', None, 'Next Key', '<Ctrl>Right', None, self.mpd_next),
('lowerkey', None, 'Lower Volume Key', '<Ctrl>minus', None, self.on_volume_lower),
('raisekey', None, 'Raise Volume Key', '<Ctrl>plus', None, self.on_volume_raise),
('raisekey2', None, 'Raise Volume Key 2', '<Ctrl>equal', None, self.on_volume_raise),
('quitkey', None, 'Quit Key', '<Ctrl>q', None, self.on_delete_event_yes),
('quitkey2', None, 'Quit Key 2', '<Ctrl>w', None, self.on_delete_event),
('connectkey', None, 'Connect Key', '<Alt>c', None, self.on_connectkey_pressed),
('disconnectkey', None, 'Disconnect Key', '<Alt>d', None, self.on_disconnectkey_pressed),
('searchkey', None, 'Search Key', '<Ctrl>h', None, self.on_library_search_shortcut),
('nexttabkey', None, 'Next Tab Key', '<Alt>Right', None, self.switch_to_next_tab),
('prevtabkey', None, 'Prev Tab Key', '<Alt>Left', None, self.switch_to_prev_tab),
]
tabactions = [('tab%skey' % i, None, 'Tab%s Key' % i,
'<Alt>%s' % i, None,
lambda _a, i=i: self.switch_to_tab_num(i-1))
for i in range(1, 10)]
toggle_actions = [
('showmenu', None, _('S_how Sonata'), None, None, self.on_withdraw_app_toggle, not self.config.withdrawn),
('repeatmenu', None, _('_Repeat'), None, None, self.on_repeat_clicked, False),
('randommenu', None, _('Rando_m'), None, None, self.on_random_clicked, False),
]
toggle_tabactions = [
(self.TAB_CURRENT, None, self.TAB_CURRENT, None, None, self.on_tab_toggle, self.config.current_tab_visible),
(self.TAB_LIBRARY, None, self.TAB_LIBRARY, None, None, self.on_tab_toggle, self.config.library_tab_visible),
(self.TAB_PLAYLISTS, None, self.TAB_PLAYLISTS, None, None, self.on_tab_toggle, self.config.playlists_tab_visible),
(self.TAB_STREAMS, None, self.TAB_STREAMS, None, None, self.on_tab_toggle, self.config.streams_tab_visible),
(self.TAB_INFO, None, self.TAB_INFO, None, None, self.on_tab_toggle, self.config.info_tab_visible),
]
uiDescription = """
<ui>
<popup name="imagemenu">
<menuitem action="chooseimage_menu"/>
<menuitem action="localimage_menu"/>
<menuitem action="fullscreencoverart_menu"/>
<separator name="FM1"/>
<menuitem action="resetimage_menu"/>
</popup>
<popup name="traymenu">
<menuitem action="showmenu"/>
<separator name="FM1"/>
<menuitem action="playmenu"/>
<menuitem action="pausemenu"/>
<menuitem action="stopmenu"/>
<menuitem action="prevmenu"/>
<menuitem action="nextmenu"/>
<separator name="FM2"/>
<menuitem action="quitmenu"/>
</popup>
<popup name="mainmenu">
<menuitem action="addmenu"/>
<menuitem action="replacemenu"/>
<menu action="playaftermenu">
<menuitem action="add2menu"/>
<menuitem action="replace2menu"/>
</menu>
<menuitem action="newmenu"/>
<menuitem action="editmenu"/>
<menuitem action="removemenu"/>
<menuitem action="clearmenu"/>
<menuitem action="tagmenu"/>
<menuitem action="renamemenu"/>
<menuitem action="rmmenu"/>
<menu action="sortmenu">
<menuitem action="sortbytitle"/>
<menuitem action="sortbyartist"/>
<menuitem action="sortbyalbum"/>
<menuitem action="sortbyfile"/>
<menuitem action="sortbydirfile"/>
<separator name="FM3"/>
<menuitem action="sortshuffle"/>
<menuitem action="sortreverse"/>
</menu>
<menu action="plmenu">
<menuitem action="savemenu"/>
<separator name="FM4"/>
</menu>
<separator name="FM1"/>
<menuitem action="repeatmenu"/>
<menuitem action="randommenu"/>
<menu action="updatemenu">
<menuitem action="updateselectedmenu"/>
<menuitem action="updatefullmenu"/>
</menu>
<separator name="FM2"/>
<menu action="profilesmenu">
</menu>
<menuitem action="preferencemenu"/>
<menuitem action="aboutmenu"/>
<menuitem action="quitmenu"/>
</popup>
<popup name="librarymenu">
<menuitem action="filesystemview"/>
<menuitem action="artistview"/>
<menuitem action="genreview"/>
<menuitem action="albumview"/>
</popup>
<popup name="hidden">
<menuitem action="centerplaylistkey"/>
</popup>
"""
uiDescription += '<popup name="notebookmenu">'
uiDescription += ''.join('<menuitem action="%s"/>' % name
for name in self.all_tab_names)
uiDescription += "</popup>"
uiDescription += ''.join('<accelerator action="%s"/>' % a[0]
for a in keyactions + tabactions)
uiDescription += "</ui>"
# Try to connect to MPD:
self.mpd_connect(blocking=True)
if self.conn:
self.status = mpdh.status(self.client)
self.iterate_time = self.iterate_time_when_connected
self.songinfo = mpdh.currsong(self.client)
self.artwork.update_songinfo(self.songinfo)
elif self.config.initial_run:
show_prefs = True
# Realizing self.window will allow us to retrieve the theme's
# link-color; we can then apply to it various widgets:
try:
self.window.realize()
linkcolor = self.window.style_get_property("link-color").to_string()
except:
linkcolor = None
# Audioscrobbler
self.scrobbler = scrobbler.Scrobbler(self.config)
self.scrobbler.import_module()
self.scrobbler.init()
self.preferences.scrobbler = self.scrobbler
# Plug-ins imported as modules
self.lyricwiki = lyricwiki.LyricWiki()
self.rhapsodycovers = rhapsodycovers.RhapsodyCovers()
# Current tab
self.current = current.Current(self.config, self.client, self.TAB_CURRENT, self.on_current_button_press, self.connected, lambda:self.sonata_loaded, lambda:self.songinfo, self.update_statusbar, self.iterate_now, lambda:self.library.libsearchfilter_get_style(), self.new_tab)
self.current_treeview = self.current.get_treeview()
self.current_selection = self.current.get_selection()
currentactions = [
('centerplaylistkey', None, 'Center Playlist Key', '<Ctrl>i', None, self.current.center_song_in_list),
('sortbyartist', None, _('By Artist'), None, None, self.current.on_sort_by_artist),
('sortbyalbum', None, _('By Album'), None, None, self.current.on_sort_by_album),
('sortbytitle', None, _('By Song Title'), None, None, self.current.on_sort_by_title),
('sortbyfile', None, _('By File Name'), None, None, self.current.on_sort_by_file),
('sortbydirfile', None, _('By Dir & File Name'), None, None, self.current.on_sort_by_dirfile),
('sortreverse', None, _('Reverse List'), None, None, self.current.on_sort_reverse),
]
# Library tab
self.library = library.Library(self.config, self.client, self.artwork, self.TAB_LIBRARY, self.find_path('sonata-album.png'), self.settings_save, self.current.filtering_entry_make_red, self.current.filtering_entry_revert_color, self.current.filter_key_pressed, self.on_add_item, self.connected, self.on_library_button_press, self.new_tab)
self.library_treeview = self.library.get_treeview()
self.library_selection = self.library.get_selection()
libraryactions = self.library.get_libraryactions()
# Info tab
self.info = info.Info(self.config, self.artwork.get_info_image(), linkcolor, self.on_link_click, self.get_playing_song, self.TAB_INFO, self.on_image_activate, self.on_image_motion_cb, self.on_image_drop_cb, self.album_return_artist_and_tracks, self.new_tab)
self.info_imagebox = self.info.get_info_imagebox()
# Streams tab
self.streams = streams.Streams(self.config, self.window, self.on_streams_button_press, self.on_add_item, self.settings_save, self.TAB_STREAMS)
self.streams_treeview = self.streams.get_treeview()
self.streams_selection = self.streams.get_selection()
streamsactions = [
('newmenu', None, _('_New...'), '<Ctrl>n', None, self.streams.on_streams_new),
('editmenu', None, _('_Edit...'), None, None, self.streams.on_streams_edit),
]
# Playlists tab
self.playlists = playlists.Playlists(self.config, self.window, self.client, lambda:self.UIManager, self.update_menu_visibility, self.iterate_now, self.on_add_item, self.on_playlists_button_press, self.current.get_current_songs, self.connected, self.add_selected_to_playlist, self.TAB_PLAYLISTS)
self.playlists_treeview = self.playlists.get_treeview()
self.playlists_selection = self.playlists.get_selection()
playlistsactions = [
('savemenu', None, _('_New Playlist...'), '<Ctrl><Shift>s', None, self.playlists.on_playlist_save),
('renamemenu', None, _('_Rename...'), None, None, self.playlists.on_playlist_rename),
]
# Main app:
self.UIManager = gtk.UIManager()
actionGroup = gtk.ActionGroup('Actions')
actionGroup.add_actions(actions)
actionGroup.add_actions(keyactions)
actionGroup.add_actions(tabactions)
actionGroup.add_actions(currentactions)
actionGroup.add_actions(libraryactions)
actionGroup.add_actions(streamsactions)
actionGroup.add_actions(playlistsactions)
actionGroup.add_toggle_actions(toggle_actions)
actionGroup.add_toggle_actions(toggle_tabactions)
self.UIManager.insert_action_group(actionGroup, 0)
self.UIManager.add_ui_from_string(uiDescription)
self.populate_profiles_for_menu()
self.window.add_accel_group(self.UIManager.get_accel_group())
self.mainmenu = self.UIManager.get_widget('/mainmenu')
self.randommenu = self.UIManager.get_widget('/mainmenu/randommenu')
self.repeatmenu = self.UIManager.get_widget('/mainmenu/repeatmenu')
self.imagemenu = self.UIManager.get_widget('/imagemenu')
self.traymenu = self.UIManager.get_widget('/traymenu')
self.librarymenu = self.UIManager.get_widget('/librarymenu')
self.library.set_librarymenu(self.librarymenu)
self.notebookmenu = self.UIManager.get_widget('/notebookmenu')
mainhbox = gtk.HBox()
mainvbox = gtk.VBox()
tophbox = gtk.HBox()
self.albumimage = self.artwork.get_albumimage()
self.imageeventbox = ui.eventbox(add=self.albumimage)
self.imageeventbox.drag_dest_set(gtk.DEST_DEFAULT_HIGHLIGHT | gtk.DEST_DEFAULT_DROP, [("text/uri-list", 0, 80), ("text/plain", 0, 80)], gtk.gdk.ACTION_DEFAULT)
if not self.config.show_covers:
ui.hide(self.imageeventbox)
tophbox.pack_start(self.imageeventbox, False, False, 5)
topvbox = gtk.VBox()
toptophbox = gtk.HBox()
self.prevbutton = ui.button(stock=gtk.STOCK_MEDIA_PREVIOUS, relief=gtk.RELIEF_NONE, can_focus=False, hidetxt=True)
self.ppbutton = ui.button(stock=gtk.STOCK_MEDIA_PLAY, relief=gtk.RELIEF_NONE, can_focus=False, hidetxt=True)
self.stopbutton = ui.button(stock=gtk.STOCK_MEDIA_STOP, relief=gtk.RELIEF_NONE, can_focus=False, hidetxt=True)
self.nextbutton = ui.button(stock=gtk.STOCK_MEDIA_NEXT, relief=gtk.RELIEF_NONE, can_focus=False, hidetxt=True)
for mediabutton in (self.prevbutton, self.ppbutton, self.stopbutton, self.nextbutton):
toptophbox.pack_start(mediabutton, False, False, 0)
if not self.config.show_playback:
ui.hide(mediabutton)
self.progressbox = gtk.VBox()
self.progresslabel = ui.label(w=-1, h=6)
self.progressbox.pack_start(self.progresslabel)
self.progressbar = ui.progressbar(orient=gtk.PROGRESS_LEFT_TO_RIGHT, frac=0, step=0.05, ellipsize=pango.ELLIPSIZE_END)
self.progresseventbox = ui.eventbox(add=self.progressbar, visible=True)
self.progressbox.pack_start(self.progresseventbox, False, False, 0)
self.progresslabel2 = ui.label(w=-1, h=6)
self.progressbox.pack_start(self.progresslabel2)
toptophbox.pack_start(self.progressbox, True, True, 0)
if not self.config.show_progress:
ui.hide(self.progressbox)
self.volumebutton = gtk.VolumeButton()
self.volumebutton.set_adjustment(gtk.Adjustment(0, 0, 100, 5, 5,))
if not self.config.show_playback:
ui.hide(self.volumebutton)
toptophbox.pack_start(self.volumebutton, False, False, 0)
topvbox.pack_start(toptophbox, False, False, 2)
self.expander = ui.expander(text=_("Playlist"), expand=self.config.expanded, can_focus=False)
expanderbox = gtk.VBox()
self.cursonglabel1 = ui.label(y=0)
self.cursonglabel2 = ui.label(y=0)
expanderbox.pack_start(self.cursonglabel1, True, True, 0)
expanderbox.pack_start(self.cursonglabel2, True, True, 0)
self.expander.set_label_widget(expanderbox)
topvbox.pack_start(self.expander, False, False, 2)
tophbox.pack_start(topvbox, True, True, 3)
mainvbox.pack_start(tophbox, False, False, 5)
self.notebook.set_tab_pos(gtk.POS_TOP)
self.notebook.set_scrollable(True)
mainvbox.pack_start(self.notebook, True, True, 5)
self.statusbar = gtk.Statusbar()
self.statusbar.set_has_resize_grip(True)
if not self.config.show_statusbar or not self.config.expanded:
ui.hide(self.statusbar)
mainvbox.pack_start(self.statusbar, False, False, 0)
mainhbox.pack_start(mainvbox, True, True, 3)
if self.window_owner:
self.window.add(mainhbox)
self.window.move(self.config.x, self.config.y)
self.window.set_size_request(270, -1)
elif HAVE_SUGAR:
self.window.set_canvas(mainhbox)
if not self.config.expanded:
ui.hide(self.notebook)
self.cursonglabel1.set_markup('<big><b>' + _('Stopped') + '</b></big>')
self.cursonglabel2.set_markup('<small>' + _('Click to expand') + '</small>')
if self.window_owner:
self.window.set_default_size(self.config.w, 1)
else:
self.cursonglabel1.set_markup('<big><b>' + _('Stopped') + '</b></big>')
self.cursonglabel2.set_markup('<small>' + _('Click to collapse') + '</small>')
if self.window_owner:
self.window.set_default_size(self.config.w, self.config.h)
self.expander.set_tooltip_text(self.cursonglabel1.get_text())
if not self.conn:
self.progressbar.set_text(_('Not Connected'))
elif not self.status:
self.progressbar.set_text(_('No Read Permission'))
# Update tab positions: XXX move to self.new_tab
self.notebook.reorder_child(self.current.get_widgets(), self.config.current_tab_pos)
self.notebook.reorder_child(self.library.get_widgets(), self.config.library_tab_pos)
self.notebook.reorder_child(self.playlists.get_widgets(), self.config.playlists_tab_pos)
self.notebook.reorder_child(self.streams.get_widgets(), self.config.streams_tab_pos)
self.notebook.reorder_child(self.info.get_widgets(), self.config.info_tab_pos)
self.last_tab = self.notebook_get_tab_text(self.notebook, 0)
# Song notification window:
outtertipbox = gtk.VBox()
tipbox = gtk.HBox()
self.trayalbumeventbox, self.trayalbumimage2 = self.artwork.get_trayalbum()
hiddenlbl = ui.label(w=2, h=-1)
tipbox.pack_start(hiddenlbl, False, False, 0)
tipbox.pack_start(self.trayalbumeventbox, False, False, 0)
tipbox.pack_start(self.trayalbumimage2, False, False, 0)
if not self.config.show_covers:
ui.hide(self.trayalbumeventbox)
ui.hide(self.trayalbumimage2)
innerbox = gtk.VBox()
self.traycursonglabel1 = ui.label(markup=_("Playlist"), y=1)
self.traycursonglabel2 = ui.label(markup=_("Playlist"), y=0)
label1 = ui.label(markup='<span size="10"> </span>')
innerbox.pack_start(label1)
innerbox.pack_start(self.traycursonglabel1, True, True, 0)
innerbox.pack_start(self.traycursonglabel2, True, True, 0)
self.trayprogressbar = ui.progressbar(orient=gtk.PROGRESS_LEFT_TO_RIGHT, frac=0, step=0.05, ellipsize=pango.ELLIPSIZE_NONE)
label2 = ui.label(markup='<span size="10"> </span>')
innerbox.pack_start(label2)
innerbox.pack_start(self.trayprogressbar, False, False, 0)
if not self.config.show_progress:
ui.hide(self.trayprogressbar)
label3 = ui.label(markup='<span size="10"> </span>')
innerbox.pack_start(label3)
tipbox.pack_start(innerbox, True, True, 6)
outtertipbox.pack_start(tipbox, False, False, 2)
outtertipbox.show_all()
self.traytips.add_widget(outtertipbox)
self.tooltip_set_window_width()
# Fullscreen cover art window
self.fullscreencoverart = gtk.Window()
self.fullscreencoverart.set_title(_("Cover Art"))
self.fullscreencoverart.set_decorated(True)
self.fullscreencoverart.fullscreen()
style = self.fullscreencoverart.get_style().copy()
style.bg[gtk.STATE_NORMAL] = self.fullscreencoverart.get_colormap().alloc_color("black")
style.bg_pixmap[gtk.STATE_NORMAL] = None
self.fullscreencoverart.set_style(style)
self.fullscreencoverart.add_accel_group(self.UIManager.get_accel_group())
fscavbox = gtk.VBox()
fscahbox = gtk.HBox()
self.fullscreenalbumimage = self.artwork.get_fullscreenalbumimage()
fscalbl, fscalbl2 = self.artwork.get_fullscreenalbumlabels()
fscahbox.pack_start(self.fullscreenalbumimage, True, False, 0)
fscavbox.pack_start(ui.label(), True, False, 0)
fscavbox.pack_start(fscahbox, False, False, 0)
fscavbox.pack_start(fscalbl, False, False, 5)
fscavbox.pack_start(fscalbl2, False, False, 5)
fscavbox.pack_start(ui.label(), True, False, 0)
if not self.config.show_covers:
ui.hide(self.fullscreenalbumimage)
self.fullscreencoverart.add(fscavbox)
# Connect to signals
self.window.add_events(gtk.gdk.BUTTON_PRESS_MASK)
self.traytips.add_events(gtk.gdk.BUTTON_PRESS_MASK)
self.traytips.connect('button_press_event', self.on_traytips_press)
self.window.connect('delete_event', self.on_delete_event)
self.window.connect('configure_event', self.on_window_configure)
self.window.connect('key-press-event', self.on_topwindow_keypress)
self.imageeventbox.connect('button_press_event', self.on_image_activate)
self.imageeventbox.connect('drag_motion', self.on_image_motion_cb)
self.imageeventbox.connect('drag_data_received', self.on_image_drop_cb)
self.ppbutton.connect('clicked', self.mpd_pp)
self.stopbutton.connect('clicked', self.mpd_stop)
self.prevbutton.connect('clicked', self.mpd_prev)
self.nextbutton.connect('clicked', self.mpd_next)
self.progresseventbox.connect('button_press_event', self.on_progressbar_press)
self.progresseventbox.connect('scroll_event', self.on_progressbar_scroll)
self.volumebutton.connect('value-changed', self.on_volume_change)
self.expander.connect('activate', self.on_expander_activate)
self.randommenu.connect('toggled', self.on_random_clicked)
self.repeatmenu.connect('toggled', self.on_repeat_clicked)
self.cursonglabel1.connect('notify::label', self.on_currsong_notify)
self.progressbar.connect('notify::fraction', self.on_progressbar_notify_fraction)
self.progressbar.connect('notify::text', self.on_progressbar_notify_text)
self.mainwinhandler = self.window.connect('button_press_event', self.on_window_click)
self.notebook.connect('size-allocate', self.on_notebook_resize)
self.notebook.connect('switch-page', self.on_notebook_page_change)
self.fullscreencoverart.add_events(gtk.gdk.BUTTON_PRESS_MASK)
self.fullscreencoverart.connect("button-press-event", self.fullscreen_cover_art_close, False)
self.fullscreencoverart.connect("key-press-event", self.fullscreen_cover_art_close, True)
for treeview in [self.current_treeview, self.library_treeview, self.playlists_treeview, self.streams_treeview]:
treeview.connect('popup_menu', self.on_menu_popup)
for treeviewsel in [self.current_selection, self.library_selection, self.playlists_selection, self.streams_selection]:
treeviewsel.connect('changed', self.on_treeview_selection_changed)
for widget in [self.ppbutton, self.prevbutton, self.stopbutton, self.nextbutton, self.progresseventbox, self.expander]:
widget.connect('button_press_event', self.menu_popup)
self.systemtray_initialize()
# This will ensure that "Not connected" is shown in the systray tooltip
if not self.conn:
self.update_cursong()
# Ensure that the systemtray icon is added here. This is really only
# important if we're starting in hidden (minimized-to-tray) mode:
if self.window_owner and self.config.withdrawn:
while gtk.events_pending():
gtk.main_iteration()
dbus.init_gnome_mediakeys(self.mpd_pp, self.mpd_stop, self.mpd_prev, self.mpd_next)
# Try to connect to mmkeys signals, if no dbus and gnome 2.18+
if not dbus.using_gnome_mediakeys():
try:
import mmkeys
# this must be an attribute to keep it around:
self.keys = mmkeys.MmKeys()
self.keys.connect("mm_prev", self.mpd_prev)
self.keys.connect("mm_next", self.mpd_next)
self.keys.connect("mm_playpause", self.mpd_pp)
self.keys.connect("mm_stop", self.mpd_stop)
except ImportError:
pass
# Set up current view
self.currentdata = self.current.get_model()
# Initialize playlist data and widget
self.playlistsdata = self.playlists.get_model()
# Initialize streams data and widget
self.streamsdata = self.streams.get_model()
# Initialize library data and widget
self.librarydata = self.library.get_model()
self.artwork.library_artwork_init(self.librarydata, consts.LIB_COVER_SIZE)
if self.window_owner:
icon = self.window.render_icon('sonata', gtk.ICON_SIZE_DIALOG)
self.window.set_icon(icon)
self.streams.populate()
self.iterate_now()
if self.window_owner:
if self.config.withdrawn:
if (HAVE_EGG and self.trayicon.get_property('visible')) or (HAVE_STATUS_ICON and self.statusicon.is_embedded() and self.statusicon.get_visible()):
ui.hide(self.window)
self.window.show_all()
# Ensure that button images are displayed despite GTK+ theme
self.window.get_settings().set_property("gtk-button-images", True)
if self.config.update_on_start:
self.on_updatedb(None)
self.notebook.set_no_show_all(False)
self.window.set_no_show_all(False)
if show_prefs:
self.on_prefs(None)
self.config.initial_run = False
# Ensure that sonata is loaded before we display the notif window
self.sonata_loaded = True
self.on_currsong_notify()
self.current.center_song_in_list()
if HAVE_STATUS_ICON:
gobject.timeout_add(250, self.iterate_status_icon)
gc.disable()
gobject.idle_add(self.header_save_column_widths)
pluginsystem.notify_of('tabs',
self.on_enable_tab,
self.on_disable_tab)
# Autostart plugins
for plugin in pluginsystem.get_info():
if plugin.name in self.config.autostart_plugins:
pluginsystem.set_enabled(plugin, True)
# New plugins
for plugin in pluginsystem.get_info():
if plugin.name not in self.config.known_plugins:
self.config.known_plugins.append(plugin.name)
if plugin.name in consts.DEFAULT_PLUGINS:
print _("Enabling new plug-in %s..." %
plugin.name)
pluginsystem.set_enabled(plugin, True)
else:
print _("Found new plug-in %s." %
plugin.name)
### Tab system:
def on_enable_tab(self, _plugin, tab):
self.plugintabs[tab] = self.new_tab(*tab())
def on_disable_tab(self, _plugin, tab):
self.notebook.remove(self.plugintabs.pop(tab))
def new_tab(self, page, stock, text, focus):
# create the "ear" of the tab:
hbox = gtk.HBox()
hbox.pack_start(ui.image(stock=stock), False, False, 2)
hbox.pack_start(ui.label(text=text), False, False, 2)
evbox = ui.eventbox(add=hbox)
evbox.show_all()
evbox.connect("button_press_event", self.on_tab_click)
# create the actual tab:
self.notebook.append_page(page, evbox)
if (text in self.tabname2id and
not getattr(self.config,
self.tabname2id[text]+'_tab_visible')):
ui.hide(page)
self.notebook.set_tab_reorderable(page, True)
if self.config.tabs_expanded:
self.notebook.set_tab_label_packing(page, True, True, gtk.PACK_START)
self.tabname2tab[text] = page
self.tabname2focus[text] = focus
return page
### "Model, logic":
def connected(self):
return self.conn
def status_is_play_or_pause(self):
return (self.conn and self.status and
self.status.get('state', None) in ['play', 'pause'])
def get_playing_song(self):
if self.status_is_play_or_pause() and self.songinfo:
return self.songinfo
return None
def playing_song_change(self):
self.artwork.artwork_update()
for _plugin, cb in pluginsystem.get('playing_song_observers'):
cb(self.get_playing_song())
def get_current_song_text(self):
return (self.cursonglabel1.get_text(),
self.cursonglabel2.get_text())
def set_allow_art_search(self):
self.allow_art_search = True
### XXX The rest:
def gnome_session_management(self):
try:
import gnome, gnome.ui
# Code thanks to quodlibet:
# XXX gnome.init sets process name, locale...
gnome.init("sonata", version)
misc.setlocale()
client = gnome.ui.master_client()
client.set_restart_style(gnome.ui.RESTART_IF_RUNNING)
command = os.path.normpath(os.path.join(os.getcwd(), sys.argv[0]))
try:
client.set_restart_command([command] + sys.argv[1:])
except TypeError:
# Fedora systems have a broken gnome-python wrapper for this function.
# http://www.sacredchao.net/quodlibet/ticket/591
# http://trac.gajim.org/ticket/929
client.set_restart_command(len(sys.argv), [command] + sys.argv[1:])
client.connect('die', gtk.main_quit)
except:
pass
def populate_profiles_for_menu(self):
host, port, _password = misc.mpd_env_vars()
if self.merge_id:
self.UIManager.remove_ui(self.merge_id)
if self.actionGroupProfiles:
self.UIManager.remove_action_group(self.actionGroupProfiles)
self.actionGroupProfiles = gtk.ActionGroup('MPDProfiles')
self.UIManager.ensure_update()
profile_names = [_("MPD_HOST/PORT")] if host or port else self.config.profile_names
actions = [(str(i), None,
"[%s] %s" % (i+1, name.replace("_", "__")), None,
None, i)
for i, name in enumerate(profile_names)]
actions.append(('disconnect', None, _('Disconnect'), None, None, len(self.config.profile_names)))
active_radio = 0 if host or port else self.config.profile_num
if not self.conn:
active_radio = len(self.config.profile_names)
self.actionGroupProfiles.add_radio_actions(actions, active_radio, self.on_profiles_click)
uiDescription = """
<ui>
<popup name="mainmenu">
<menu action="profilesmenu">
"""
uiDescription += "".join(
'<menuitem action=\"%s\" position="top"/>' % action[0]
for action in reversed(actions))
uiDescription += """</menu></popup></ui>"""
self.merge_id = self.UIManager.add_ui_from_string(uiDescription)
self.UIManager.insert_action_group(self.actionGroupProfiles, 0)
self.UIManager.get_widget('/hidden').set_property('visible', False)
def on_profiles_click(self, _radioaction, profile):
if self.skip_on_profiles_click:
return
if profile.get_name() == 'disconnect':
self.on_disconnectkey_pressed(None)
else:
# Clear sonata before we try to connect:
self.mpd_disconnect()
self.iterate_now()
# Now connect to new profile:
self.config.profile_num = profile.get_current_value()
self.on_connectkey_pressed(None)
def mpd_connect(self, blocking=False, force=False):
if blocking:
self._mpd_connect(blocking, force)
else:
thread = threading.Thread(target=self._mpd_connect, args=(blocking, force))
thread.setDaemon(True)
thread.start()
def _mpd_connect(self, _blocking, force):
if self.trying_connection:
return
self.trying_connection = True
if self.user_connect or force:
mpdh.call(self.client, 'disconnect')
host, port, password = misc.mpd_env_vars()
if not host:
host = self.config.host[self.config.profile_num]
if not port:
port = self.config.port[self.config.profile_num]
if not password:
password = self.config.password[self.config.profile_num]
mpdh.call(self.client, 'connect', host, port)
if len(password) > 0:
mpdh.call(self.client, 'password', password)
test = mpdh.status(self.client)
if test:
self.conn = True
else:
self.conn = False
else:
self.conn = False
if not self.conn:
self.status = None
self.songinfo = None
if self.artwork is not None:
self.artwork.update_songinfo(self.songinfo)
self.iterate_time = self.iterate_time_when_disconnected_or_stopped
self.trying_connection = False
def mpd_disconnect(self):
if self.conn:
mpdh.call(self.client, 'close')
mpdh.call(self.client, 'disconnect')
self.conn = False
def on_connectkey_pressed(self, _event=None):
self.user_connect = True
# Update selected radio button in menu:
self.skip_on_profiles_click = True
host, port, _password = misc.mpd_env_vars()
index = str(0 if host or port else self.config.profile_num)
self.actionGroupProfiles.get_action(index).activate()
self.skip_on_profiles_click = False
# Connect:
self.mpd_connect(force=True)
self.iterate_now()
def on_disconnectkey_pressed(self, _event):
self.user_connect = False
# Update selected radio button in menu:
self.skip_on_profiles_click = True
self.actionGroupProfiles.get_action('disconnect').activate()
self.skip_on_profiles_click = False
# Disconnect:
self.mpd_disconnect()
def update_status(self):
try:
if not self.conn:
self.mpd_connect()
if self.conn:
self.iterate_time = self.iterate_time_when_connected
self.status = mpdh.status(self.client)
if self.status:
if self.status['state'] == 'stop':
self.iterate_time = self.iterate_time_when_disconnected_or_stopped
self.songinfo = mpdh.currsong(self.client)
self.artwork.update_songinfo(self.songinfo)
if not self.last_repeat or self.last_repeat != self.status['repeat']:
self.repeatmenu.set_active(self.status['repeat'] == '1')
if not self.last_random or self.last_random != self.status['random']:
self.randommenu.set_active(self.status['random'] == '1')
if self.status['xfade'] == '0':
self.config.xfade_enabled = False
else:
self.config.xfade_enabled = True
self.config.xfade = int(self.status['xfade'])
if self.config.xfade > 30:
self.config.xfade = 30
self.last_repeat = self.status['repeat']
self.last_random = self.status['random']
return
except:
pass
self.prevconn = self.client
self.prevstatus = self.status
self.prevsonginfo = self.songinfo
self.conn = False
self.status = None
self.songinfo = None
self.artwork.update_songinfo(self.songinfo)
def iterate(self):
self.update_status()
self.info_update(False)
if self.conn != self.prevconn:
self.handle_change_conn()
if self.status != self.prevstatus:
self.handle_change_status()
if self.songinfo != self.prevsonginfo:
self.handle_change_song()
self.prevconn = self.conn
self.prevstatus = self.status
self.prevsonginfo = self.songinfo
self.iterate_handler = gobject.timeout_add(self.iterate_time, self.iterate) # Repeat ad infitum..
if self.config.show_trayicon:
if HAVE_STATUS_ICON:
if self.statusicon.is_embedded() and not self.statusicon.get_visible():
# Systemtray appears, add icon:
self.systemtray_initialize()
elif not self.statusicon.is_embedded() and self.config.withdrawn:
# Systemtray gone, unwithdraw app:
self.withdraw_app_undo()
elif HAVE_EGG:
if not self.trayicon.get_property('visible'):
# Systemtray appears, add icon:
self.systemtray_initialize()
if self.call_gc_collect:
gc.collect()
self.call_gc_collect = False
def schedule_gc_collect(self):
self.call_gc_collect = True
def iterate_stop(self):
try:
gobject.source_remove(self.iterate_handler)
except:
pass
def iterate_now(self):
# Since self.iterate_time_when_connected has been
# slowed down to 500ms, we'll call self.iterate_now()
# whenever the user performs an action that requires
# updating the client
self.iterate_stop()
self.iterate()
def iterate_status_icon(self):
# Polls for the users' cursor position to display the custom tooltip window when over the
# gtk.StatusIcon. We use this instead of self.iterate() in order to poll more often and
# increase responsiveness.
if self.config.show_trayicon:
if self.statusicon.is_embedded() and self.statusicon.get_visible():
self.tooltip_show_manually()
gobject.timeout_add(250, self.iterate_status_icon)
def on_topwindow_keypress(self, _widget, event):
shortcut = gtk.accelerator_name(event.keyval, event.state)
shortcut = shortcut.replace("<Mod2>", "")
# These shortcuts were moved here so that they don't interfere with searching the library
if shortcut == 'BackSpace' and self.current_tab == self.TAB_LIBRARY:
return self.library.library_browse_parent(None)
elif shortcut == 'Escape':
if self.current_tab == self.TAB_LIBRARY and self.library.search_visible():
self.library.on_search_end(None)
elif self.current_tab == self.TAB_CURRENT and self.current.filterbox_visible:
self.current.searchfilter_toggle(None)
elif self.config.minimize_to_systray:
if HAVE_STATUS_ICON and self.statusicon.is_embedded() and self.statusicon.get_visible():
self.withdraw_app()
elif HAVE_EGG and self.trayicon.get_property('visible'):
self.withdraw_app()
return
elif shortcut == 'Delete':
self.on_remove(None)
if self.current_tab == self.TAB_CURRENT:
if event.state & (gtk.gdk.CONTROL_MASK | gtk.gdk.MOD1_MASK):
return
# XXX this isn't the right thing with GTK input methods:
text = unichr(gtk.gdk.keyval_to_unicode(event.keyval))
# We only want to toggle open the filterbar if the key press is actual text! This
# will ensure that we skip, e.g., F5, Alt, Ctrl, ...
if text != u"\x00" and text.strip():
if not self.current.filterbox_visible:
if text != u"/":
self.current.searchfilter_toggle(None, text)
else:
self.current.searchfilter_toggle(None)
def settings_load(self):
self.config.settings_load_real(library.library_set_data)
def settings_save(self):
self.header_save_column_widths()
self.config.current_tab_pos = self.notebook_get_tab_num(self.notebook, self.TAB_CURRENT)
self.config.library_tab_pos = self.notebook_get_tab_num(self.notebook, self.TAB_LIBRARY)
self.config.playlists_tab_pos = self.notebook_get_tab_num(self.notebook, self.TAB_PLAYLISTS)
self.config.streams_tab_pos = self.notebook_get_tab_num(self.notebook, self.TAB_STREAMS)
self.config.info_tab_pos = self.notebook_get_tab_num(self.notebook, self.TAB_INFO)
autostart_plugins = []
for plugin in pluginsystem.plugin_infos:
if plugin._enabled:
autostart_plugins.append(plugin.name)
self.config.autostart_plugins = autostart_plugins
self.config.settings_save_real(library.library_get_data)
def handle_change_conn(self):
if not self.conn:
for mediabutton in (self.ppbutton, self.stopbutton, self.prevbutton, self.nextbutton, self.volumebutton):
mediabutton.set_property('sensitive', False)
self.currentdata.clear()
if self.current_treeview.get_model():
self.current_treeview.get_model().clear()
if HAVE_STATUS_ICON:
self.statusicon.set_from_file(self.find_path('sonata_disconnect.png'))
elif HAVE_EGG and self.eggtrayheight:
self.eggtrayfile = self.find_path('sonata_disconnect.png')
self.trayimage.set_from_pixbuf(img.get_pixbuf_of_size(gtk.gdk.pixbuf_new_from_file(self.eggtrayfile), self.eggtrayheight)[0])
self.info_update(True)
if self.current.filterbox_visible:
gobject.idle_add(self.current.searchfilter_toggle, None)
if self.library.search_visible():
self.library.on_search_end(None)
self.handle_change_song()
self.handle_change_status()
else:
for mediabutton in (self.ppbutton, self.stopbutton, self.prevbutton, self.nextbutton, self.volumebutton):
mediabutton.set_property('sensitive', True)
if self.sonata_loaded:
self.library.library_browse(library.library_set_data(path="/"))
self.playlists.populate()
self.streams.populate()
self.on_notebook_page_change(self.notebook, 0, self.notebook.get_current_page())
def info_update(self, update_all):
playing_or_paused = self.status_is_play_or_pause()
newbitrate = None
if self.status:
newbitrate = self.status.get('bitrate', '')
if newbitrate:
newbitrate += " kbps"
self.info.update(playing_or_paused, newbitrate, self.songinfo,
update_all)
def on_treeview_selection_changed(self, treeselection):
self.update_menu_visibility()
if treeselection == self.current.get_selection():
# User previously clicked inside group of selected rows, re-select
# rows so it doesn't look like anything changed:
if self.current.sel_rows:
for row in self.current.sel_rows:
treeselection.select_path(row)
# Update lib artwork
self.library.on_library_scrolled(None, None)
def on_library_button_press(self, widget, event):
if self.on_button_press(widget, event, False): return True
def on_current_button_press(self, widget, event):
if self.on_button_press(widget, event, True): return True
def on_playlists_button_press(self, widget, event):
if self.on_button_press(widget, event, False): return True
def on_streams_button_press(self, widget, event):
if self.on_button_press(widget, event, False): return True
def on_button_press(self, widget, event, widget_is_current):
ctrl_press = (event.state & gtk.gdk.CONTROL_MASK)
self.current.sel_rows = None
if event.button == 1 and widget_is_current and not ctrl_press:
# If the user clicked inside a group of rows that were already selected,
# we need to retain the selected rows in case the user wants to DND the
# group of rows. If they release the mouse without first moving it,
# then we revert to the single selected row. This is similar to the
# behavior found in thunar.
try:
path, _col, _x, _y = widget.get_path_at_pos(int(event.x), int(event.y))
if widget.get_selection().path_is_selected(path):
self.current.sel_rows = widget.get_selection().get_selected_rows()[1]
except:
pass
elif event.button == 3:
self.update_menu_visibility()
# Calling the popup in idle_add is important. It allows the menu items
# to have been shown/hidden before the menu is popped up. Otherwise, if
# the menu pops up too quickly, it can result in automatically clicking
# menu items for the user!
gobject.idle_add(self.mainmenu.popup, None, None, None, event.button, event.time)
# Don't change the selection for a right-click. This
# will allow the user to select multiple rows and then
# right-click (instead of right-clicking and having
# the current selection change to the current row)
if widget.get_selection().count_selected_rows() > 1:
return True
def on_add_item_play(self, widget):
self.on_add_item(widget, True)
def on_add_item(self, _widget, play_after=False):
if self.conn:
if play_after and self.status:
playid = self.status['playlistlength']
if self.current_tab == self.TAB_LIBRARY:
items = self.library.get_path_child_filenames(True)
mpdh.call(self.client, 'command_list_ok_begin')
for item in items:
mpdh.call(self.client, 'add', item)
mpdh.call(self.client, 'command_list_end')
elif self.current_tab == self.TAB_PLAYLISTS:
model, selected = self.playlists_selection.get_selected_rows()
for path in selected:
mpdh.call(self.client, 'load', misc.unescape_html(model.get_value(model.get_iter(path), 1)))
elif self.current_tab == self.TAB_STREAMS:
model, selected = self.streams_selection.get_selected_rows()
for path in selected:
item = model.get_value(model.get_iter(path), 2)
self.stream_parse_and_add(item)
self.iterate_now()
if play_after:
if self.status['random'] == '1':
# If we are in random mode, we want to play a random song
# instead:
mpdh.call(self.client, 'play')
else:
mpdh.call(self.client, 'play', int(playid))
def add_selected_to_playlist(self, plname):
if self.current_tab == self.TAB_LIBRARY:
songs = self.library.get_path_child_filenames(True)
elif self.current_tab == self.TAB_CURRENT:
songs = self.current.get_selected_filenames(0)
else:
raise Exception("This tab doesn't support playlists")
mpdh.call(self.client, 'command_list_ok_begin')
for song in songs:
mpdh.call(self.client, 'playlistadd', plname, song)
mpdh.call(self.client, 'command_list_end')
def stream_parse_and_add(self, item):
# We need to do different things depending on if this is
# a normal stream, pls, m3u, etc..
# Note that we will only download the first 4000 bytes
while gtk.events_pending():
gtk.main_iteration()
f = None
try:
request = urllib2.Request(item)
opener = urllib2.build_opener()
f = opener.open(request).read(4000)
except:
try:
request = urllib2.Request("http://" + item)
opener = urllib2.build_opener()
f = opener.open(request).read(4000)
except:
try:
request = urllib2.Request("file://" + item)
opener = urllib2.build_opener()
f = opener.open(request).read(4000)
except:
pass
while gtk.events_pending():
gtk.main_iteration()
if f:
if misc.is_binary(f):
# Binary file, just add it:
mpdh.call(self.client, 'add', item)
else:
if "[playlist]" in f:
# pls:
self.stream_parse_pls(f)
elif "#EXTM3U" in f:
# extended m3u:
self.stream_parse_m3u(f)
elif "http://" in f:
# m3u or generic list:
self.stream_parse_m3u(f)
else:
# Something else..
mpdh.call(self.client, 'add', item)
else:
# Hopefully just a regular stream, try to add it:
mpdh.call(self.client, 'add', item)
def stream_parse_pls(self, f):
lines = f.split("\n")
for line in lines:
line = line.replace('\r','')
delim = line.find("=")+1
if delim > 0:
line = line[delim:]
if len(line) > 7 and line[0:7] == 'http://':
mpdh.call(self.client, 'add', line)
elif len(line) > 6 and line[0:6] == 'ftp://':
mpdh.call(self.client, 'add', line)
def stream_parse_m3u(self, f):
lines = f.split("\n")
for line in lines:
line = line.replace('\r','')
if len(line) > 7 and line[0:7] == 'http://':
mpdh.call(self.client, 'add', line)
elif len(line) > 6 and line[0:6] == 'ftp://':
mpdh.call(self.client, 'add', line)
def on_replace_item_play(self, widget):
self.on_replace_item(widget, True)
def on_replace_item(self, widget, play_after=False):
if self.status and self.status['state'] == 'play':
play_after = True
# Only clear if an item is selected:
if self.current_tab == self.TAB_LIBRARY:
num_selected = self.library_selection.count_selected_rows()
elif self.current_tab == self.TAB_PLAYLISTS:
num_selected = self.playlists_selection.count_selected_rows()
elif self.current_tab == self.TAB_STREAMS:
num_selected = self.streams_selection.count_selected_rows()
else:
return
if num_selected == 0:
return
self.mpd_clear(None)
self.on_add_item(widget, play_after)
self.iterate_now()
def menu_position(self, _menu):
if self.config.expanded:
_x, y, width, _height = self.current_treeview.get_allocation()
# Find first selected visible row and popup the menu
# from there
if self.current_tab == self.TAB_CURRENT:
widget = self.current_treeview
column = self.current.columns[0]
elif self.current_tab == self.TAB_LIBRARY:
widget = self.library_treeview
column = self.library.librarycolumn
elif self.current_tab == self.TAB_PLAYLISTS:
widget = self.playlists_treeview
column = self.playlists.playlistscolumn
elif self.current_tab == self.TAB_STREAMS:
widget = self.streams_treeview
column = self.streams.streamscolumn
rows = widget.get_selection().get_selected_rows()[1]
visible_rect = widget.get_visible_rect()
row_y = 0
for row in rows:
row_rect = widget.get_background_area(row, column)
if row_rect.y + row_rect.height <= visible_rect.height and row_rect.y >= 0:
row_y = row_rect.y + 30
break
return (self.config.x + width - 150, self.config.y + y + row_y, True)
else:
return (self.config.x + 250, self.config.y + 80, True)
def handle_change_status(self):
# Called when one of the following items are changed:
# 1. Current playlist (song added, removed, etc)
# 2. Repeat/random/xfade/volume
# 3. Currently selected song in playlist
# 4. Status (playing/paused/stopped)
if self.status is None:
# clean up and bail out
self.update_progressbar()
self.update_cursong()
self.update_wintitle()
self.playing_song_change()
self.update_statusbar()
if not self.conn:
self.librarydata.clear()
self.playlistsdata.clear()
self.streamsdata.clear()
return
# Display current playlist
if self.prevstatus is None or self.prevstatus['playlist'] != self.status['playlist']:
prevstatus_playlist = None
if self.prevstatus:
prevstatus_playlist = self.prevstatus['playlist']
self.current.current_update(prevstatus_playlist, self.status['playlistlength'])
# Update progress frequently if we're playing
if self.status_is_play_or_pause():
self.update_progressbar()
# If elapsed time is shown in the window title, we need to update more often:
if "%E" in self.config.titleformat:
self.update_wintitle()
# If state changes
if self.prevstatus is None or self.prevstatus['state'] != self.status['state']:
self.album_get_artist()
# Update progressbar if the state changes too
self.update_progressbar()
self.update_cursong()
self.update_wintitle()
self.info_update(True)
if self.status['state'] == 'stop':
self.ppbutton.set_image(ui.image(stock=gtk.STOCK_MEDIA_PLAY, stocksize=gtk.ICON_SIZE_BUTTON))
self.ppbutton.get_child().get_child().get_children()[1].set_text('')
self.UIManager.get_widget('/traymenu/playmenu').show()
self.UIManager.get_widget('/traymenu/pausemenu').hide()
if HAVE_STATUS_ICON:
self.statusicon.set_from_file(self.find_path('sonata.png'))
elif HAVE_EGG and self.eggtrayheight:
self.eggtrayfile = self.find_path('sonata.png')
self.trayimage.set_from_pixbuf(img.get_pixbuf_of_size(gtk.gdk.pixbuf_new_from_file(self.eggtrayfile), self.eggtrayheight)[0])
elif self.status['state'] == 'pause':
self.ppbutton.set_image(ui.image(stock=gtk.STOCK_MEDIA_PLAY, stocksize=gtk.ICON_SIZE_BUTTON))
self.ppbutton.get_child().get_child().get_children()[1].set_text('')
self.UIManager.get_widget('/traymenu/playmenu').show()
self.UIManager.get_widget('/traymenu/pausemenu').hide()
if HAVE_STATUS_ICON:
self.statusicon.set_from_file(self.find_path('sonata_pause.png'))
elif HAVE_EGG and self.eggtrayheight:
self.eggtrayfile = self.find_path('sonata_pause.png')
self.trayimage.set_from_pixbuf(img.get_pixbuf_of_size(gtk.gdk.pixbuf_new_from_file(self.eggtrayfile), self.eggtrayheight)[0])
elif self.status['state'] == 'play':
self.ppbutton.set_image(ui.image(stock=gtk.STOCK_MEDIA_PAUSE, stocksize=gtk.ICON_SIZE_BUTTON))
self.ppbutton.get_child().get_child().get_children()[1].set_text('')
self.UIManager.get_widget('/traymenu/playmenu').hide()
self.UIManager.get_widget('/traymenu/pausemenu').show()
if self.prevstatus != None:
if self.prevstatus['state'] == 'pause':
# Forces the notification to popup if specified
self.on_currsong_notify()
if HAVE_STATUS_ICON:
self.statusicon.set_from_file(self.find_path('sonata_play.png'))
elif HAVE_EGG and self.eggtrayheight:
self.eggtrayfile = self.find_path('sonata_play.png')
self.trayimage.set_from_pixbuf(img.get_pixbuf_of_size(gtk.gdk.pixbuf_new_from_file(self.eggtrayfile), self.eggtrayheight)[0])
self.playing_song_change()
if self.status_is_play_or_pause():
self.current.center_song_in_list()
if self.prevstatus is None or self.status['volume'] != self.prevstatus['volume']:
self.volumebutton.set_value(int(self.status['volume']))
if self.conn:
if mpdh.mpd_is_updating(self.status):
# MPD library is being updated
self.update_statusbar(True)
elif self.prevstatus is None or mpdh.mpd_is_updating(self.prevstatus) != mpdh.mpd_is_updating(self.status):
if not mpdh.mpd_is_updating(self.status):
# Done updating, refresh interface
self.mpd_updated_db()
elif self.mpd_update_queued:
# If the update happens too quickly, we won't catch it in
# our polling. So let's force an update of the interface:
self.mpd_updated_db()
self.mpd_update_queued = False
if self.config.as_enabled:
if self.prevstatus:
prevstate = self.prevstatus['state']
else:
prevstate = 'stop'
if self.status:
state = self.status['state']
else:
state = 'stop'
if state in ('play', 'pause'):
mpd_time_now = self.status['time']
self.scrobbler.handle_change_status(state, prevstate, self.prevsonginfo, self.songinfo, mpd_time_now)
elif state == 'stop':
self.scrobbler.handle_change_status(state, prevstate, self.prevsonginfo)
def mpd_updated_db(self):
self.library.view_caches_reset()
self.update_statusbar(False)
# We need to make sure that we update the artist in case tags have changed:
self.album_reset_artist()
self.album_get_artist()
# Now update the library and playlist tabs
if self.library.search_visible():
self.library.on_library_search_combo_change()
else:
self.library.library_browse(root=self.config.wd)
self.playlists.populate()
# Update info if it's visible:
self.info_update(True)
return False
def album_get_artist(self):
if self.songinfo and 'album' in self.songinfo:
self.album_return_artist_name()
elif self.songinfo and 'artist' in self.songinfo:
self.album_current_artist = [self.songinfo, mpdh.get(self.songinfo, 'artist')]
else:
self.album_current_artist = [self.songinfo, ""]
def handle_change_song(self):
# Called when one of the following items are changed for the current
# mpd song in the playlist:
# 1. Song tags or filename (e.g. if tags are edited)
# 2. Position in playlist (e.g. if playlist is sorted)
# Note that the song does not have to be playing; it can reflect the
# next song that will be played.
self.current.unbold_boldrow(self.current.prev_boldrow)
if self.status and 'song' in self.status:
row = int(self.status['song'])
self.current.boldrow(row)
if self.songinfo:
if not self.prevsonginfo or mpdh.get(self.songinfo, 'id') != mpdh.get(self.prevsonginfo, 'id'):
self.current.center_song_in_list()
self.current.prev_boldrow = row
self.album_get_artist()
self.update_cursong()
self.update_wintitle()
self.playing_song_change()
self.info_update(True)
def update_progressbar(self):
if self.status_is_play_or_pause():
at, length = [float(c) for c in self.status['time'].split(':')]
try:
newfrac = at/length
except:
newfrac = 0
else:
newfrac = 0
if not self.last_progress_frac or self.last_progress_frac != newfrac:
if newfrac >= 0 and newfrac <= 1:
self.progressbar.set_fraction(newfrac)
if self.conn:
if self.status_is_play_or_pause():
at, length = [int(c) for c in self.status['time'].split(':')]
at_time = misc.convert_time(at)
try:
time = misc.convert_time(mpdh.get(self.songinfo, 'time', 0, True))
newtime = at_time + " / " + time
except:
newtime = at_time
elif self.status:
newtime = ' '
else:
newtime = _('No Read Permission')
else:
newtime = _('Not Connected')
if not self.last_progress_text or self.last_progress_text != newtime:
self.progressbar.set_text(newtime)
def update_statusbar(self, updatingdb=False):
if self.config.show_statusbar:
if self.conn and self.status:
try:
days = None
hours = None
mins = None
total_time = misc.convert_time(self.current.total_time)
try:
mins = total_time.split(":")[-2]
hours = total_time.split(":")[-3]
if int(hours) >= 24:
days = str(int(hours)/24)
hours = str(int(hours) - int(days)*24).zfill(2)
except:
pass
if days:
days_text = gettext.ngettext('day', 'days', int(days))
if mins:
if mins.startswith('0') and len(mins) > 1:
mins = mins[1:]
mins_text = gettext.ngettext('minute', 'minutes', int(mins))
if hours:
if hours.startswith('0'):
hours = hours[1:]
hours_text = gettext.ngettext('hour', 'hours', int(hours))
# Show text:
songs_text = gettext.ngettext('song', 'songs', int(self.status['playlistlength']))
if int(self.status['playlistlength']) > 0:
if days:
status_text = str(self.status['playlistlength']) + ' ' + songs_text + ' ' + days + ' ' + days_text + ', ' + hours + ' ' + hours_text + ', ' + _('and') + ' ' + mins + ' ' + mins_text
elif hours:
status_text = str(self.status['playlistlength']) + ' ' + songs_text + ' ' + hours + ' ' + hours_text + ' ' + _('and') + ' ' + mins + ' ' + mins_text
elif mins:
status_text = str(self.status['playlistlength']) + ' ' + songs_text + ' ' + mins + ' ' + mins_text
else:
status_text = ""
else:
status_text = ""
if updatingdb:
status_text = status_text + " " + _("(updating mpd)")
except:
status_text = ""
else:
status_text = ""
if status_text != self.last_status_text:
self.statusbar.push(self.statusbar.get_context_id(""), status_text)
self.last_status_text = status_text
def expander_ellipse_workaround(self):
# Hacky workaround to ellipsize the expander - see
# http://bugzilla.gnome.org/show_bug.cgi?id=406528
cursonglabelwidth = self.expander.get_allocation().width - 15
if cursonglabelwidth > 0:
self.cursonglabel1.set_size_request(cursonglabelwidth, -1)
self.cursonglabel1.set_size_request(cursonglabelwidth, -1)
def update_cursong(self):
if self.status_is_play_or_pause():
# We must show the trayprogressbar and trayalbumeventbox
# before changing self.cursonglabel (and consequently calling
# self.on_currsong_notify()) in order to ensure that the notification
# popup will have the correct height when being displayed for
# the first time after a stopped state.
if self.config.show_progress:
self.trayprogressbar.show()
self.traycursonglabel2.show()
if self.config.show_covers:
self.trayalbumeventbox.show()
self.trayalbumimage2.show()
for label in (self.cursonglabel1, self.cursonglabel2, self.traycursonglabel1, self.traycursonglabel2):
label.set_ellipsize(pango.ELLIPSIZE_END)
self.expander_ellipse_workaround()
if len(self.config.currsongformat1) > 0:
newlabel1 = ('<big><b>%s </b></big>' %
formatting.parse(
self.config.currsongformat1,
self.songinfo, True))
else:
newlabel1 = '<big><b> </b></big>'
if len(self.config.currsongformat2) > 0:
newlabel2 = ('<small>%s </small>' %
formatting.parse(
self.config.currsongformat2,
self.songinfo, True))
else:
newlabel2 = '<small> </small>'
if newlabel1 != self.cursonglabel1.get_label():
self.cursonglabel1.set_markup(newlabel1)
if newlabel2 != self.cursonglabel2.get_label():
self.cursonglabel2.set_markup(newlabel2)
if newlabel1 != self.traycursonglabel1.get_label():
self.traycursonglabel1.set_markup(newlabel1)
if newlabel2 != self.traycursonglabel2.get_label():
self.traycursonglabel2.set_markup(newlabel2)
self.expander.set_tooltip_text(self.cursonglabel1.get_text() + "\n" + self.cursonglabel2.get_text())
else:
for label in (self.cursonglabel1, self.cursonglabel2, self.traycursonglabel1, self.cursonglabel2):
label.set_ellipsize(pango.ELLIPSIZE_NONE)
self.cursonglabel1.set_markup('<big><b>' + _('Stopped') + '</b></big>')
if self.config.expanded:
self.cursonglabel2.set_markup('<small>' + _('Click to collapse') + '</small>')
else:
self.cursonglabel2.set_markup('<small>' + _('Click to expand') + '</small>')
self.expander.set_tooltip_text(self.cursonglabel1.get_text())
if not self.conn:
self.traycursonglabel1.set_label(_('Not Connected'))
elif not self.status:
self.traycursonglabel1.set_label(_('No Read Permission'))
else:
self.traycursonglabel1.set_label(_('Stopped'))
self.trayprogressbar.hide()
self.trayalbumeventbox.hide()
self.trayalbumimage2.hide()
self.traycursonglabel2.hide()
self.update_infofile()
def update_wintitle(self):
if self.window_owner:
if self.status_is_play_or_pause():
newtitle = formatting.parse(
self.config.titleformat, self.songinfo,
False, True,
self.status.get('time', None))
else:
newtitle = '[Sonata]'
if not self.last_title or self.last_title != newtitle:
self.window.set_property('title', newtitle)
self.last_title = newtitle
def tooltip_set_window_width(self):
screen = self.window.get_screen()
_pscreen, px, py, _mods = screen.get_display().get_pointer()
monitor_num = screen.get_monitor_at_point(px, py)
monitor = screen.get_monitor_geometry(monitor_num)
self.notification_width = int(monitor.width * 0.30)
if self.notification_width > consts.NOTIFICATION_WIDTH_MAX:
self.notification_width = consts.NOTIFICATION_WIDTH_MAX
elif self.notification_width < consts.NOTIFICATION_WIDTH_MIN:
self.notification_width = consts.NOTIFICATION_WIDTH_MIN
def on_currsong_notify(self, _foo=None, _bar=None, force_popup=False):
if self.fullscreencoverart.get_property('visible'):
return
if self.sonata_loaded:
if self.status_is_play_or_pause():
if self.config.show_covers:
self.traytips.set_size_request(self.notification_width, -1)
else:
self.traytips.set_size_request(self.notification_width-100, -1)
else:
self.traytips.set_size_request(-1, -1)
if self.config.show_notification or force_popup:
try:
gobject.source_remove(self.traytips.notif_handler)
except:
pass
if self.status_is_play_or_pause():
try:
self.traytips.notifications_location = self.config.traytips_notifications_location
self.traytips.use_notifications_location = True
if HAVE_STATUS_ICON and self.statusicon.is_embedded() and self.statusicon.get_visible():
self.traytips._real_display(self.statusicon)
elif HAVE_EGG and self.trayicon.get_property('visible'):
self.traytips._real_display(self.trayeventbox)
else:
self.traytips._real_display(None)
if self.config.popup_option != len(self.popuptimes)-1:
if force_popup and not self.config.show_notification:
# Used -p argument and notification is disabled in
# player; default to 3 seconds
timeout = 3000
else:
timeout = int(self.popuptimes[self.config.popup_option])*1000
self.traytips.notif_handler = gobject.timeout_add(timeout, self.traytips.hide)
else:
# -1 indicates that the timeout should be forever.
# We don't want to pass None, because then Sonata
# would think that there is no current notification
self.traytips.notif_handler = -1
except:
pass
else:
self.traytips.hide()
elif self.traytips.get_property('visible'):
try:
self.traytips._real_display(self.trayeventbox)
except:
pass
def on_progressbar_notify_fraction(self, *_args):
self.trayprogressbar.set_fraction(self.progressbar.get_fraction())
def on_progressbar_notify_text(self, *_args):
self.trayprogressbar.set_text(self.progressbar.get_text())
def update_infofile(self):
if self.config.use_infofile is True:
try:
info_file = open(self.config.infofile_path, 'w')
if self.status['state'] in ['play']:
info_file.write('Status: ' + 'Playing' + '\n')
elif self.status['state'] in ['pause']:
info_file.write('Status: ' + 'Paused' + '\n')
elif self.status['state'] in ['stop']:
info_file.write('Status: ' + 'Stopped' + '\n')
try:
info_file.write('Title: ' + mpdh.get(self.songinfo, 'artist') + ' - ' + mpdh.get(self.songinfo, 'title') + '\n')
except:
try:
info_file.write('Title: ' + mpdh.get(self.songinfo, 'title') + '\n') # No Arist in streams
except:
info_file.write('Title: No - ID Tag\n')
info_file.write('Album: ' + mpdh.get(self.songinfo, 'album', 'No Data') + '\n')
info_file.write('Track: ' + mpdh.get(self.songinfo, 'track', '0') + '\n')
info_file.write('File: ' + mpdh.get(self.songinfo, 'file', 'No Data') + '\n')
info_file.write('Time: ' + mpdh.get(self.songinfo, 'time', '0') + '\n')
info_file.write('Volume: ' + self.status['volume'] + '\n')
info_file.write('Repeat: ' + self.status['repeat'] + '\n')
info_file.write('Random: ' + self.status['random'] + '\n')
info_file.close()
except:
pass
#################
# Gui Callbacks #
#################
def on_delete_event_yes(self, _widget):
self.exit_now = True
self.on_delete_event(None, None)
# This one makes sure the program exits when the window is closed
def on_delete_event(self, _widget, _data=None):
if not self.exit_now and self.config.minimize_to_systray:
if HAVE_STATUS_ICON and self.statusicon.is_embedded() and self.statusicon.get_visible():
self.withdraw_app()
return True
elif HAVE_EGG and self.trayicon.get_property('visible'):
self.withdraw_app()
return True
self.settings_save()
self.artwork.artwork_save_cache()
if self.config.as_enabled:
self.scrobbler.save_cache()
if self.conn and self.config.stop_on_exit:
self.mpd_stop(None)
sys.exit()
def on_window_configure(self, window, _event):
# When withdrawing an app, extra configure events (with wrong coords)
# are fired (at least on Openbox). This prevents a user from moving
# the window, withdrawing it, then unwithdrawing it and finding it in
# an older position
if not window.props.visible:
return
width, height = window.get_size()
if self.config.expanded: self.config.w, self.config.h = width, height
else: self.config.w = width
self.config.x, self.config.y = window.get_position()
self.expander_ellipse_workaround()
def on_notebook_resize(self, _widget, _event):
if not self.current.resizing_columns :
gobject.idle_add(self.header_save_column_widths)
gobject.idle_add(self.info.resize_elements, self.notebook.allocation)
def on_expand(self, _action):
if not self.config.expanded:
self.expander.set_expanded(False)
self.on_expander_activate(None)
self.expander.set_expanded(True)
def on_collapse(self, _action):
if self.config.expanded:
self.expander.set_expanded(True)
self.on_expander_activate(None)
self.expander.set_expanded(False)
def on_expander_activate(self, _expander):
currheight = self.window.get_size()[1]
self.config.expanded = False
# Note that get_expanded() will return the state of the expander
# before this current click
window_about_to_be_expanded = not self.expander.get_expanded()
if window_about_to_be_expanded:
if self.window.get_size()[1] == self.config.h:
# For WMs like ion3, the app will not actually resize
# when in collapsed mode, so prevent the waiting
# of the player to expand from happening:
skip_size_check = True
else:
skip_size_check = False
if self.config.show_statusbar:
self.statusbar.show()
self.notebook.show_all()
if self.config.show_statusbar:
ui.show(self.statusbar)
else:
ui.hide(self.statusbar)
self.notebook.hide()
if not self.status_is_play_or_pause():
if window_about_to_be_expanded:
self.cursonglabel2.set_markup('<small>' + _('Click to collapse') + '</small>')
else:
self.cursonglabel2.set_markup('<small>' + _('Click to expand') + '</small>')
# Now we wait for the height of the player to increase, so that
# we know the list is visible. This is pretty hacky, but works.
if self.window_owner:
if window_about_to_be_expanded:
if not skip_size_check:
while self.window.get_size()[1] == currheight:
gtk.main_iteration()
# Notebook is visible, now resize:
self.window.resize(self.config.w, self.config.h)
else:
self.window.resize(self.config.w, 1)
if window_about_to_be_expanded:
self.config.expanded = True
if self.status_is_play_or_pause():
gobject.idle_add(self.current.center_song_in_list)
self.window.set_geometry_hints(self.window)
if self.notebook_show_first_tab:
# Sonata was launched in collapsed state. Ensure we display
# first tab:
self.notebook_show_first_tab = False
self.notebook.set_current_page(0)
# Put focus to the notebook:
self.on_notebook_page_change(self.notebook, 0, self.notebook.get_current_page())
# This callback allows the user to seek to a specific portion of the song
def on_progressbar_press(self, _widget, event):
if event.button == 1:
if self.status_is_play_or_pause():
at, length = [int(c) for c in self.status['time'].split(':')]
try:
pbsize = self.progressbar.allocation
if misc.is_lang_rtl(self.window):
seektime = int(((pbsize.width-event.x)/pbsize.width) * length)
else:
seektime = int((event.x/pbsize.width) * length)
self.seek(int(self.status['song']), seektime)
except:
pass
return True
def on_progressbar_scroll(self, _widget, event):
if self.status_is_play_or_pause():
try:
gobject.source_remove(self.seekidle)
except:
pass
self.seekidle = gobject.idle_add(self._seek_when_idle, event.direction)
return True
def _seek_when_idle(self, direction):
at, _length = [int(c) for c in self.status['time'].split(':')]
try:
if direction == gtk.gdk.SCROLL_UP:
seektime = max(0, at + 5)
elif direction == gtk.gdk.SCROLL_DOWN:
seektime = min(mpdh.get(self.songinfo, 'time'),
at - 5)
self.seek(int(self.status['song']), seektime)
except:
pass
def on_lyrics_search(self, _event):
artist = mpdh.get(self.songinfo, 'artist')
title = mpdh.get(self.songinfo, 'title')
dialog = ui.dialog(title=_('Lyrics Search'), parent=self.window, flags=gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT, buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT, gtk.STOCK_FIND, gtk.RESPONSE_ACCEPT), role='lyricsSearch', default=gtk.RESPONSE_ACCEPT)
dialog.action_area.get_children()[0].set_label(_("_Search"))
dialog.action_area.get_children()[0].set_image(ui.image(stock=gtk.STOCK_FIND))
artist_hbox = gtk.HBox()
artist_label = ui.label(text=_('Artist Name:'))
artist_hbox.pack_start(artist_label, False, False, 5)
artist_entry = ui.entry(text=artist)
artist_hbox.pack_start(artist_entry, True, True, 5)
title_hbox = gtk.HBox()
title_label = ui.label(text=_('Song Title:'))
title_hbox.pack_start(title_label, False, False, 5)
title_entry = ui.entry(title)
title_hbox.pack_start(title_entry, True, True, 5)
ui.set_widths_equal([artist_label, title_label])
dialog.vbox.pack_start(artist_hbox)
dialog.vbox.pack_start(title_hbox)
ui.show(dialog.vbox)
response = dialog.run()
if response == gtk.RESPONSE_ACCEPT:
dialog.destroy()
# Delete current lyrics:
filename = self.info.target_lyrics_filename(artist, title, None, consts.LYRICS_LOCATION_HOME)
misc.remove_file(filename)
# Search for new lyrics:
self.info.get_lyrics_start(artist_entry.get_text(), title_entry.get_text(), artist, title, os.path.dirname(mpdh.get(self.songinfo, 'file')))
else:
dialog.destroy()
def mpd_shuffle(self, _action):
if self.conn:
if not self.status or self.status['playlistlength'] == '0':
return
ui.change_cursor(gtk.gdk.Cursor(gtk.gdk.WATCH))
while gtk.events_pending():
gtk.main_iteration()
mpdh.call(self.client, 'shuffle')
def on_menu_popup(self, _widget):
self.update_menu_visibility()
gobject.idle_add(self.mainmenu.popup, None, None, self.menu_position, 3, 0)
def on_updatedb(self, _action):
if self.conn:
if self.library.search_visible():
self.library.on_search_end(None)
mpdh.update(self.client, '/', self.status)
self.mpd_update_queued = True
def on_updatedb_shortcut(self, _action):
# If no songs selected, update view. Otherwise update
# selected items.
if self.library.not_parent_is_selected():
self.on_updatedb_path(True)
else:
self.on_updatedb_path(False)
def on_updatedb_path(self, selected_only):
if self.conn and self.current_tab == self.TAB_LIBRARY:
if self.library.search_visible():
self.library.on_search_end(None)
filenames = self.library.get_path_child_filenames(True, selected_only)
if len(filenames) > 0:
mpdh.update(self.client, filenames, self.status)
self.mpd_update_queued = True
def on_image_activate(self, widget, event):
self.window.handler_block(self.mainwinhandler)
if event.button == 1 and widget == self.info_imagebox and self.artwork.have_last():
if not self.config.info_art_enlarged:
self.info_imagebox.set_size_request(-1, -1)
self.artwork.artwork_set_image_last()
self.config.info_art_enlarged = True
else:
self.info_imagebox.set_size_request(152, -1)
self.artwork.artwork_set_image_last()
self.config.info_art_enlarged = False
# Force a resize of the info labels, if needed:
gobject.idle_add(self.on_notebook_resize, self.notebook, None)
elif event.button == 1 and widget != self.info_imagebox:
if self.config.expanded:
if self.current_tab != self.TAB_INFO:
self.img_clicked = True
self.switch_to_tab_name(self.TAB_INFO)
self.img_clicked = False
else:
self.switch_to_tab_name(self.last_tab)
elif event.button == 3:
artist = None
album = None
stream = None
if self.status_is_play_or_pause():
self.UIManager.get_widget('/imagemenu/chooseimage_menu/').show()
self.UIManager.get_widget('/imagemenu/localimage_menu/').show()
artist = mpdh.get(self.songinfo, 'artist', None)
album = mpdh.get(self.songinfo, 'album', None)
stream = mpdh.get(self.songinfo, 'name', None)
if not (artist or album or stream):
self.UIManager.get_widget('/imagemenu/localimage_menu/').hide()
self.UIManager.get_widget('/imagemenu/resetimage_menu/').hide()
self.UIManager.get_widget('/imagemenu/chooseimage_menu/').hide()
self.imagemenu.popup(None, None, None, event.button, event.time)
gobject.timeout_add(50, self.on_image_activate_after)
return False
def on_image_motion_cb(self, _widget, context, _x, _y, time):
context.drag_status(gtk.gdk.ACTION_COPY, time)
return True
def on_image_drop_cb(self, _widget, _context, _x, _y, selection, _info, _time):
if self.status_is_play_or_pause():
uri = selection.data.strip()
path = urllib.url2pathname(uri)
paths = path.rsplit('\n')
thread = threading.Thread(target=self.on_image_drop_cb_thread, args=(paths,))
thread.setDaemon(True)
thread.start()
def on_image_drop_cb_thread(self, paths):
for i, path in enumerate(paths):
remove_after_set = False
paths[i] = path.rstrip('\r')
# Clean up (remove preceding "file://" or "file:")
if paths[i].startswith('file://'):
paths[i] = paths[i][7:]
elif paths[i].startswith('file:'):
paths[i] = paths[i][5:]
elif re.match('^(https?|ftp)://', paths[i]):
try:
# Eliminate query arguments and extract extension & filename
path = urllib.splitquery(paths[i])[0]
extension = os.path.splitext(path)[1][1:]
filename = os.path.split(path)[1]
if img.extension_is_valid(extension):
# Save to temp dir.. we will delete the image afterwards
dest_file = os.path.expanduser('~/.covers/temp/' + filename)
misc.create_dir('~/.covers/temp')
urllib.urlretrieve(paths[i], dest_file)
paths[i] = dest_file
remove_after_set = True
else:
continue
except:
# cleanup undone file
misc.remove_file(paths[i])
raise
paths[i] = os.path.abspath(paths[i])
if img.valid_image(paths[i]):
stream = mpdh.get(self.songinfo, 'name', None)
if stream is not None:
dest_filename = self.artwork.artwork_stream_filename(mpdh.get(self.songinfo, 'name'))
else:
dest_filename = self.target_image_filename()
if dest_filename != paths[i]:
shutil.copyfile(paths[i], dest_filename)
self.artwork.artwork_update(True)
if remove_after_set:
misc.remove_file(paths[i])
def target_image_filename(self, force_location=None, songpath=None, artist=None, album=None):
# Only pass songpath, artist, and album if we are trying to get the
# filename for an album that isn't currently playing
if self.conn:
# If no info passed, you info from currently playing song:
if not album:
album = mpdh.get(self.songinfo, 'album', "")
if not artist:
artist = self.album_current_artist[1]
album = album.replace("/", "")
artist = artist.replace("/", "")
if songpath is None:
songpath = os.path.dirname(mpdh.get(self.songinfo, 'file'))
# Return target filename:
if force_location is not None:
art_loc = force_location
else:
art_loc = self.config.art_location
if art_loc == consts.ART_LOCATION_HOMECOVERS:
targetfile = os.path.join(os.path.expanduser("~/.covers"), "%s-%s.jpg" % (artist, album))
elif art_loc == consts.ART_LOCATION_COVER:
targetfile = os.path.join(self.config.musicdir[self.config.profile_num], songpath, "cover.jpg")
elif art_loc == consts.ART_LOCATION_FOLDER:
targetfile = os.path.join(self.config.musicdir[self.config.profile_num], songpath, "folder.jpg")
elif art_loc == consts.ART_LOCATION_ALBUM:
targetfile = os.path.join(self.config.musicdir[self.config.profile_num], songpath, "album.jpg")
elif art_loc == consts.ART_LOCATION_CUSTOM:
targetfile = os.path.join(self.config.musicdir[self.config.profile_num], songpath, self.config.art_location_custom_filename)
targetfile = misc.file_exists_insensitive(targetfile)
return misc.file_from_utf8(targetfile)
def album_return_artist_and_tracks(self):
# Includes logic for Various Artists albums to determine
# the tracks.
datalist = []
album = mpdh.get(self.songinfo, 'album')
songs, _playtime, _num_songs = self.library.library_return_search_items(album=album)
for song in songs:
year = mpdh.get(song, 'date', '')
artist = mpdh.get(song, 'artist', '')
path = os.path.dirname(mpdh.get(song, 'file'))
data = library.library_set_data(album=album, artist=artist, year=year, path=path)
datalist.append(data)
if len(datalist) > 0:
datalist = misc.remove_list_duplicates(datalist, case=False)
datalist = self.library.list_identify_VA_albums(datalist)
if len(datalist) > 0:
# Multiple albums with same name and year, choose the right one. If we have
# a VA album, compare paths. Otherwise, compare artists.
for dataitem in datalist:
if unicode(library.library_get_data(dataitem, 'artist')).lower() == unicode(mpdh.get(self.songinfo, 'artist')).lower() \
or (library.library_get_data(dataitem, 'artist') == self.library.get_VAstr() and library.library_get_data(dataitem, 'path') == os.path.dirname(mpdh.get(self.songinfo, 'file'))):
datalist = [dataitem]
break
# Find all songs in album:
retsongs = []
for song in songs:
if unicode(mpdh.get(song, 'album')).lower() == unicode(library.library_get_data(datalist[0], 'album')).lower() \
and mpdh.get(song, 'date', None) == library.library_get_data(datalist[0], 'year'):
if library.library_get_data(datalist[0], 'artist') == self.library.get_VAstr() \
or unicode(library.library_get_data(datalist[0], 'artist')).lower() == unicode(mpdh.get(song, 'artist')).lower():
retsongs.append(song)
artist = library.library_get_data(datalist[0], 'artist')
return artist, retsongs
else:
return None, None
def album_return_artist_name(self):
# Determine if album_name is a various artists album.
if self.album_current_artist[0] == self.songinfo:
return
artist, _tracks = self.album_return_artist_and_tracks()
if artist is not None:
self.album_current_artist = [self.songinfo, artist]
else:
self.album_current_artist = [self.songinfo, ""]
def album_reset_artist(self):
self.album_current_artist = [None, ""]
def on_image_activate_after(self):
self.window.handler_unblock(self.mainwinhandler)
def update_preview(self, file_chooser, preview):
filename = file_chooser.get_preview_filename()
pixbuf = None
try:
pixbuf = gtk.gdk.pixbuf_new_from_file_at_size(filename, 128, 128)
except:
pass
if pixbuf is None:
try:
pixbuf = gtk.gdk.PixbufAnimation(filename).get_static_image()
width = pixbuf.get_width()
height = pixbuf.get_height()
if width > height:
pixbuf = pixbuf.scale_simple(128, int(float(height)/width*128), gtk.gdk.INTERP_HYPER)
else:
pixbuf = pixbuf.scale_simple(int(float(width)/height*128), 128, gtk.gdk.INTERP_HYPER)
except:
pass
if pixbuf is None:
pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, 1, 8, 128, 128)
pixbuf.fill(0x00000000)
preview.set_from_pixbuf(pixbuf)
have_preview = True
file_chooser.set_preview_widget_active(have_preview)
del pixbuf
self.call_gc_collect = True
def image_local(self, _widget):
dialog = gtk.FileChooserDialog(
title=_("Open Image"),
action=gtk.FILE_CHOOSER_ACTION_OPEN,
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN,gtk.RESPONSE_OK))
filefilter = gtk.FileFilter()
filefilter.set_name(_("Images"))
filefilter.add_pixbuf_formats()
dialog.add_filter(filefilter)
filefilter = gtk.FileFilter()
filefilter.set_name(_("All files"))
filefilter.add_pattern("*")
dialog.add_filter(filefilter)
preview = ui.image()
dialog.set_preview_widget(preview)
dialog.set_use_preview_label(False)
dialog.connect("update-preview", self.update_preview, preview)
stream = mpdh.get(self.songinfo, 'name', None)
album = mpdh.get(self.songinfo, 'album', "").replace("/", "")
artist = self.album_current_artist[1].replace("/", "")
dialog.connect("response", self.image_local_response, artist, album, stream)
dialog.set_default_response(gtk.RESPONSE_OK)
songdir = os.path.dirname(mpdh.get(self.songinfo, 'file'))
currdir = misc.file_from_utf8(os.path.join(self.config.musicdir[self.config.profile_num], songdir))
if self.config.art_location != consts.ART_LOCATION_HOMECOVERS:
dialog.set_current_folder(currdir)
if stream is not None:
# Allow saving an image file for a stream:
self.local_dest_filename = self.artwork.artwork_stream_filename(stream)
else:
self.local_dest_filename = self.target_image_filename()
dialog.show()
def image_local_response(self, dialog, response, _artist, _album, _stream):
if response == gtk.RESPONSE_OK:
filename = dialog.get_filenames()[0]
# Copy file to covers dir:
if self.local_dest_filename != filename:
shutil.copyfile(filename, self.local_dest_filename)
# And finally, set the image in the interface:
self.artwork.artwork_update(True)
# Force a resize of the info labels, if needed:
gobject.idle_add(self.on_notebook_resize, self.notebook, None)
dialog.destroy()
def imagelist_append(self, elem):
self.imagelist.append(elem)
def remotefilelist_append(self, elem):
self.remotefilelist.append(elem)
def image_remote(self, _widget):
self.choose_dialog = ui.dialog(title=_("Choose Cover Art"), parent=self.window, flags=gtk.DIALOG_MODAL, buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT), role='chooseCoverArt', default=gtk.RESPONSE_ACCEPT, separator=False, resizable=False)
choosebutton = self.choose_dialog.add_button(_("C_hoose"), gtk.RESPONSE_ACCEPT)
chooseimage = ui.image(stock=gtk.STOCK_CONVERT, stocksize=gtk.ICON_SIZE_BUTTON)
choosebutton.set_image(chooseimage)
self.imagelist = gtk.ListStore(int, gtk.gdk.Pixbuf)
# Setting col=2 only shows 1 column with gtk 2.16 while col=-1 shows 2
imagewidget = ui.iconview(col=-1, space=0, margin=0, itemw=75, selmode=gtk.SELECTION_SINGLE)
scroll = ui.scrollwindow(policy_x=gtk.POLICY_NEVER, policy_y=gtk.POLICY_ALWAYS, w=360, h=325, add=imagewidget)
self.choose_dialog.vbox.pack_start(scroll, False, False, 0)
hbox = gtk.HBox()
vbox = gtk.VBox()
vbox.pack_start(ui.label(markup='<small> </small>'), False, False, 0)
self.remote_artistentry = ui.entry()
self.remote_albumentry = ui.entry()
text = [("Artist"), _("Album")]
labels = [ui.label(text=labelname + ": ") for labelname in text]
entries = [self.remote_artistentry, self.remote_albumentry]
for entry, label in zip(entries, labels):
tmphbox = gtk.HBox()
tmphbox.pack_start(label, False, False, 5)
entry.connect('activate', self.image_remote_refresh, imagewidget)
tmphbox.pack_start(entry, True, True, 5)
vbox.pack_start(tmphbox)
ui.set_widths_equal(labels)
vbox.pack_start(ui.label(markup='<small> </small>'), False, False, 0)
hbox.pack_start(vbox, True, True, 5)
vbox2 = gtk.VBox()
vbox2.pack_start(ui.label(" "))
refreshbutton = ui.button(text=_('_Update'), img=ui.image(stock=gtk.STOCK_REFRESH))
refreshbutton.connect('clicked', self.image_remote_refresh, imagewidget)
vbox2.pack_start(refreshbutton, False, False, 5)
vbox2.pack_start(ui.label(" "))
hbox.pack_start(vbox2, False, False, 15)
searchexpander = ui.expander(text=_("Edit search terms"))
searchexpander.add(hbox)
self.choose_dialog.vbox.pack_start(searchexpander, True, True, 0)
self.choose_dialog.show_all()
self.chooseimage_visible = True
self.remotefilelist = []
stream = mpdh.get(self.songinfo, 'name', None)
if stream is not None:
# Allow saving an image file for a stream:
self.remote_dest_filename = self.artwork.artwork_stream_filename(stream)
else:
self.remote_dest_filename = self.target_image_filename()
album = mpdh.get(self.songinfo, 'album', '')
artist = self.album_current_artist[1]
imagewidget.connect('item-activated', self.image_remote_replace_cover, artist.replace("/", ""), album.replace("/", ""), stream)
self.choose_dialog.connect('response', self.image_remote_response, imagewidget, artist, album, stream)
self.remote_artistentry.set_text(artist)
self.remote_albumentry.set_text(album)
self.allow_art_search = True
self.image_remote_refresh(None, imagewidget)
def image_remote_refresh(self, _entry, imagewidget):
if not self.allow_art_search:
return
self.allow_art_search = False
self.artwork.artwork_stop_update()
while self.artwork.artwork_is_downloading_image():
gtk.main_iteration()
self.imagelist.clear()
imagewidget.set_text_column(-1)
imagewidget.set_model(self.imagelist)
imagewidget.set_pixbuf_column(1)
ui.focus(imagewidget)
ui.change_cursor(gtk.gdk.Cursor(gtk.gdk.WATCH))
thread = threading.Thread(target=self._image_remote_refresh, args=(imagewidget, None))
thread.setDaemon(True)
thread.start()
def _image_remote_refresh(self, imagewidget, _ignore):
self.artwork.stop_art_update = False
# Retrieve all images from rhapsody:
artist_search = self.remote_artistentry.get_text()
album_search = self.remote_albumentry.get_text()
if len(artist_search) == 0 and len(album_search) == 0:
gobject.idle_add(self.image_remote_no_tag_found, imagewidget)
return
filename = os.path.expanduser("~/.covers/temp/<imagenum>.jpg")
misc.remove_dir_recursive(os.path.dirname(filename))
misc.create_dir(os.path.dirname(filename))
imgfound = self.artwork.artwork_download_img_to_file(artist_search, album_search, filename, True)
ui.change_cursor(None)
if self.chooseimage_visible:
if not imgfound:
gobject.idle_add(self.image_remote_no_covers_found, imagewidget)
self.call_gc_collect = True
def image_remote_no_tag_found(self, imagewidget):
self.image_remote_warning(imagewidget, _("No artist or album name found."))
def image_remote_no_covers_found(self, imagewidget):
self.image_remote_warning(imagewidget, _("No cover art found."))
def image_remote_warning(self, imagewidget, msgstr):
liststore = gtk.ListStore(int, str)
liststore.append([0, msgstr])
imagewidget.set_pixbuf_column(-1)
imagewidget.set_model(liststore)
imagewidget.set_text_column(1)
ui.change_cursor(None)
self.allow_art_search = True
def image_remote_response(self, dialog, response_id, imagewidget, artist, album, stream):
self.artwork.artwork_stop_update()
if response_id == gtk.RESPONSE_ACCEPT:
try:
self.image_remote_replace_cover(imagewidget, imagewidget.get_selected_items()[0], artist, album, stream)
# Force a resize of the info labels, if needed:
gobject.idle_add(self.on_notebook_resize, self.notebook, None)
except:
dialog.destroy()
else:
dialog.destroy()
ui.change_cursor(None)
self.chooseimage_visible = False
def image_remote_replace_cover(self, _iconview, path, _artist, _album, _stream):
self.artwork.artwork_stop_update()
image_num = int(path[0])
if len(self.remotefilelist) > 0:
filename = self.remotefilelist[image_num]
if os.path.exists(filename):
shutil.move(filename, self.remote_dest_filename)
# And finally, set the image in the interface:
self.artwork.artwork_update(True)
# Clean up..
misc.remove_dir_recursive(os.path.dirname(filename))
self.chooseimage_visible = False
self.choose_dialog.destroy()
while self.artwork.artwork_is_downloading_image():
gtk.main_iteration()
def fullscreen_cover_art(self, _widget):
if self.fullscreencoverart.get_property('visible'):
self.fullscreencoverart.hide()
else:
self.traytips.hide()
self.artwork.fullscreen_cover_art_set_image(force_update=True)
self.fullscreencoverart.show_all()
def fullscreen_cover_art_close(self, _widget, event, key_press):
if key_press:
shortcut = gtk.accelerator_name(event.keyval, event.state)
shortcut = shortcut.replace("<Mod2>", "")
if shortcut != 'Escape':
return
self.fullscreencoverart.hide()
def header_save_column_widths(self):
if not self.config.withdrawn and self.config.expanded:
windowwidth = self.window.allocation.width
if windowwidth <= 10 or self.current.columns[0].get_width() <= 10:
# Make sure we only set self.config.columnwidths if self.current
# has its normal allocated width:
return
notebookwidth = self.notebook.allocation.width
treewidth = 0
for i, column in enumerate(self.current.columns):
colwidth = column.get_width()
treewidth += colwidth
if i == len(self.current.columns)-1 and treewidth <= windowwidth:
self.config.columnwidths[i] = min(colwidth, column.get_fixed_width())
else:
self.config.columnwidths[i] = colwidth
if treewidth > notebookwidth:
self.current.expanderwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
else:
self.current.expanderwindow.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
self.current.resizing_columns = False
def systemtray_menu(self, status_icon, button, activate_time):
self.traymenu.popup(None, None, gtk.status_icon_position_menu, button, activate_time, status_icon)
def systemtray_activate(self, _status_icon):
# Clicking on a gtk.StatusIcon:
if not self.ignore_toggle_signal:
# This prevents the user clicking twice in a row quickly
# and having the second click not revert to the intial
# state
self.ignore_toggle_signal = True
prev_state = self.UIManager.get_widget('/traymenu/showmenu').get_active()
self.UIManager.get_widget('/traymenu/showmenu').set_active(not prev_state)
if not self.window.window:
# For some reason, self.window.window is not defined if mpd is not running
# and sonata is started with self.config.withdrawn = True
self.withdraw_app_undo()
elif not (self.window.window.get_state() & gtk.gdk.WINDOW_STATE_WITHDRAWN) and self.window.is_active():
# Window is not withdrawn and is active (has toplevel focus):
self.withdraw_app()
else:
self.withdraw_app_undo()
# This prevents the tooltip from popping up again until the user
# leaves and enters the trayicon again
#if self.traytips.notif_handler is None and self.traytips.notif_handler != -1:
#self.traytips._remove_timer()
gobject.timeout_add(100, self.tooltip_set_ignore_toggle_signal_false)
def tooltip_show_manually(self):
# Since there is no signal to connect to when the user puts their
# mouse over the trayicon, we will check the mouse position
# manually and show/hide the window as appropriate. This is called
# every iteration. Note: This should not occur if self.traytips.notif_
# handler has a value, because that means that the tooltip is already
# visible, and we don't want to override that setting simply because
# the user's cursor is not over the tooltip.
if self.traymenu.get_property('visible') and self.traytips.notif_handler != -1:
self.traytips._remove_timer()
elif not self.traytips.notif_handler:
_pscreen, px, py, _mods = self.window.get_screen().get_display().get_pointer()
_icon_screen, icon_rect, _icon_orient = self.statusicon.get_geometry()
x = icon_rect[0]
y = icon_rect[1]
width = icon_rect[2]
height = icon_rect[3]
if px >= x and px <= x+width and py >= y and py <= y+height:
self.traytips._start_delay(self.statusicon)
else:
self.traytips._remove_timer()
def systemtray_click(self, _widget, event):
# Clicking on an egg system tray icon:
if event.button == 1 and not self.ignore_toggle_signal: # Left button shows/hides window(s)
self.systemtray_activate(None)
elif event.button == 2: # Middle button will play/pause
if self.conn:
self.mpd_pp(self.trayeventbox)
elif event.button == 3: # Right button pops up menu
self.traymenu.popup(None, None, None, event.button, event.time)
return False
def on_traytips_press(self, _widget, _event):
if self.traytips.get_property('visible'):
self.traytips._remove_timer()
def withdraw_app_undo(self):
self.window.move(self.config.x, self.config.y)
if not self.config.expanded:
self.notebook.set_no_show_all(True)
self.statusbar.set_no_show_all(True)
self.window.show_all()
self.notebook.set_no_show_all(False)
self.config.withdrawn = False
self.UIManager.get_widget('/traymenu/showmenu').set_active(True)
if self.notebook_show_first_tab and self.config.expanded:
# Sonata was launched in withdrawn state. Ensure we display
# first tab:
self.notebook_show_first_tab = False
self.notebook.set_current_page(0)
self.withdraw_app_undo_present_and_focus()
def withdraw_app_undo_present_and_focus(self):
self.window.present() # Helps to raise the window (useful against focus stealing prevention)
self.window.grab_focus()
if self.config.sticky:
self.window.stick()
if self.config.ontop:
self.window.set_keep_above(True)
def withdraw_app(self):
if HAVE_EGG or HAVE_STATUS_ICON:
# Save the playlist column widths before withdrawing the app.
# Otherwise we will not be able to correctly save the column
# widths if the user quits sonata while it is withdrawn.
self.header_save_column_widths()
self.window.hide()
self.config.withdrawn = True
self.UIManager.get_widget('/traymenu/showmenu').set_active(False)
def on_withdraw_app_toggle(self, _action):
if self.ignore_toggle_signal:
return
self.ignore_toggle_signal = True
if self.UIManager.get_widget('/traymenu/showmenu').get_active():
self.withdraw_app_undo()
else:
self.withdraw_app()
gobject.timeout_add(500, self.tooltip_set_ignore_toggle_signal_false)
def tooltip_set_ignore_toggle_signal_false(self):
self.ignore_toggle_signal = False
# Change volume on mousewheel over systray icon:
def systemtray_scroll(self, widget, event):
if self.conn:
self.volumebutton.emit("scroll-event", event)
def systemtray_size(self, widget, _allocation):
if widget.allocation.height <= 5:
# For vertical panels, height can be 1px, so use width
size = widget.allocation.width
else:
size = widget.allocation.height
if not self.eggtrayheight or self.eggtrayheight != size:
self.eggtrayheight = size
if size > 5 and self.eggtrayfile:
self.trayimage.set_from_pixbuf(img.get_pixbuf_of_size(gtk.gdk.pixbuf_new_from_file(self.eggtrayfile), self.eggtrayheight)[0])
def switch_to_tab_name(self, tab_name):
self.notebook.set_current_page(self.notebook_get_tab_num(self.notebook, tab_name))
def switch_to_tab_num(self, tab_num):
vis_tabnum = self.notebook_get_visible_tab_num(self.notebook, tab_num)
if vis_tabnum != -1:
self.notebook.set_current_page(vis_tabnum)
def switch_to_next_tab(self, _action):
self.notebook.next_page()
def switch_to_prev_tab(self, _action):
self.notebook.prev_page()
# Volume control
def on_volume_lower(self, _action):
new_volume = int(self.volumebutton.get_value()) - 5
self.volumebutton.set_value(new_volume)
def on_volume_raise(self, _action):
new_volume = int(self.volumebutton.get_value()) + 5
self.volumebutton.set_value(new_volume)
def on_volume_change(self, _button, new_volume):
mpdh.call(self.client, 'setvol', int(new_volume))
def mpd_pp(self, _widget, _key=None):
if self.conn and self.status:
if self.status['state'] in ('stop', 'pause'):
mpdh.call(self.client, 'play')
elif self.status['state'] == 'play':
mpdh.call(self.client, 'pause', '1')
self.iterate_now()
def mpd_stop(self, _widget, _key=None):
if self.conn:
mpdh.call(self.client, 'stop')
self.iterate_now()
def mpd_prev(self, _widget, _key=None):
if self.conn:
mpdh.call(self.client, 'previous')
self.iterate_now()
def mpd_next(self, _widget, _key=None):
if self.conn:
mpdh.call(self.client, 'next')
self.iterate_now()
def on_remove(self, _widget):
if self.conn:
model = None
while gtk.events_pending():
gtk.main_iteration()
if self.current_tab == self.TAB_CURRENT:
self.current.on_remove()
elif self.current_tab == self.TAB_PLAYLISTS:
treeviewsel = self.playlists_selection
model, selected = treeviewsel.get_selected_rows()
if ui.show_msg(self.window, gettext.ngettext("Delete the selected playlist?", "Delete the selected playlists?", int(len(selected))), gettext.ngettext("Delete Playlist", "Delete Playlists", int(len(selected))), 'deletePlaylist', gtk.BUTTONS_YES_NO) == gtk.RESPONSE_YES:
iters = [model.get_iter(path) for path in selected]
for i in iters:
mpdh.call(self.client, 'rm', misc.unescape_html(self.playlistsdata.get_value(i, 1)))
self.playlists.populate()
elif self.current_tab == self.TAB_STREAMS:
treeviewsel = self.streams_selection
model, selected = treeviewsel.get_selected_rows()
if ui.show_msg(self.window, gettext.ngettext("Delete the selected stream?", "Delete the selected streams?", int(len(selected))), gettext.ngettext("Delete Stream", "Delete Streams", int(len(selected))), 'deleteStreams', gtk.BUTTONS_YES_NO) == gtk.RESPONSE_YES:
iters = [model.get_iter(path) for path in selected]
for i in iters:
stream_removed = False
for j in range(len(self.config.stream_names)):
if not stream_removed:
if self.streamsdata.get_value(i, 1) == misc.escape_html(self.config.stream_names[j]):
self.config.stream_names.pop(j)
self.config.stream_uris.pop(j)
stream_removed = True
self.streams.populate()
self.iterate_now()
# Attempt to retain selection in the vicinity..
if model and len(model) > 0:
try:
# Use top row in selection...
selrow = 999999
for row in selected:
if row[0] < selrow:
selrow = row[0]
if selrow >= len(model):
selrow = len(model)-1
treeviewsel.select_path(selrow)
except:
pass
def mpd_clear(self, _widget):
if self.conn:
mpdh.call(self.client, 'clear')
self.iterate_now()
def _toggle_clicked(self, command, widget):
mpdh.call(self.client, command, int(widget.get_active()))
def on_repeat_clicked(self, widget):
if self.conn:
self._toggle_clicked('repeat', widget)
def on_random_clicked(self, widget):
if self.conn:
self._toggle_clicked('random', widget)
def setup_prefs_callbacks(self):
trayicon_available = HAVE_EGG or HAVE_STATUS_ICON
extras = preferences.Extras_cbs
extras.popuptimes = self.popuptimes
extras.notif_toggled = self.prefs_notif_toggled
extras.crossfade_toggled = self.prefs_crossfade_toggled
extras.crossfade_changed = self.prefs_crossfade_changed
display = preferences.Display_cbs
display.stylized_toggled = self.prefs_stylized_toggled
display.art_toggled = self.prefs_art_toggled
display.playback_toggled = self.prefs_playback_toggled
display.progress_toggled = self.prefs_progress_toggled
display.statusbar_toggled = self.prefs_statusbar_toggled
display.lyrics_toggled = self.prefs_lyrics_toggled
display.trayicon_available = trayicon_available
behavior = preferences.Behavior_cbs
behavior.trayicon_toggled = self.prefs_trayicon_toggled
behavior.sticky_toggled = self.prefs_sticky_toggled
behavior.ontop_toggled = self.prefs_ontop_toggled
behavior.decorated_toggled = self.prefs_decorated_toggled
behavior.infofile_changed = self.prefs_infofile_changed
format = preferences.Format_cbs
format.currentoptions_changed = self.prefs_currentoptions_changed
format.libraryoptions_changed = self.prefs_libraryoptions_changed
format.titleoptions_changed = self.prefs_titleoptions_changed
format.currsongoptions1_changed = self.prefs_currsongoptions1_changed
format.currsongoptions2_changed = self.prefs_currsongoptions2_changed
def on_prefs(self, _widget):
trayicon_in_use = ((HAVE_STATUS_ICON and
self.statusicon.is_embedded() and
self.statusicon.get_visible())
or
(HAVE_EGG and
self.trayicon.get_property('visible')))
preferences.Behavior_cbs.trayicon_in_use = trayicon_in_use
self.preferences.on_prefs_real()
def prefs_currentoptions_changed(self, entry, _event):
if self.config.currentformat != entry.get_text():
self.config.currentformat = entry.get_text()
for column in self.current_treeview.get_columns():
self.current_treeview.remove_column(column)
self.current.initialize_columns()
self.current.update_format()
def prefs_libraryoptions_changed(self, entry, _event):
if self.config.libraryformat != entry.get_text():
self.config.libraryformat = entry.get_text()
self.library.library_browse(root=self.config.wd)
def prefs_titleoptions_changed(self, entry, _event):
if self.config.titleformat != entry.get_text():
self.config.titleformat = entry.get_text()
self.update_wintitle()
def prefs_currsongoptions1_changed(self, entry, _event):
if self.config.currsongformat1 != entry.get_text():
self.config.currsongformat1 = entry.get_text()
self.update_cursong()
def prefs_currsongoptions2_changed(self, entry, _event):
if self.config.currsongformat2 != entry.get_text():
self.config.currsongformat2 = entry.get_text()
self.update_cursong()
def prefs_ontop_toggled(self, button):
self.config.ontop = button.get_active()
if self.window_owner:
self.window.set_keep_above(self.config.ontop)
def prefs_sticky_toggled(self, button):
self.config.sticky = button.get_active()
if self.window_owner:
if self.config.sticky:
self.window.stick()
else:
self.window.unstick()
def prefs_decorated_toggled(self, button, prefs_window):
self.config.decorated = not button.get_active()
if self.window_owner:
if self.config.decorated != self.window.get_decorated():
self.withdraw_app()
self.window.set_decorated(self.config.decorated)
self.withdraw_app_undo()
prefs_window.present()
def prefs_infofile_changed(self, entry, _event):
if self.config.infofile_path != entry.get_text():
self.config.infofile_path = os.path.expanduser(entry.get_text())
if self.config.use_infofile:
self.update_infofile()
def prefs_crossfade_changed(self, crossfade_spin):
crossfade_value = crossfade_spin.get_value_as_int()
mpdh.call(self.client, 'crossfade', crossfade_value)
def prefs_crossfade_toggled(self, button, crossfade_spin):
crossfade_value = crossfade_spin.get_value_as_int()
if button.get_active():
mpdh.call(self.client, 'crossfade', crossfade_value)
else:
mpdh.call(self.client, 'crossfade', 0)
def prefs_playback_toggled(self, button):
self.config.show_playback = button.get_active()
func = 'show' if self.config.show_playback else 'hide'
for widget in [self.prevbutton, self.ppbutton, self.stopbutton, self.nextbutton, self.volumebutton]:
getattr(ui, func)(widget)
def prefs_progress_toggled(self, button):
self.config.show_progress = button.get_active()
func = ui.show if self.config.show_progress else ui.hide
for widget in [self.progressbox, self.trayprogressbar]:
func(widget)
def prefs_art_toggled(self, button, art_hbox1, art_hbox2, art_stylized):
button_active = button.get_active()
art_hbox1.set_sensitive(button_active)
art_hbox2.set_sensitive(button_active)
art_stylized.set_sensitive(button_active)
if button_active:
self.traytips.set_size_request(self.notification_width, -1)
self.artwork.artwork_set_default_icon()
for widget in [self.imageeventbox, self.info_imagebox, self.trayalbumeventbox, self.trayalbumimage2]:
widget.set_no_show_all(False)
if widget in [self.trayalbumeventbox, self.trayalbumimage2]:
if self.status_is_play_or_pause():
widget.show_all()
else:
widget.show_all()
self.config.show_covers = True
self.update_cursong()
self.artwork.artwork_update()
else:
self.traytips.set_size_request(self.notification_width-100, -1)
for widget in [self.imageeventbox, self.info_imagebox, self.trayalbumeventbox, self.trayalbumimage2]:
ui.hide(widget)
self.config.show_covers = False
self.update_cursong()
# Force a resize of the info labels, if needed:
gobject.idle_add(self.on_notebook_resize, self.notebook, None)
def prefs_stylized_toggled(self, button):
self.config.covers_type = button.get_active()
self.library.library_browse(root=self.config.wd)
self.artwork.artwork_update(True)
def prefs_lyrics_toggled(self, button, lyrics_hbox):
self.config.show_lyrics = button.get_active()
lyrics_hbox.set_sensitive(self.config.show_lyrics)
self.info.show_lyrics_updated()
if self.config.show_lyrics:
self.info_update(True)
def prefs_statusbar_toggled(self, button):
self.config.show_statusbar = button.get_active()
if self.config.show_statusbar:
self.statusbar.set_no_show_all(False)
if self.config.expanded:
self.statusbar.show_all()
else:
ui.hide(self.statusbar)
self.update_statusbar()
def prefs_notif_toggled(self, button, notifhbox):
self.config.show_notification = button.get_active()
notifhbox.set_sensitive(self.config.show_notification)
if self.config.show_notification:
self.on_currsong_notify()
else:
try:
gobject.source_remove(self.traytips.notif_handler)
except:
pass
self.traytips.hide()
def prefs_trayicon_toggled(self, button, minimize):
# Note that we update the sensitivity of the minimize
# CheckButton to reflect if the trayicon is visible.
if button.get_active():
self.config.show_trayicon = True
if HAVE_STATUS_ICON:
self.statusicon.set_visible(True)
if self.statusicon.is_embedded() or self.statusicon.get_visible():
minimize.set_sensitive(True)
elif HAVE_EGG:
self.trayicon.show_all()
if self.trayicon.get_property('visible'):
minimize.set_sensitive(True)
else:
self.config.show_trayicon = False
minimize.set_sensitive(False)
if HAVE_STATUS_ICON:
self.statusicon.set_visible(False)
elif HAVE_EGG:
self.trayicon.hide_all()
def seek(self, song, seektime):
mpdh.call(self.client, 'seek', song, seektime)
self.iterate_now()
def on_link_click(self, linktype):
browser_not_loaded = False
if linktype == 'artist':
browser_not_loaded = not misc.browser_load("http://www.wikipedia.org/wiki/Special:Search/" + urllib.quote(mpdh.get(self.songinfo, 'artist')), self.config.url_browser, self.window)
elif linktype == 'album':
browser_not_loaded = not misc.browser_load("http://www.wikipedia.org/wiki/Special:Search/" + urllib.quote(mpdh.get(self.songinfo, 'album')), self.config.url_browser, self.window)
elif linktype == 'edit':
if self.songinfo:
self.on_tags_edit(None)
elif linktype == 'search':
self.on_lyrics_search(None)
elif linktype == 'editlyrics':
browser_not_loaded = not misc.browser_load(self.lyricwiki.lyricwiki_editlink(self.songinfo), self.config.url_browser, self.window)
if browser_not_loaded:
ui.show_msg(self.window, _('Unable to launch a suitable browser.'), _('Launch Browser'), 'browserLoadError', gtk.BUTTONS_CLOSE)
def on_tab_click(self, _widget, event):
if event.button == 3:
self.notebookmenu.popup(None, None, None, event.button, event.time)
return True
def notebook_get_tab_num(self, notebook, tabname):
for tab in range(notebook.get_n_pages()):
if self.notebook_get_tab_text(self.notebook, tab) == tabname:
return tab
def notebook_tab_is_visible(self, notebook, tabname):
tab = self.notebook.get_children()[self.notebook_get_tab_num(notebook, tabname)]
return tab.get_property('visible')
def notebook_get_visible_tab_num(self, notebook, tab_num):
# Get actual tab number for visible tab_num. If there is not
# a visible tab for tab_num, return -1.\
curr_tab = -1
for tab in range(notebook.get_n_pages()):
if notebook.get_children()[tab].get_property('visible'):
curr_tab += 1
if curr_tab == tab_num:
return tab
return -1
def notebook_get_tab_text(self, notebook, tab_num):
tab = notebook.get_children()[tab_num]
return notebook.get_tab_label(tab).get_child().get_children()[1].get_text()
def on_notebook_page_change(self, _notebook, _page, page_num):
self.current_tab = self.notebook_get_tab_text(self.notebook, page_num)
to_focus = self.tabname2focus.get(self.current_tab, None)
if to_focus:
gobject.idle_add(ui.focus, to_focus)
gobject.idle_add(self.update_menu_visibility)
if not self.img_clicked:
self.last_tab = self.current_tab
def on_window_click(self, _widget, event):
if event.button == 3:
self.menu_popup(self.window, event)
def menu_popup(self, widget, event):
if widget == self.window:
if event.get_coords()[1] > self.notebook.get_allocation()[1]:
return
if event.button == 3:
self.update_menu_visibility(True)
gobject.idle_add(self.mainmenu.popup, None, None, None, event.button, event.time)
def on_tab_toggle(self, toggleAction):
name = toggleAction.get_name()
if not toggleAction.get_active():
# Make sure we aren't hiding the last visible tab:
num_tabs_vis = 0
for tab in self.notebook.get_children():
if tab.get_property('visible'):
num_tabs_vis += 1
if num_tabs_vis == 1:
# Keep menu item checking and exit..
toggleAction.set_active(True)
return
# Store value:
if name == self.TAB_CURRENT:
self.config.current_tab_visible = toggleAction.get_active()
elif name == self.TAB_LIBRARY:
self.config.library_tab_visible = toggleAction.get_active()
elif name == self.TAB_PLAYLISTS:
self.config.playlists_tab_visible = toggleAction.get_active()
elif name == self.TAB_STREAMS:
self.config.streams_tab_visible = toggleAction.get_active()
elif name == self.TAB_INFO:
self.config.info_tab_visible = toggleAction.get_active()
# Hide/show:
tabnum = self.notebook_get_tab_num(self.notebook, name)
if toggleAction.get_active():
ui.show(self.notebook.get_children()[tabnum])
else:
ui.hide(self.notebook.get_children()[tabnum])
def on_library_search_shortcut(self, _event):
# Ensure library tab is visible
if not self.notebook_tab_is_visible(self.notebook, self.TAB_LIBRARY):
return
if self.current_tab != self.TAB_LIBRARY:
self.switch_to_tab_name(self.TAB_LIBRARY)
if self.library.search_visible():
self.library.on_search_end(None)
self.library.libsearchfilter_set_focus()
def update_menu_visibility(self, show_songinfo_only=False):
if show_songinfo_only or not self.config.expanded:
for menu in ['add', 'replace', 'playafter', 'rename', 'rm', 'pl', \
'remove', 'clear', 'update', 'new', 'edit', 'sort', 'tag']:
self.UIManager.get_widget('/mainmenu/' + menu + 'menu/').hide()
return
elif self.current_tab == self.TAB_CURRENT:
if len(self.currentdata) > 0:
if self.current_selection.count_selected_rows() > 0:
for menu in ['remove', 'tag']:
self.UIManager.get_widget('/mainmenu/' + menu + 'menu/').show()
else:
for menu in ['remove', 'tag']:
self.UIManager.get_widget('/mainmenu/' + menu + 'menu/').hide()
if not self.current.filterbox_visible:
for menu in ['clear', 'pl', 'sort']:
self.UIManager.get_widget('/mainmenu/' + menu + 'menu/').show()
else:
for menu in ['clear', 'pl', 'sort']:
self.UIManager.get_widget('/mainmenu/' + menu + 'menu/').hide()
else:
for menu in ['clear', 'pl', 'sort', 'remove', 'tag']:
self.UIManager.get_widget('/mainmenu/' + menu + 'menu/').hide()
for menu in ['add', 'replace', 'playafter', 'rename', 'rm', \
'update', 'new', 'edit']:
self.UIManager.get_widget('/mainmenu/' + menu + 'menu/').hide()
elif self.current_tab == self.TAB_LIBRARY:
if len(self.librarydata) > 0:
if self.library_selection.count_selected_rows() > 0:
for menu in ['add', 'replace', 'playafter', 'tag', 'pl']:
self.UIManager.get_widget('/mainmenu/' + menu + 'menu/').show()
self.UIManager.get_widget('/mainmenu/updatemenu/updateselectedmenu/').show()
else:
for menu in ['add', 'replace', 'playafter', 'tag', 'pl']:
self.UIManager.get_widget('/mainmenu/' + menu + 'menu/').hide()
self.UIManager.get_widget('/mainmenu/updatemenu/updateselectedmenu/').hide()
else:
for menu in ['add', 'replace', 'playafter', 'tag', 'update', 'pl']:
self.UIManager.get_widget('/mainmenu/' + menu + 'menu/').hide()
for menu in ['remove', 'clear', 'rename', 'rm', 'new', 'edit', 'sort']:
self.UIManager.get_widget('/mainmenu/' + menu + 'menu/').hide()
if self.library.search_visible():
self.UIManager.get_widget('/mainmenu/updatemenu/').hide()
else:
self.UIManager.get_widget('/mainmenu/updatemenu/').show()
self.UIManager.get_widget('/mainmenu/updatemenu/updatefullmenu/').show()
elif self.current_tab == self.TAB_PLAYLISTS:
if self.playlists_selection.count_selected_rows() > 0:
for menu in ['add', 'replace', 'playafter', 'rm']:
self.UIManager.get_widget('/mainmenu/' + menu + 'menu/').show()
if self.playlists_selection.count_selected_rows() == 1 and mpdh.mpd_major_version(self.client) >= 0.13:
self.UIManager.get_widget('/mainmenu/renamemenu/').show()
else:
self.UIManager.get_widget('/mainmenu/renamemenu/').hide()
else:
for menu in ['add', 'replace', 'playafter', 'rm', 'rename']:
self.UIManager.get_widget('/mainmenu/' + menu + 'menu/').hide()
for menu in ['remove', 'clear', 'pl', 'update', 'new', 'edit', 'sort', 'tag']:
self.UIManager.get_widget('/mainmenu/' + menu + 'menu/').hide()
elif self.current_tab == self.TAB_STREAMS:
self.UIManager.get_widget('/mainmenu/newmenu/').show()
if self.streams_selection.count_selected_rows() > 0:
if self.streams_selection.count_selected_rows() == 1:
self.UIManager.get_widget('/mainmenu/editmenu/').show()
else:
self.UIManager.get_widget('/mainmenu/editmenu/').hide()
for menu in ['add', 'replace', 'playafter', 'rm']:
self.UIManager.get_widget('/mainmenu/' + menu + 'menu/').show()
else:
for menu in ['add', 'replace', 'playafter', 'rm']:
self.UIManager.get_widget('/mainmenu/' + menu + 'menu/').hide()
for menu in ['rename', 'remove', 'clear', 'pl', 'update', 'sort', 'tag']:
self.UIManager.get_widget('/mainmenu/' + menu + 'menu/').hide()
def find_path(self, filename):
full_filename = None
if HAVE_SUGAR:
full_filename = os.path.join(activity.get_bundle_path(), 'share', filename)
else:
if os.path.exists(os.path.join(os.path.split(__file__)[0], filename)):
full_filename = os.path.join(os.path.split(__file__)[0], filename)
elif os.path.exists(os.path.join(os.path.split(__file__)[0], 'pixmaps', filename)):
full_filename = os.path.join(os.path.split(__file__)[0], 'pixmaps', filename)
elif os.path.exists(os.path.join(os.path.split(__file__)[0], 'share', filename)):
full_filename = os.path.join(os.path.split(__file__)[0], 'share', filename)
elif os.path.exists(os.path.join(__file__.split('/lib')[0], 'share', 'pixmaps', filename)):
full_filename = os.path.join(__file__.split('/lib')[0], 'share', 'pixmaps', filename)
elif os.path.exists(os.path.join(sys.prefix, 'share', 'pixmaps', filename)):
full_filename = os.path.join(sys.prefix, 'share', 'pixmaps', filename)
if not full_filename:
print filename + " cannot be found. Aborting..."
sys.exit(1)
return full_filename
def on_tags_edit(self, _widget):
ui.change_cursor(gtk.gdk.Cursor(gtk.gdk.WATCH))
while gtk.events_pending():
gtk.main_iteration()
files = []
temp_mpdpaths = []
if self.current_tab == self.TAB_INFO:
if self.status_is_play_or_pause():
# Use current file in songinfo:
mpdpath = mpdh.get(self.songinfo, 'file')
fullpath = os.path.join(self.config.musicdir[self.config.profile_num], mpdpath)
files.append(fullpath)
temp_mpdpaths.append(mpdpath)
elif self.current_tab == self.TAB_LIBRARY:
# Populates files array with selected library items:
items = self.library.get_path_child_filenames(False)
for item in items:
files.append(os.path.join(self.config.musicdir[self.config.profile_num], item))
temp_mpdpaths.append(item)
elif self.current_tab == self.TAB_CURRENT:
# Populates files array with selected current playlist items:
temp_mpdpaths = self.current.get_selected_filenames(False)
files = self.current.get_selected_filenames(True)
tageditor = tagedit.TagEditor(self.window, self.tags_mpd_update, self.tags_set_use_mpdpath)
tageditor.set_use_mpdpaths(self.config.tags_use_mpdpath)
tageditor.on_tags_edit(files, temp_mpdpaths, self.config.musicdir[self.config.profile_num])
def tags_set_use_mpdpath(self, use_mpdpath):
self.config.tags_use_mpdpath = use_mpdpath
def tags_mpd_update(self, tag_paths):
mpdh.update(self.client, list(tag_paths), self.status)
self.mpd_update_queued = True
def on_about(self, _action):
about_dialog = about.About(self.window, self.config, version, __license__, self.find_path('sonata_large.png'))
stats = None
if self.conn:
# Extract some MPD stats:
mpdstats = mpdh.call(self.client, 'stats')
stats = {'artists': mpdstats['artists'],
'albums': mpdstats['albums'],
'songs': mpdstats['songs'],
'db_playtime': mpdstats['db_playtime'],
}
about_dialog.about_load(stats)
def systemtray_initialize(self):
# Make system tray 'icon' to sit in the system tray
if HAVE_STATUS_ICON:
self.statusicon = gtk.StatusIcon()
self.statusicon.set_from_file(self.find_path('sonata.png'))
self.statusicon.set_visible(self.config.show_trayicon)
self.statusicon.connect('popup_menu', self.systemtray_menu)
self.statusicon.connect('activate', self.systemtray_activate)
elif HAVE_EGG:
self.trayimage = ui.image()
self.trayeventbox = ui.eventbox(add=self.trayimage)
self.trayeventbox.connect('button_press_event', self.systemtray_click)
self.trayeventbox.connect('scroll-event', self.systemtray_scroll)
self.trayeventbox.connect('size-allocate', self.systemtray_size)
self.traytips.set_tip(self.trayeventbox)
try:
self.trayicon = egg.trayicon.TrayIcon("TrayIcon")
self.trayicon.add(self.trayeventbox)
if self.config.show_trayicon:
self.trayicon.show_all()
self.eggtrayfile = self.find_path('sonata.png')
self.trayimage.set_from_pixbuf(img.get_pixbuf_of_size(gtk.gdk.pixbuf_new_from_file(self.eggtrayfile), self.eggtrayheight)[0])
else:
self.trayicon.hide_all()
except:
pass
def dbus_show(self):
self.window.hide()
self.withdraw_app_undo()
def dbus_toggle(self):
if self.window.get_property('visible'):
self.withdraw_app()
else:
self.withdraw_app_undo()
def dbus_popup(self):
self.on_currsong_notify(force_popup=True)
def main(self):
gtk.main()
|
gpl-3.0
|
mhaessig/servo
|
tests/wpt/css-tests/tools/pytest/_pytest/tmpdir.py
|
189
|
4123
|
""" support for providing temporary directories to test functions. """
import re
import pytest
import py
from _pytest.monkeypatch import monkeypatch
class TempdirFactory:
"""Factory for temporary directories under the common base temp directory.
The base directory can be configured using the ``--basetemp`` option.
"""
def __init__(self, config):
self.config = config
self.trace = config.trace.get("tmpdir")
def ensuretemp(self, string, dir=1):
""" (deprecated) return temporary directory path with
the given string as the trailing part. It is usually
better to use the 'tmpdir' function argument which
provides an empty unique-per-test-invocation directory
and is guaranteed to be empty.
"""
#py.log._apiwarn(">1.1", "use tmpdir function argument")
return self.getbasetemp().ensure(string, dir=dir)
def mktemp(self, basename, numbered=True):
"""Create a subdirectory of the base temporary directory and return it.
If ``numbered``, ensure the directory is unique by adding a number
prefix greater than any existing one.
"""
basetemp = self.getbasetemp()
if not numbered:
p = basetemp.mkdir(basename)
else:
p = py.path.local.make_numbered_dir(prefix=basename,
keep=0, rootdir=basetemp, lock_timeout=None)
self.trace("mktemp", p)
return p
def getbasetemp(self):
""" return base temporary directory. """
try:
return self._basetemp
except AttributeError:
basetemp = self.config.option.basetemp
if basetemp:
basetemp = py.path.local(basetemp)
if basetemp.check():
basetemp.remove()
basetemp.mkdir()
else:
temproot = py.path.local.get_temproot()
user = get_user()
if user:
# use a sub-directory in the temproot to speed-up
# make_numbered_dir() call
rootdir = temproot.join('pytest-of-%s' % user)
else:
rootdir = temproot
rootdir.ensure(dir=1)
basetemp = py.path.local.make_numbered_dir(prefix='pytest-',
rootdir=rootdir)
self._basetemp = t = basetemp.realpath()
self.trace("new basetemp", t)
return t
def finish(self):
self.trace("finish")
def get_user():
"""Return the current user name, or None if getuser() does not work
in the current environment (see #1010).
"""
import getpass
try:
return getpass.getuser()
except (ImportError, KeyError):
return None
# backward compatibility
TempdirHandler = TempdirFactory
def pytest_configure(config):
"""Create a TempdirFactory and attach it to the config object.
This is to comply with existing plugins which expect the handler to be
available at pytest_configure time, but ideally should be moved entirely
to the tmpdir_factory session fixture.
"""
mp = monkeypatch()
t = TempdirFactory(config)
config._cleanup.extend([mp.undo, t.finish])
mp.setattr(config, '_tmpdirhandler', t, raising=False)
mp.setattr(pytest, 'ensuretemp', t.ensuretemp, raising=False)
@pytest.fixture(scope='session')
def tmpdir_factory(request):
"""Return a TempdirFactory instance for the test session.
"""
return request.config._tmpdirhandler
@pytest.fixture
def tmpdir(request, tmpdir_factory):
"""return a temporary directory path object
which is unique to each test function invocation,
created as a sub directory of the base temporary
directory. The returned object is a `py.path.local`_
path object.
"""
name = request.node.name
name = re.sub("[\W]", "_", name)
MAXVAL = 30
if len(name) > MAXVAL:
name = name[:MAXVAL]
x = tmpdir_factory.mktemp(name, numbered=True)
return x
|
mpl-2.0
|
femmerling/DirMaker
|
box/lib/python2.7/site-packages/gunicorn/app/pasterapp.py
|
7
|
4839
|
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import os
import pkg_resources
import sys
try:
import configparser as ConfigParser
except ImportError:
import ConfigParser
from paste.deploy import loadapp, loadwsgi
SERVER = loadwsgi.SERVER
from gunicorn.app.base import Application
from gunicorn.config import Config
class PasterBaseApplication(Application):
def app_config(self):
cx = loadwsgi.loadcontext(SERVER, self.cfgurl, relative_to=self.relpath)
gc, lc = cx.global_conf.copy(), cx.local_conf.copy()
cfg = {}
host, port = lc.pop('host', ''), lc.pop('port', '')
if host and port:
cfg['bind'] = '%s:%s' % (host, port)
elif host:
cfg['bind'] = host.split(',')
cfg['workers'] = int(lc.get('workers', 1))
cfg['umask'] = int(lc.get('umask', 0))
cfg['default_proc_name'] = gc.get('__file__')
for k, v in gc.items():
if k not in self.cfg.settings:
continue
cfg[k] = v
for k, v in lc.items():
if k not in self.cfg.settings:
continue
cfg[k] = v
return cfg
def load_config(self):
super(PasterBaseApplication, self).load_config()
# reload logging conf
if hasattr(self, "cfgfname"):
parser = ConfigParser.ConfigParser()
parser.read([self.cfgfname])
if parser.has_section('loggers'):
from logging.config import fileConfig
config_file = os.path.abspath(self.cfgfname)
fileConfig(config_file, dict(__file__=config_file,
here=os.path.dirname(config_file)))
class PasterApplication(PasterBaseApplication):
def init(self, parser, opts, args):
if len(args) != 1:
parser.error("No application name specified.")
cfgfname = os.path.normpath(os.path.join(os.getcwd(), args[0]))
cfgfname = os.path.abspath(cfgfname)
if not os.path.exists(cfgfname):
parser.error("Config file not found: %s" % cfgfname)
self.cfgurl = 'config:%s' % cfgfname
self.relpath = os.path.dirname(cfgfname)
self.cfgfname = cfgfname
sys.path.insert(0, self.relpath)
pkg_resources.working_set.add_entry(self.relpath)
return self.app_config()
def load(self):
return loadapp(self.cfgurl, relative_to=self.relpath)
class PasterServerApplication(PasterBaseApplication):
def __init__(self, app, gcfg=None, host="127.0.0.1", port=None, *args, **kwargs):
self.cfg = Config()
self.app = app
self.callable = None
gcfg = gcfg or {}
cfgfname = gcfg.get("__file__")
if cfgfname is not None:
self.cfgurl = 'config:%s' % cfgfname
self.relpath = os.path.dirname(cfgfname)
self.cfgfname = cfgfname
cfg = kwargs.copy()
if port and not host.startswith("unix:"):
bind = "%s:%s" % (host, port)
else:
bind = host
cfg["bind"] = bind.split(',')
if gcfg:
for k, v in gcfg.items():
cfg[k] = v
cfg["default_proc_name"] = cfg['__file__']
try:
for k, v in cfg.items():
if k.lower() in self.cfg.settings and v is not None:
self.cfg.set(k.lower(), v)
except Exception as e:
sys.stderr.write("\nConfig error: %s\n" % str(e))
sys.stderr.flush()
sys.exit(1)
def load_config(self):
if not hasattr(self, "cfgfname"):
return
cfg = self.app_config()
for k, v in cfg.items():
try:
self.cfg.set(k.lower(), v)
except:
sys.stderr.write("Invalid value for %s: %s\n\n" % (k, v))
raise
def load(self):
if hasattr(self, "cfgfname"):
return loadapp(self.cfgurl, relative_to=self.relpath)
return self.app
def run():
"""\
The ``gunicorn_paster`` command for launcing Paster compatible
apllications like Pylons or Turbogears2
"""
from gunicorn.app.pasterapp import PasterApplication
PasterApplication("%(prog)s [OPTIONS] pasteconfig.ini").run()
def paste_server(app, gcfg=None, host="127.0.0.1", port=None, *args, **kwargs):
"""\
A paster server.
Then entry point in your paster ini file should looks like this:
[server:main]
use = egg:gunicorn#main
host = 127.0.0.1
port = 5000
"""
from gunicorn.app.pasterapp import PasterServerApplication
PasterServerApplication(app, gcfg=gcfg, host=host, port=port, *args, **kwargs).run()
|
mit
|
AzCiS/autorest
|
src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/Http/autoresthttpinfrastructuretestservice/models/a.py
|
8
|
1156
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class A(Model):
"""A.
:param status_code:
:type status_code: str
"""
_attribute_map = {
'status_code': {'key': 'statusCode', 'type': 'str'},
}
def __init__(self, status_code=None):
self.status_code = status_code
class MyException(HttpOperationError):
"""Server responsed with exception of type: 'A'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(MyException, self).__init__(deserialize, response, 'A', *args)
|
mit
|
Ultimaker/Uranium
|
tests/Settings/MockContainer.py
|
1
|
3932
|
from typing import Optional
from UM.Settings.Interfaces import ContainerInterface
import UM.PluginObject
from UM.Signal import Signal
## Fake container class to add to the container registry.
#
# This allows us to test the container registry without testing the container
# class. If something is wrong in the container class it won't influence this
# test.
class MockContainer(ContainerInterface, UM.PluginObject.PluginObject):
## Initialise a new definition container.
#
# The container will have the specified ID and all metadata in the
# provided dictionary.
def __init__(self, metadata = None):
super().__init__()
if metadata is None:
self._metadata = {}
else:
self._metadata = metadata
self._plugin_id = "MockContainerPlugin"
## Gets the ID that was provided at initialisation.
#
# \return The ID of the container.
def getId(self):
return self._metadata["id"]
## Gets all metadata of this container.
#
# This returns the metadata dictionary that was provided in the
# constructor of this mock container.
#
# \return The metadata for this container.
def getMetaData(self):
return self._metadata
## Gets a metadata entry from the metadata dictionary.
#
# \param key The key of the metadata entry.
# \return The value of the metadata entry, or None if there is no such
# entry.
def getMetaDataEntry(self, entry, default = None):
if entry in self._metadata:
return self._metadata[entry]
return default
## Gets a human-readable name for this container.
#
# \return Always returns "MockContainer".
def getName(self):
return "MockContainer"
## Get whether the container item is stored on a read only location in the filesystem.
#
# \return Always returns False
def isReadOnly(self):
return False
## Mock get path
def getPath(self):
return "/path/to/the/light/side"
## Mock set path
def setPath(self, path):
pass
def getAllKeys(self):
pass
def setProperty(self, key, property_name, property_value, container = None, set_from_cache = False):
pass
def getProperty(self, key, property_name, context=None):
if key in self.items:
return self.items[key]
return None
## Get the value of a container item.
#
# Since this mock container cannot contain any items, it always returns
# None.
#
# \return Always returns None.
def getValue(self, key):
pass
## Get whether the container item has a specific property.
#
# This method is not implemented in the mock container.
def hasProperty(self, key, property_name):
return key in self.items
## Serializes the container to a string representation.
#
# This method is not implemented in the mock container.
def serialize(self, ignored_metadata_keys = None):
raise NotImplementedError()
# Should return false (or even throw an exception) if trust (or other verification) is invalidated.
def _trustHook(self, file_name: Optional[str]) -> bool:
return True
## Deserializes the container from a string representation.
#
# This method is not implemented in the mock container.
def deserialize(self, serialized, file_name: Optional[str] = None):
raise NotImplementedError()
@classmethod
def getConfigurationTypeFromSerialized(cls, serialized: str):
raise NotImplementedError()
@classmethod
def getVersionFromSerialized(cls, serialized):
raise NotImplementedError()
def isDirty(self):
return True
def setDirty(self, dirty):
pass
metaDataChanged = Signal()
propertyChanged = Signal()
containersChanged = Signal()
|
lgpl-3.0
|
kkoksvik/FreeCAD
|
src/Mod/Fem/MechanicalMaterial.py
|
7
|
2297
|
# ***************************************************************************
# * *
# * Copyright (c) 2013 - Juergen Riegel <[email protected]> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
__title__ = "MechanicalMaterial"
__author__ = "Juergen Riegel, Bernd Hahnebach"
__url__ = "http://www.freecadweb.org"
import FreeCAD
import _MechanicalMaterial
def makeMechanicalMaterial(name):
'''makeMaterial(name): makes an Material
name there fore is a material name or an file name for a FCMat file'''
obj = FreeCAD.ActiveDocument.addObject("App::MaterialObjectPython", name)
_MechanicalMaterial._MechanicalMaterial(obj)
if FreeCAD.GuiUp:
import _ViewProviderMechanicalMaterial
_ViewProviderMechanicalMaterial._ViewProviderMechanicalMaterial(obj.ViewObject)
# FreeCAD.ActiveDocument.recompute()
return obj
|
lgpl-2.1
|
duncant/stupid_python_tricks
|
shadowstack.py
|
1
|
2920
|
# This file is part of stupid_python_tricks written by Duncan Townsend.
#
# stupid_python_tricks is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# stupid_python_tricks is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with stupid_python_tricks. If not, see <http://www.gnu.org/licenses/>.
from localdata import LocalList
from decorator_decorator import decorator_decorator
@decorator_decorator
def shadowstack(f):
"""Builds a shadow stack for its argument.
This side-steps python's relatively small stack for functions that are not
tail-recursive, but are naturally expressed through recursion.
This will not optimize the memory usage of your program. It only keeps
python's natural stack from overflowing.
If you want to optimize the memory usage of your program, rewrite
it so that it is tail-recursive and then use the tco module.
For a single recursion, the function may be called multiple time with the
same set of arguments. This is especially true if the function is multiply
recursive.
shadowstack is intended for use as a decorator. e.g.
@shadowstack
def foo(*args, **kwargs):
...
"""
class RecursionException(BaseException):
pass
pending = LocalList()
def shadowstacked(*args, **kwargs):
if pending:
try:
return pending[-1][2][(args, frozenset(kwargs.iteritems()))]
except KeyError:
raise RecursionException(args, kwargs)
# We don't catch TypeError because if the arguments are unhashable,
# we'll just spin forever.
else:
pending.append((args, kwargs, {}))
try:
while pending:
args, kwargs = pending[-1][:2]
try:
retval = f(*args, **kwargs)
except RecursionException as e:
assert len(e.args) == 2
args, kwargs = e.args
del e
pending.append((args, kwargs, {}))
else:
pending.pop()
if pending:
pending[-1][2][(args, frozenset(kwargs.iteritems()))] = retval
return retval
finally:
del pending[:]
return shadowstacked
__all__ = ['shadowstack']
import callable_module
callable_module(shadowstack)
|
lgpl-3.0
|
HugoLnx/fofix
|
src/midi/MidiToText.py
|
7
|
4151
|
# -*- coding: ISO-8859-1 -*-
from MidiOutStream import MidiOutStream
class MidiToText(MidiOutStream):
"""
This class renders a midi file as text. It is mostly used for debugging
"""
#############################
# channel events
def channel_message(self, message_type, channel, data):
"""The default event handler for channel messages"""
print 'message_type:%X, channel:%X, data size:%X' % (message_type, channel, len(data))
def note_on(self, channel=0, note=0x40, velocity=0x40):
print 'note_on - ch:%02X, note:%02X, vel:%02X time:%s' % (channel, note, velocity, self.rel_time())
def note_off(self, channel=0, note=0x40, velocity=0x40):
print 'note_off - ch:%02X, note:%02X, vel:%02X time:%s' % (channel, note, velocity, self.rel_time())
def aftertouch(self, channel=0, note=0x40, velocity=0x40):
print 'aftertouch', channel, note, velocity
def continuous_controller(self, channel, controller, value):
print 'controller - ch: %02X, cont: #%02X, value: %02X' % (channel, controller, value)
def patch_change(self, channel, patch):
print 'patch_change - ch:%02X, patch:%02X' % (channel, patch)
def channel_pressure(self, channel, pressure):
print 'channel_pressure', channel, pressure
def pitch_bend(self, channel, value):
print 'pitch_bend ch:%s, value:%s' % (channel, value)
#####################
## Common events
def system_exclusive(self, data):
print 'system_exclusive - data size: %s' % len(date)
def song_position_pointer(self, value):
print 'song_position_pointer: %s' % value
def song_select(self, songNumber):
print 'song_select: %s' % songNumber
def tuning_request(self):
print 'tuning_request'
def midi_time_code(self, msg_type, values):
print 'midi_time_code - msg_type: %s, values: %s' % (msg_type, values)
#########################
# header does not really belong here. But anyhoo!!!
def header(self, format=0, nTracks=1, division=96):
print 'format: %s, nTracks: %s, division: %s' % (format, nTracks, division)
print '----------------------------------'
print ''
def eof(self):
print 'End of file'
def start_of_track(self, n_track=0):
print 'Start - track #%s' % n_track
def end_of_track(self):
print 'End of track'
print ''
###############
# sysex event
def sysex_event(self, data):
print 'sysex_event - datasize: %X' % len(data)
#####################
## meta events
def meta_event(self, meta_type, data):
print 'undefined_meta_event:', meta_type, len(data)
def sequence_number(self, value):
print 'sequence_number', value
def text(self, text):
print 'text', text
def copyright(self, text):
print 'copyright', text
def sequence_name(self, text):
print 'sequence_name:', text
def instrument_name(self, text):
print 'instrument_name:', text
def lyric(self, text):
print 'lyric', text
def marker(self, text):
print 'marker', text
def cuepoint(self, text):
print 'cuepoint', text
def midi_ch_prefix(self, channel):
print 'midi_ch_prefix', channel
def midi_port(self, value):
print 'midi_port:', value
def tempo(self, value):
print 'tempo:', value
def smtp_offset(self, hour, minute, second, frame, framePart):
print 'smtp_offset', hour, minute, second, frame, framePart
def time_signature(self, nn, dd, cc, bb):
print 'time_signature:', self.abs_time(), nn, dd, cc, bb
def key_signature(self, sf, mi):
print 'key_signature', sf, mi
def sequencer_specific(self, data):
print 'sequencer_specific', len(data)
if __name__ == '__main__':
# get data
import sys
test_file = sys.argv[1]
f = open(test_file, 'rb')
# do parsing
from MidiInFile import MidiInFile
midiIn = MidiInFile(MidiToText(), f)
midiIn.read()
f.close()
|
gpl-2.0
|
flotre/Sick-Beard
|
lib/hachoir_parser/misc/msoffice_summary.py
|
90
|
12537
|
"""
Microsoft Document summaries structures.
Documents
---------
- Apache POI (HPSF Internals):
http://poi.apache.org/hpsf/internals.html
"""
from lib.hachoir_parser import HachoirParser
from lib.hachoir_core.field import (FieldSet, ParserError,
RootSeekableFieldSet, SeekableFieldSet,
Bit, Bits, NullBits,
UInt8, UInt16, UInt32, TimestampWin64, TimedeltaWin64, Enum,
Bytes, RawBytes, NullBytes, String,
Int8, Int32, Float32, Float64, PascalString32)
from lib.hachoir_core.text_handler import textHandler, hexadecimal, filesizeHandler
from lib.hachoir_core.tools import createDict
from lib.hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN
from lib.hachoir_parser.common.win32 import GUID, PascalStringWin32, CODEPAGE_CHARSET
from lib.hachoir_parser.image.bmp import BmpHeader, parseImageData
MAX_SECTION_COUNT = 100
OS_MAC = 1
OS_NAME = {
0: "Windows 16-bit",
1: "Macintosh",
2: "Windows 32-bit",
}
class OSConfig:
def __init__(self, big_endian):
if big_endian:
self.charset = "MacRoman"
self.utf16 = "UTF-16-BE"
else:
# FIXME: Don't guess the charset, use ISO-8859-1 or UTF-8
#self.charset = "ISO-8859-1"
self.charset = None
self.utf16 = "UTF-16-LE"
class PropertyIndex(FieldSet):
TAG_CODEPAGE = 1
COMMON_PROPERTY = {
0: "Dictionary",
1: "CodePage",
0x80000000: "LOCALE_SYSTEM_DEFAULT",
0x80000003: "CASE_SENSITIVE",
}
DOCUMENT_PROPERTY = {
2: "Category",
3: "PresentationFormat",
4: "NumBytes",
5: "NumLines",
6: "NumParagraphs",
7: "NumSlides",
8: "NumNotes",
9: "NumHiddenSlides",
10: "NumMMClips",
11: "Scale",
12: "HeadingPairs",
13: "DocumentParts",
14: "Manager",
15: "Company",
16: "LinksDirty",
17: "DocSumInfo_17",
18: "DocSumInfo_18",
19: "DocSumInfo_19",
20: "DocSumInfo_20",
21: "DocSumInfo_21",
22: "DocSumInfo_22",
23: "DocSumInfo_23",
}
DOCUMENT_PROPERTY.update(COMMON_PROPERTY)
COMPONENT_PROPERTY = {
2: "Title",
3: "Subject",
4: "Author",
5: "Keywords",
6: "Comments",
7: "Template",
8: "LastSavedBy",
9: "RevisionNumber",
10: "TotalEditingTime",
11: "LastPrinted",
12: "CreateTime",
13: "LastSavedTime",
14: "NumPages",
15: "NumWords",
16: "NumCharacters",
17: "Thumbnail",
18: "AppName",
19: "Security",
}
COMPONENT_PROPERTY.update(COMMON_PROPERTY)
def createFields(self):
if self["../.."].name.startswith("doc_summary"):
enum = self.DOCUMENT_PROPERTY
else:
enum = self.COMPONENT_PROPERTY
yield Enum(UInt32(self, "id"), enum)
yield UInt32(self, "offset")
def createDescription(self):
return "Property: %s" % self["id"].display
class Bool(Int8):
def createValue(self):
value = Int8.createValue(self)
return (value == -1)
class Thumbnail(FieldSet):
"""
Thumbnail.
Documents:
- See Jakarta POI
http://jakarta.apache.org/poi/hpsf/thumbnails.html
http://www.penguin-soft.com/penguin/developer/poi/
org/apache/poi/hpsf/Thumbnail.html#CF_BITMAP
- How To Extract Thumbnail Images
http://sparks.discreet.com/knowledgebase/public/
solutions/ExtractThumbnailImg.htm
"""
FORMAT_CLIPBOARD = -1
FORMAT_NAME = {
-1: "Windows clipboard",
-2: "Macintosh clipboard",
-3: "GUID that contains format identifier",
0: "No data",
2: "Bitmap",
3: "Windows metafile format",
8: "Device Independent Bitmap (DIB)",
14: "Enhanced Windows metafile",
}
DIB_BMP = 8
DIB_FORMAT = {
2: "Bitmap Obsolete (old BMP)",
3: "Windows metafile format (WMF)",
8: "Device Independent Bitmap (BMP)",
14: "Enhanced Windows metafile (EMF)",
}
def __init__(self, *args):
FieldSet.__init__(self, *args)
self._size = self["size"].value * 8
def createFields(self):
yield filesizeHandler(UInt32(self, "size"))
yield Enum(Int32(self, "format"), self.FORMAT_NAME)
if self["format"].value == self.FORMAT_CLIPBOARD:
yield Enum(UInt32(self, "dib_format"), self.DIB_FORMAT)
if self["dib_format"].value == self.DIB_BMP:
yield BmpHeader(self, "bmp_header")
size = (self.size - self.current_size) // 8
yield parseImageData(self, "pixels", size, self["bmp_header"])
return
size = (self.size - self.current_size) // 8
if size:
yield RawBytes(self, "data", size)
class PropertyContent(FieldSet):
TYPE_LPSTR = 30
TYPE_INFO = {
0: ("EMPTY", None),
1: ("NULL", None),
2: ("UInt16", UInt16),
3: ("UInt32", UInt32),
4: ("Float32", Float32),
5: ("Float64", Float64),
6: ("CY", None),
7: ("DATE", None),
8: ("BSTR", None),
9: ("DISPATCH", None),
10: ("ERROR", None),
11: ("BOOL", Bool),
12: ("VARIANT", None),
13: ("UNKNOWN", None),
14: ("DECIMAL", None),
16: ("I1", None),
17: ("UI1", None),
18: ("UI2", None),
19: ("UI4", None),
20: ("I8", None),
21: ("UI8", None),
22: ("INT", None),
23: ("UINT", None),
24: ("VOID", None),
25: ("HRESULT", None),
26: ("PTR", None),
27: ("SAFEARRAY", None),
28: ("CARRAY", None),
29: ("USERDEFINED", None),
30: ("LPSTR", PascalString32),
31: ("LPWSTR", PascalString32),
64: ("FILETIME", TimestampWin64),
65: ("BLOB", None),
66: ("STREAM", None),
67: ("STORAGE", None),
68: ("STREAMED_OBJECT", None),
69: ("STORED_OBJECT", None),
70: ("BLOB_OBJECT", None),
71: ("THUMBNAIL", Thumbnail),
72: ("CLSID", None),
0x1000: ("Vector", None),
}
TYPE_NAME = createDict(TYPE_INFO, 0)
def createFields(self):
self.osconfig = self.parent.osconfig
if True:
yield Enum(Bits(self, "type", 12), self.TYPE_NAME)
yield Bit(self, "is_vector")
yield NullBits(self, "padding", 32-12-1)
else:
yield Enum(Bits(self, "type", 32), self.TYPE_NAME)
tag = self["type"].value
kw = {}
try:
handler = self.TYPE_INFO[tag][1]
if handler == PascalString32:
osconfig = self.osconfig
if tag == self.TYPE_LPSTR:
kw["charset"] = osconfig.charset
else:
kw["charset"] = osconfig.utf16
elif handler == TimestampWin64:
if self.description == "TotalEditingTime":
handler = TimedeltaWin64
except LookupError:
handler = None
if not handler:
raise ParserError("OLE2: Unable to parse property of type %s" \
% self["type"].display)
if self["is_vector"].value:
yield UInt32(self, "count")
for index in xrange(self["count"].value):
yield handler(self, "item[]", **kw)
else:
yield handler(self, "value", **kw)
self.createValue = lambda: self["value"].value
PropertyContent.TYPE_INFO[12] = ("VARIANT", PropertyContent)
class SummarySection(SeekableFieldSet):
def __init__(self, *args):
SeekableFieldSet.__init__(self, *args)
self._size = self["size"].value * 8
def createFields(self):
self.osconfig = self.parent.osconfig
yield UInt32(self, "size")
yield UInt32(self, "property_count")
for index in xrange(self["property_count"].value):
yield PropertyIndex(self, "property_index[]")
for index in xrange(self["property_count"].value):
findex = self["property_index[%u]" % index]
self.seekByte(findex["offset"].value)
field = PropertyContent(self, "property[]", findex["id"].display)
yield field
if not self.osconfig.charset \
and findex['id'].value == PropertyIndex.TAG_CODEPAGE:
codepage = field['value'].value
if codepage in CODEPAGE_CHARSET:
self.osconfig.charset = CODEPAGE_CHARSET[codepage]
else:
self.warning("Unknown codepage: %r" % codepage)
class SummaryIndex(FieldSet):
static_size = 20*8
def createFields(self):
yield String(self, "name", 16)
yield UInt32(self, "offset")
class BaseSummary:
endian = LITTLE_ENDIAN
def __init__(self):
if self["endian"].value == "\xFF\xFE":
self.endian = BIG_ENDIAN
elif self["endian"].value == "\xFE\xFF":
self.endian = LITTLE_ENDIAN
else:
raise ParserError("OLE2: Invalid endian value")
self.osconfig = OSConfig(self["os_type"].value == OS_MAC)
def createFields(self):
yield Bytes(self, "endian", 2, "Endian (0xFF 0xFE for Intel)")
yield UInt16(self, "format", "Format (0)")
yield UInt8(self, "os_version")
yield UInt8(self, "os_revision")
yield Enum(UInt16(self, "os_type"), OS_NAME)
yield GUID(self, "format_id")
yield UInt32(self, "section_count")
if MAX_SECTION_COUNT < self["section_count"].value:
raise ParserError("OLE2: Too much sections (%s)" % self["section_count"].value)
section_indexes = []
for index in xrange(self["section_count"].value):
section_index = SummaryIndex(self, "section_index[]")
yield section_index
section_indexes.append(section_index)
for section_index in section_indexes:
self.seekByte(section_index["offset"].value)
yield SummarySection(self, "section[]")
size = (self.size - self.current_size) // 8
if 0 < size:
yield NullBytes(self, "end_padding", size)
class SummaryParser(BaseSummary, HachoirParser, RootSeekableFieldSet):
PARSER_TAGS = {
"description": "Microsoft Office summary",
}
def __init__(self, stream, **kw):
RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self))
HachoirParser.__init__(self, stream, **kw)
BaseSummary.__init__(self)
def validate(self):
return True
class SummaryFieldSet(BaseSummary, FieldSet):
def __init__(self, parent, name, description=None, size=None):
FieldSet.__init__(self, parent, name, description=description, size=size)
BaseSummary.__init__(self)
class CompObj(FieldSet):
OS_VERSION = {
0x0a03: "Windows 3.1",
}
def createFields(self):
# Header
yield UInt16(self, "version", "Version (=1)")
yield textHandler(UInt16(self, "endian", "Endian (0xFF 0xFE for Intel)"), hexadecimal)
yield UInt8(self, "os_version")
yield UInt8(self, "os_revision")
yield Enum(UInt16(self, "os_type"), OS_NAME)
yield Int32(self, "unused", "(=-1)")
yield GUID(self, "clsid")
# User type
yield PascalString32(self, "user_type", strip="\0")
# Clipboard format
if self["os_type"].value == OS_MAC:
yield Int32(self, "unused[]", "(=-2)")
yield String(self, "clipboard_format", 4)
else:
yield PascalString32(self, "clipboard_format", strip="\0")
if self.current_size == self.size:
return
#-- OLE 2.01 ---
# Program ID
yield PascalString32(self, "prog_id", strip="\0")
if self["os_type"].value != OS_MAC:
# Magic number
yield textHandler(UInt32(self, "magic", "Magic number (0x71B239F4)"), hexadecimal)
# Unicode version
yield PascalStringWin32(self, "user_type_unicode", strip="\0")
yield PascalStringWin32(self, "clipboard_format_unicode", strip="\0")
yield PascalStringWin32(self, "prog_id_unicode", strip="\0")
size = (self.size - self.current_size) // 8
if size:
yield NullBytes(self, "end_padding", size)
|
gpl-3.0
|
zmerlynn/kubernetes
|
build/json-extractor.py
|
41
|
2100
|
#!/usr/bin/env python
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a very simple utility that reads a JSON document from stdin, parses it
# and returns the specified value. The value is described using a simple dot
# notation. If any errors are encountered along the way, an error is output and
# a failure value is returned.
from __future__ import print_function
import json
import sys
def PrintError(*err):
print(*err, file=sys.stderr)
def main():
try:
obj = json.load(sys.stdin)
except Exception, e:
PrintError("Error loading JSON: {0}".format(str(e)))
if len(sys.argv) == 1:
# if we don't have a query string, return success
return 0
elif len(sys.argv) > 2:
PrintError("Usage: {0} <json query>".format(sys.args[0]))
return 1
query_list = sys.argv[1].split('.')
for q in query_list:
if isinstance(obj, dict):
if q not in obj:
PrintError("Couldn't find '{0}' in dict".format(q))
return 1
obj = obj[q]
elif isinstance(obj, list):
try:
index = int(q)
except:
PrintError("Can't use '{0}' to index into array".format(q))
return 1
if index >= len(obj):
PrintError("Index ({0}) is greater than length of list ({1})".format(q, len(obj)))
return 1
obj = obj[index]
else:
PrintError("Trying to query non-queryable object: {0}".format(q))
return 1
if isinstance(obj, str):
print(obj)
else:
print(json.dumps(obj, indent=2))
if __name__ == "__main__":
sys.exit(main())
|
apache-2.0
|
elssar/calibre
|
src/calibre/srv/errors.py
|
10
|
1408
|
#!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2015, Kovid Goyal <kovid at kovidgoyal.net>'
import httplib
class JobQueueFull(Exception):
pass
class RouteError(ValueError):
pass
class HTTPSimpleResponse(Exception):
def __init__(self, http_code, http_message='', close_connection=False, location=None, authenticate=None, log=None):
Exception.__init__(self, http_message)
self.http_code = http_code
self.close_connection = close_connection
self.location = location
self.authenticate = authenticate
self.log = log
class HTTPRedirect(HTTPSimpleResponse):
def __init__(self, location, http_code=httplib.MOVED_PERMANENTLY, http_message='', close_connection=False):
HTTPSimpleResponse.__init__(self, http_code, http_message, close_connection, location)
class HTTPNotFound(HTTPSimpleResponse):
def __init__(self, http_message='', close_connection=False):
HTTPSimpleResponse.__init__(self, httplib.NOT_FOUND, http_message, close_connection)
class HTTPAuthRequired(HTTPSimpleResponse):
def __init__(self, payload, log=None):
HTTPSimpleResponse.__init__(self, httplib.UNAUTHORIZED, authenticate=payload, log=log)
class InvalidCredentials(ValueError):
pass
|
gpl-3.0
|
ulope/django
|
tests/unmanaged_models/models.py
|
67
|
3698
|
"""
Models can have a ``managed`` attribute, which specifies whether the SQL code
is generated for the table on various manage.py operations.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# All of these models are created in the database by Django.
@python_2_unicode_compatible
class A01(models.Model):
f_a = models.CharField(max_length=10, db_index=True)
f_b = models.IntegerField()
class Meta:
db_table = 'a01'
def __str__(self):
return self.f_a
@python_2_unicode_compatible
class B01(models.Model):
fk_a = models.ForeignKey(A01)
f_a = models.CharField(max_length=10, db_index=True)
f_b = models.IntegerField()
class Meta:
db_table = 'b01'
# 'managed' is True by default. This tests we can set it explicitly.
managed = True
def __str__(self):
return self.f_a
@python_2_unicode_compatible
class C01(models.Model):
mm_a = models.ManyToManyField(A01, db_table='d01')
f_a = models.CharField(max_length=10, db_index=True)
f_b = models.IntegerField()
class Meta:
db_table = 'c01'
def __str__(self):
return self.f_a
# All of these models use the same tables as the previous set (they are shadows
# of possibly a subset of the columns). There should be no creation errors,
# since we have told Django they aren't managed by Django.
@python_2_unicode_compatible
class A02(models.Model):
f_a = models.CharField(max_length=10, db_index=True)
class Meta:
db_table = 'a01'
managed = False
def __str__(self):
return self.f_a
@python_2_unicode_compatible
class B02(models.Model):
class Meta:
db_table = 'b01'
managed = False
fk_a = models.ForeignKey(A02)
f_a = models.CharField(max_length=10, db_index=True)
f_b = models.IntegerField()
def __str__(self):
return self.f_a
# To re-use the many-to-many intermediate table, we need to manually set up
# things up.
@python_2_unicode_compatible
class C02(models.Model):
mm_a = models.ManyToManyField(A02, through="Intermediate")
f_a = models.CharField(max_length=10, db_index=True)
f_b = models.IntegerField()
class Meta:
db_table = 'c01'
managed = False
def __str__(self):
return self.f_a
class Intermediate(models.Model):
a02 = models.ForeignKey(A02, db_column="a01_id")
c02 = models.ForeignKey(C02, db_column="c01_id")
class Meta:
db_table = 'd01'
managed = False
# These next models test the creation (or not) of many to many join tables
# between managed and unmanaged models. A join table between two unmanaged
# models shouldn't be automatically created (see #10647).
#
# Firstly, we need some models that will create the tables, purely so that the
# tables are created. This is a test setup, not a requirement for unmanaged
# models.
class Proxy1(models.Model):
class Meta:
db_table = "unmanaged_models_proxy1"
class Proxy2(models.Model):
class Meta:
db_table = "unmanaged_models_proxy2"
class Unmanaged1(models.Model):
class Meta:
managed = False
db_table = "unmanaged_models_proxy1"
# Unmanged with an m2m to unmanaged: the intermediary table won't be created.
class Unmanaged2(models.Model):
mm = models.ManyToManyField(Unmanaged1)
class Meta:
managed = False
db_table = "unmanaged_models_proxy2"
# Here's an unmanaged model with an m2m to a managed one; the intermediary
# table *will* be created (unless given a custom `through` as for C02 above).
class Managed1(models.Model):
mm = models.ManyToManyField(Unmanaged1)
|
bsd-3-clause
|
PeterWangIntel/chromium-crosswalk
|
build/android/provision_devices.py
|
2
|
12311
|
#!/usr/bin/env python
#
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provisions Android devices with settings required for bots.
Usage:
./provision_devices.py [-d <device serial number>]
"""
import argparse
import logging
import os
import posixpath
import re
import subprocess
import sys
import time
from pylib import constants
from pylib import device_settings
from pylib.device import battery_utils
from pylib.device import device_blacklist
from pylib.device import device_errors
from pylib.device import device_utils
from pylib.utils import run_tests_helper
from pylib.utils import timeout_retry
sys.path.append(os.path.join(constants.DIR_SOURCE_ROOT,
'third_party', 'android_testrunner'))
import errors
class _DEFAULT_TIMEOUTS(object):
# L can take a while to reboot after a wipe.
LOLLIPOP = 600
PRE_LOLLIPOP = 180
HELP_TEXT = '{}s on L, {}s on pre-L'.format(LOLLIPOP, PRE_LOLLIPOP)
class _PHASES(object):
WIPE = 'wipe'
PROPERTIES = 'properties'
FINISH = 'finish'
ALL = [WIPE, PROPERTIES, FINISH]
def ProvisionDevices(options):
devices = device_utils.DeviceUtils.HealthyDevices()
if options.device:
devices = [d for d in devices if d == options.device]
if not devices:
raise device_errors.DeviceUnreachableError(options.device)
parallel_devices = device_utils.DeviceUtils.parallel(devices)
parallel_devices.pMap(ProvisionDevice, options)
if options.auto_reconnect:
_LaunchHostHeartbeat()
blacklist = device_blacklist.ReadBlacklist()
if all(d in blacklist for d in devices):
raise device_errors.NoDevicesError
return 0
def ProvisionDevice(device, options):
if options.reboot_timeout:
reboot_timeout = options.reboot_timeout
elif (device.build_version_sdk >=
constants.ANDROID_SDK_VERSION_CODES.LOLLIPOP):
reboot_timeout = _DEFAULT_TIMEOUTS.LOLLIPOP
else:
reboot_timeout = _DEFAULT_TIMEOUTS.PRE_LOLLIPOP
def should_run_phase(phase_name):
return not options.phases or phase_name in options.phases
def run_phase(phase_func, reboot=True):
device.WaitUntilFullyBooted(timeout=reboot_timeout)
phase_func(device, options)
if reboot:
device.Reboot(False, retries=0)
device.adb.WaitForDevice()
try:
if should_run_phase(_PHASES.WIPE):
run_phase(WipeDevice)
if should_run_phase(_PHASES.PROPERTIES):
run_phase(SetProperties)
if should_run_phase(_PHASES.FINISH):
run_phase(FinishProvisioning, reboot=False)
except (errors.WaitForResponseTimedOutError,
device_errors.CommandTimeoutError):
logging.exception('Timed out waiting for device %s. Adding to blacklist.',
str(device))
device_blacklist.ExtendBlacklist([str(device)])
except device_errors.CommandFailedError:
logging.exception('Failed to provision device %s. Adding to blacklist.',
str(device))
device_blacklist.ExtendBlacklist([str(device)])
def WipeDevice(device, options):
"""Wipes data from device, keeping only the adb_keys for authorization.
After wiping data on a device that has been authorized, adb can still
communicate with the device, but after reboot the device will need to be
re-authorized because the adb keys file is stored in /data/misc/adb/.
Thus, adb_keys file is rewritten so the device does not need to be
re-authorized.
Arguments:
device: the device to wipe
"""
if options.skip_wipe:
return
try:
device.EnableRoot()
device_authorized = device.FileExists(constants.ADB_KEYS_FILE)
if device_authorized:
adb_keys = device.ReadFile(constants.ADB_KEYS_FILE,
as_root=True).splitlines()
device.RunShellCommand(['wipe', 'data'],
as_root=True, check_return=True)
device.adb.WaitForDevice()
if device_authorized:
adb_keys_set = set(adb_keys)
for adb_key_file in options.adb_key_files or []:
try:
with open(adb_key_file, 'r') as f:
adb_public_keys = f.readlines()
adb_keys_set.update(adb_public_keys)
except IOError:
logging.warning('Unable to find adb keys file %s.' % adb_key_file)
_WriteAdbKeysFile(device, '\n'.join(adb_keys_set))
except device_errors.CommandFailedError:
logging.exception('Possible failure while wiping the device. '
'Attempting to continue.')
def _WriteAdbKeysFile(device, adb_keys_string):
dir_path = posixpath.dirname(constants.ADB_KEYS_FILE)
device.RunShellCommand(['mkdir', '-p', dir_path],
as_root=True, check_return=True)
device.RunShellCommand(['restorecon', dir_path],
as_root=True, check_return=True)
device.WriteFile(constants.ADB_KEYS_FILE, adb_keys_string, as_root=True)
device.RunShellCommand(['restorecon', constants.ADB_KEYS_FILE],
as_root=True, check_return=True)
def SetProperties(device, options):
try:
device.EnableRoot()
except device_errors.CommandFailedError as e:
logging.warning(str(e))
_ConfigureLocalProperties(device, options.enable_java_debug)
device_settings.ConfigureContentSettings(
device, device_settings.DETERMINISTIC_DEVICE_SETTINGS)
if options.disable_location:
device_settings.ConfigureContentSettings(
device, device_settings.DISABLE_LOCATION_SETTINGS)
else:
device_settings.ConfigureContentSettings(
device, device_settings.ENABLE_LOCATION_SETTINGS)
device_settings.SetLockScreenSettings(device)
if options.disable_network:
device_settings.ConfigureContentSettings(
device, device_settings.NETWORK_DISABLED_SETTINGS)
if options.min_battery_level is not None:
try:
battery = battery_utils.BatteryUtils(device)
battery.ChargeDeviceToLevel(options.min_battery_level)
except device_errors.CommandFailedError as e:
logging.exception('Unable to charge device to specified level.')
if options.max_battery_temp is not None:
try:
battery = battery_utils.BatteryUtils(device)
battery.LetBatteryCoolToTemperature(options.max_battery_temp)
except device_errors.CommandFailedError as e:
logging.exception('Unable to let battery cool to specified temperature.')
def _ConfigureLocalProperties(device, java_debug=True):
"""Set standard readonly testing device properties prior to reboot."""
local_props = [
'persist.sys.usb.config=adb',
'ro.monkey=1',
'ro.test_harness=1',
'ro.audio.silent=1',
'ro.setupwizard.mode=DISABLED',
]
if java_debug:
local_props.append(
'%s=all' % device_utils.DeviceUtils.JAVA_ASSERT_PROPERTY)
local_props.append('debug.checkjni=1')
try:
device.WriteFile(
constants.DEVICE_LOCAL_PROPERTIES_PATH,
'\n'.join(local_props), as_root=True)
# Android will not respect the local props file if it is world writable.
device.RunShellCommand(
['chmod', '644', constants.DEVICE_LOCAL_PROPERTIES_PATH],
as_root=True, check_return=True)
except device_errors.CommandFailedError:
logging.exception('Failed to configure local properties.')
def FinishProvisioning(device, options):
device.RunShellCommand(
['date', '-s', time.strftime('%Y%m%d.%H%M%S', time.gmtime())],
as_root=True, check_return=True)
props = device.RunShellCommand('getprop', check_return=True)
for prop in props:
logging.info(' %s' % prop)
if options.auto_reconnect:
_PushAndLaunchAdbReboot(device, options.target)
def _PushAndLaunchAdbReboot(device, target):
"""Pushes and launches the adb_reboot binary on the device.
Arguments:
device: The DeviceUtils instance for the device to which the adb_reboot
binary should be pushed.
target: The build target (example, Debug or Release) which helps in
locating the adb_reboot binary.
"""
logging.info('Will push and launch adb_reboot on %s' % str(device))
# Kill if adb_reboot is already running.
device.KillAll('adb_reboot', blocking=True, timeout=2, quiet=True)
# Push adb_reboot
logging.info(' Pushing adb_reboot ...')
adb_reboot = os.path.join(constants.DIR_SOURCE_ROOT,
'out/%s/adb_reboot' % target)
device.PushChangedFiles([(adb_reboot, '/data/local/tmp/')])
# Launch adb_reboot
logging.info(' Launching adb_reboot ...')
device.RunShellCommand(
[device.GetDevicePieWrapper(), '/data/local/tmp/adb_reboot'],
check_return=True)
def _LaunchHostHeartbeat():
# Kill if existing host_heartbeat
KillHostHeartbeat()
# Launch a new host_heartbeat
logging.info('Spawning host heartbeat...')
subprocess.Popen([os.path.join(constants.DIR_SOURCE_ROOT,
'build/android/host_heartbeat.py')])
def KillHostHeartbeat():
ps = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
stdout, _ = ps.communicate()
matches = re.findall('\\n.*host_heartbeat.*', stdout)
for match in matches:
logging.info('An instance of host heart beart running... will kill')
pid = re.findall(r'(\S+)', match)[1]
subprocess.call(['kill', str(pid)])
def main():
# Recommended options on perf bots:
# --disable-network
# TODO(tonyg): We eventually want network on. However, currently radios
# can cause perfbots to drain faster than they charge.
# --min-battery-level 95
# Some perf bots run benchmarks with USB charging disabled which leads
# to gradual draining of the battery. We must wait for a full charge
# before starting a run in order to keep the devices online.
parser = argparse.ArgumentParser(
description='Provision Android devices with settings required for bots.')
parser.add_argument('-d', '--device', metavar='SERIAL',
help='the serial number of the device to be provisioned'
' (the default is to provision all devices attached)')
parser.add_argument('--phase', action='append', choices=_PHASES.ALL,
dest='phases',
help='Phases of provisioning to run. '
'(If omitted, all phases will be run.)')
parser.add_argument('--skip-wipe', action='store_true', default=False,
help="don't wipe device data during provisioning")
parser.add_argument('--reboot-timeout', metavar='SECS', type=int,
help='when wiping the device, max number of seconds to'
' wait after each reboot '
'(default: %s)' % _DEFAULT_TIMEOUTS.HELP_TEXT)
parser.add_argument('--min-battery-level', type=int, metavar='NUM',
help='wait for the device to reach this minimum battery'
' level before trying to continue')
parser.add_argument('--disable-location', action='store_true',
help='disable Google location services on devices')
parser.add_argument('--disable-network', action='store_true',
help='disable network access on devices')
parser.add_argument('--disable-java-debug', action='store_false',
dest='enable_java_debug', default=True,
help='disable Java property asserts and JNI checking')
parser.add_argument('-t', '--target', default='Debug',
help='the build target (default: %(default)s)')
parser.add_argument('-r', '--auto-reconnect', action='store_true',
help='push binary which will reboot the device on adb'
' disconnections')
parser.add_argument('--adb-key-files', type=str, nargs='+',
help='list of adb keys to push to device')
parser.add_argument('-v', '--verbose', action='count', default=1,
help='Log more information.')
parser.add_argument('--max-battery-temp', type=int, metavar='NUM',
help='Wait for the battery to have this temp or lower.')
args = parser.parse_args()
constants.SetBuildType(args.target)
run_tests_helper.SetLogLevel(args.verbose)
return ProvisionDevices(args)
if __name__ == '__main__':
sys.exit(main())
|
bsd-3-clause
|
spawnedc/MeCanBlog
|
django/contrib/gis/feeds.py
|
327
|
5925
|
from django.contrib.syndication.feeds import Feed as BaseFeed, FeedDoesNotExist
from django.utils.feedgenerator import Atom1Feed, Rss201rev2Feed
class GeoFeedMixin(object):
"""
This mixin provides the necessary routines for SyndicationFeed subclasses
to produce simple GeoRSS or W3C Geo elements.
"""
def georss_coords(self, coords):
"""
In GeoRSS coordinate pairs are ordered by lat/lon and separated by
a single white space. Given a tuple of coordinates, this will return
a unicode GeoRSS representation.
"""
return u' '.join([u'%f %f' % (coord[1], coord[0]) for coord in coords])
def add_georss_point(self, handler, coords, w3c_geo=False):
"""
Adds a GeoRSS point with the given coords using the given handler.
Handles the differences between simple GeoRSS and the more pouplar
W3C Geo specification.
"""
if w3c_geo:
lon, lat = coords[:2]
handler.addQuickElement(u'geo:lat', u'%f' % lat)
handler.addQuickElement(u'geo:lon', u'%f' % lon)
else:
handler.addQuickElement(u'georss:point', self.georss_coords((coords,)))
def add_georss_element(self, handler, item, w3c_geo=False):
"""
This routine adds a GeoRSS XML element using the given item and handler.
"""
# Getting the Geometry object.
geom = item.get('geometry', None)
if not geom is None:
if isinstance(geom, (list, tuple)):
# Special case if a tuple/list was passed in. The tuple may be
# a point or a box
box_coords = None
if isinstance(geom[0], (list, tuple)):
# Box: ( (X0, Y0), (X1, Y1) )
if len(geom) == 2:
box_coords = geom
else:
raise ValueError('Only should be two sets of coordinates.')
else:
if len(geom) == 2:
# Point: (X, Y)
self.add_georss_point(handler, geom, w3c_geo=w3c_geo)
elif len(geom) == 4:
# Box: (X0, Y0, X1, Y1)
box_coords = (geom[:2], geom[2:])
else:
raise ValueError('Only should be 2 or 4 numeric elements.')
# If a GeoRSS box was given via tuple.
if not box_coords is None:
if w3c_geo: raise ValueError('Cannot use simple GeoRSS box in W3C Geo feeds.')
handler.addQuickElement(u'georss:box', self.georss_coords(box_coords))
else:
# Getting the lower-case geometry type.
gtype = str(geom.geom_type).lower()
if gtype == 'point':
self.add_georss_point(handler, geom.coords, w3c_geo=w3c_geo)
else:
if w3c_geo: raise ValueError('W3C Geo only supports Point geometries.')
# For formatting consistent w/the GeoRSS simple standard:
# http://georss.org/1.0#simple
if gtype in ('linestring', 'linearring'):
handler.addQuickElement(u'georss:line', self.georss_coords(geom.coords))
elif gtype in ('polygon',):
# Only support the exterior ring.
handler.addQuickElement(u'georss:polygon', self.georss_coords(geom[0].coords))
else:
raise ValueError('Geometry type "%s" not supported.' % geom.geom_type)
### SyndicationFeed subclasses ###
class GeoRSSFeed(Rss201rev2Feed, GeoFeedMixin):
def rss_attributes(self):
attrs = super(GeoRSSFeed, self).rss_attributes()
attrs[u'xmlns:georss'] = u'http://www.georss.org/georss'
return attrs
def add_item_elements(self, handler, item):
super(GeoRSSFeed, self).add_item_elements(handler, item)
self.add_georss_element(handler, item)
def add_root_elements(self, handler):
super(GeoRSSFeed, self).add_root_elements(handler)
self.add_georss_element(handler, self.feed)
class GeoAtom1Feed(Atom1Feed, GeoFeedMixin):
def root_attributes(self):
attrs = super(GeoAtom1Feed, self).root_attributes()
attrs[u'xmlns:georss'] = u'http://www.georss.org/georss'
return attrs
def add_item_elements(self, handler, item):
super(GeoAtom1Feed, self).add_item_elements(handler, item)
self.add_georss_element(handler, item)
def add_root_elements(self, handler):
super(GeoAtom1Feed, self).add_root_elements(handler)
self.add_georss_element(handler, self.feed)
class W3CGeoFeed(Rss201rev2Feed, GeoFeedMixin):
def rss_attributes(self):
attrs = super(W3CGeoFeed, self).rss_attributes()
attrs[u'xmlns:geo'] = u'http://www.w3.org/2003/01/geo/wgs84_pos#'
return attrs
def add_item_elements(self, handler, item):
super(W3CGeoFeed, self).add_item_elements(handler, item)
self.add_georss_element(handler, item, w3c_geo=True)
def add_root_elements(self, handler):
super(W3CGeoFeed, self).add_root_elements(handler)
self.add_georss_element(handler, self.feed, w3c_geo=True)
### Feed subclass ###
class Feed(BaseFeed):
"""
This is a subclass of the `Feed` from `django.contrib.syndication`.
This allows users to define a `geometry(obj)` and/or `item_geometry(item)`
methods on their own subclasses so that geo-referenced information may
placed in the feed.
"""
feed_type = GeoRSSFeed
def feed_extra_kwargs(self, obj):
return {'geometry' : self.__get_dynamic_attr('geometry', obj)}
def item_extra_kwargs(self, item):
return {'geometry' : self.__get_dynamic_attr('item_geometry', item)}
|
bsd-3-clause
|
morreene/tradenews
|
venv/Lib/site-packages/werkzeug/contrib/atom.py
|
104
|
15575
|
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.atom
~~~~~~~~~~~~~~~~~~~~~
This module provides a class called :class:`AtomFeed` which can be
used to generate feeds in the Atom syndication format (see :rfc:`4287`).
Example::
def atom_feed(request):
feed = AtomFeed("My Blog", feed_url=request.url,
url=request.host_url,
subtitle="My example blog for a feed test.")
for post in Post.query.limit(10).all():
feed.add(post.title, post.body, content_type='html',
author=post.author, url=post.url, id=post.uid,
updated=post.last_update, published=post.pub_date)
return feed.get_response()
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from datetime import datetime
from werkzeug.utils import escape
from werkzeug.wrappers import BaseResponse
from werkzeug._compat import implements_to_string, string_types
XHTML_NAMESPACE = 'http://www.w3.org/1999/xhtml'
def _make_text_block(name, content, content_type=None):
"""Helper function for the builder that creates an XML text block."""
if content_type == 'xhtml':
return u'<%s type="xhtml"><div xmlns="%s">%s</div></%s>\n' % \
(name, XHTML_NAMESPACE, content, name)
if not content_type:
return u'<%s>%s</%s>\n' % (name, escape(content), name)
return u'<%s type="%s">%s</%s>\n' % (name, content_type,
escape(content), name)
def format_iso8601(obj):
"""Format a datetime object for iso8601"""
iso8601 = obj.isoformat()
if obj.tzinfo:
return iso8601
return iso8601 + 'Z'
@implements_to_string
class AtomFeed(object):
"""A helper class that creates Atom feeds.
:param title: the title of the feed. Required.
:param title_type: the type attribute for the title element. One of
``'html'``, ``'text'`` or ``'xhtml'``.
:param url: the url for the feed (not the url *of* the feed)
:param id: a globally unique id for the feed. Must be an URI. If
not present the `feed_url` is used, but one of both is
required.
:param updated: the time the feed was modified the last time. Must
be a :class:`datetime.datetime` object. If not
present the latest entry's `updated` is used.
Treated as UTC if naive datetime.
:param feed_url: the URL to the feed. Should be the URL that was
requested.
:param author: the author of the feed. Must be either a string (the
name) or a dict with name (required) and uri or
email (both optional). Can be a list of (may be
mixed, too) strings and dicts, too, if there are
multiple authors. Required if not every entry has an
author element.
:param icon: an icon for the feed.
:param logo: a logo for the feed.
:param rights: copyright information for the feed.
:param rights_type: the type attribute for the rights element. One of
``'html'``, ``'text'`` or ``'xhtml'``. Default is
``'text'``.
:param subtitle: a short description of the feed.
:param subtitle_type: the type attribute for the subtitle element.
One of ``'text'``, ``'html'``, ``'text'``
or ``'xhtml'``. Default is ``'text'``.
:param links: additional links. Must be a list of dictionaries with
href (required) and rel, type, hreflang, title, length
(all optional)
:param generator: the software that generated this feed. This must be
a tuple in the form ``(name, url, version)``. If
you don't want to specify one of them, set the item
to `None`.
:param entries: a list with the entries for the feed. Entries can also
be added later with :meth:`add`.
For more information on the elements see
http://www.atomenabled.org/developers/syndication/
Everywhere where a list is demanded, any iterable can be used.
"""
default_generator = ('Werkzeug', None, None)
def __init__(self, title=None, entries=None, **kwargs):
self.title = title
self.title_type = kwargs.get('title_type', 'text')
self.url = kwargs.get('url')
self.feed_url = kwargs.get('feed_url', self.url)
self.id = kwargs.get('id', self.feed_url)
self.updated = kwargs.get('updated')
self.author = kwargs.get('author', ())
self.icon = kwargs.get('icon')
self.logo = kwargs.get('logo')
self.rights = kwargs.get('rights')
self.rights_type = kwargs.get('rights_type')
self.subtitle = kwargs.get('subtitle')
self.subtitle_type = kwargs.get('subtitle_type', 'text')
self.generator = kwargs.get('generator')
if self.generator is None:
self.generator = self.default_generator
self.links = kwargs.get('links', [])
self.entries = entries and list(entries) or []
if not hasattr(self.author, '__iter__') \
or isinstance(self.author, string_types + (dict,)):
self.author = [self.author]
for i, author in enumerate(self.author):
if not isinstance(author, dict):
self.author[i] = {'name': author}
if not self.title:
raise ValueError('title is required')
if not self.id:
raise ValueError('id is required')
for author in self.author:
if 'name' not in author:
raise TypeError('author must contain at least a name')
def add(self, *args, **kwargs):
"""Add a new entry to the feed. This function can either be called
with a :class:`FeedEntry` or some keyword and positional arguments
that are forwarded to the :class:`FeedEntry` constructor.
"""
if len(args) == 1 and not kwargs and isinstance(args[0], FeedEntry):
self.entries.append(args[0])
else:
kwargs['feed_url'] = self.feed_url
self.entries.append(FeedEntry(*args, **kwargs))
def __repr__(self):
return '<%s %r (%d entries)>' % (
self.__class__.__name__,
self.title,
len(self.entries)
)
def generate(self):
"""Return a generator that yields pieces of XML."""
# atom demands either an author element in every entry or a global one
if not self.author:
if any(not e.author for e in self.entries):
self.author = ({'name': 'Unknown author'},)
if not self.updated:
dates = sorted([entry.updated for entry in self.entries])
self.updated = dates and dates[-1] or datetime.utcnow()
yield u'<?xml version="1.0" encoding="utf-8"?>\n'
yield u'<feed xmlns="http://www.w3.org/2005/Atom">\n'
yield ' ' + _make_text_block('title', self.title, self.title_type)
yield u' <id>%s</id>\n' % escape(self.id)
yield u' <updated>%s</updated>\n' % format_iso8601(self.updated)
if self.url:
yield u' <link href="%s" />\n' % escape(self.url)
if self.feed_url:
yield u' <link href="%s" rel="self" />\n' % \
escape(self.feed_url)
for link in self.links:
yield u' <link %s/>\n' % ''.join('%s="%s" ' %
(k, escape(link[k])) for k in link)
for author in self.author:
yield u' <author>\n'
yield u' <name>%s</name>\n' % escape(author['name'])
if 'uri' in author:
yield u' <uri>%s</uri>\n' % escape(author['uri'])
if 'email' in author:
yield ' <email>%s</email>\n' % escape(author['email'])
yield ' </author>\n'
if self.subtitle:
yield ' ' + _make_text_block('subtitle', self.subtitle,
self.subtitle_type)
if self.icon:
yield u' <icon>%s</icon>\n' % escape(self.icon)
if self.logo:
yield u' <logo>%s</logo>\n' % escape(self.logo)
if self.rights:
yield ' ' + _make_text_block('rights', self.rights,
self.rights_type)
generator_name, generator_url, generator_version = self.generator
if generator_name or generator_url or generator_version:
tmp = [u' <generator']
if generator_url:
tmp.append(u' uri="%s"' % escape(generator_url))
if generator_version:
tmp.append(u' version="%s"' % escape(generator_version))
tmp.append(u'>%s</generator>\n' % escape(generator_name))
yield u''.join(tmp)
for entry in self.entries:
for line in entry.generate():
yield u' ' + line
yield u'</feed>\n'
def to_string(self):
"""Convert the feed into a string."""
return u''.join(self.generate())
def get_response(self):
"""Return a response object for the feed."""
return BaseResponse(self.to_string(), mimetype='application/atom+xml')
def __call__(self, environ, start_response):
"""Use the class as WSGI response object."""
return self.get_response()(environ, start_response)
def __str__(self):
return self.to_string()
@implements_to_string
class FeedEntry(object):
"""Represents a single entry in a feed.
:param title: the title of the entry. Required.
:param title_type: the type attribute for the title element. One of
``'html'``, ``'text'`` or ``'xhtml'``.
:param content: the content of the entry.
:param content_type: the type attribute for the content element. One
of ``'html'``, ``'text'`` or ``'xhtml'``.
:param summary: a summary of the entry's content.
:param summary_type: the type attribute for the summary element. One
of ``'html'``, ``'text'`` or ``'xhtml'``.
:param url: the url for the entry.
:param id: a globally unique id for the entry. Must be an URI. If
not present the URL is used, but one of both is required.
:param updated: the time the entry was modified the last time. Must
be a :class:`datetime.datetime` object. Treated as
UTC if naive datetime. Required.
:param author: the author of the entry. Must be either a string (the
name) or a dict with name (required) and uri or
email (both optional). Can be a list of (may be
mixed, too) strings and dicts, too, if there are
multiple authors. Required if the feed does not have an
author element.
:param published: the time the entry was initially published. Must
be a :class:`datetime.datetime` object. Treated as
UTC if naive datetime.
:param rights: copyright information for the entry.
:param rights_type: the type attribute for the rights element. One of
``'html'``, ``'text'`` or ``'xhtml'``. Default is
``'text'``.
:param links: additional links. Must be a list of dictionaries with
href (required) and rel, type, hreflang, title, length
(all optional)
:param categories: categories for the entry. Must be a list of dictionaries
with term (required), scheme and label (all optional)
:param xml_base: The xml base (url) for this feed item. If not provided
it will default to the item url.
For more information on the elements see
http://www.atomenabled.org/developers/syndication/
Everywhere where a list is demanded, any iterable can be used.
"""
def __init__(self, title=None, content=None, feed_url=None, **kwargs):
self.title = title
self.title_type = kwargs.get('title_type', 'text')
self.content = content
self.content_type = kwargs.get('content_type', 'html')
self.url = kwargs.get('url')
self.id = kwargs.get('id', self.url)
self.updated = kwargs.get('updated')
self.summary = kwargs.get('summary')
self.summary_type = kwargs.get('summary_type', 'html')
self.author = kwargs.get('author', ())
self.published = kwargs.get('published')
self.rights = kwargs.get('rights')
self.links = kwargs.get('links', [])
self.categories = kwargs.get('categories', [])
self.xml_base = kwargs.get('xml_base', feed_url)
if not hasattr(self.author, '__iter__') \
or isinstance(self.author, string_types + (dict,)):
self.author = [self.author]
for i, author in enumerate(self.author):
if not isinstance(author, dict):
self.author[i] = {'name': author}
if not self.title:
raise ValueError('title is required')
if not self.id:
raise ValueError('id is required')
if not self.updated:
raise ValueError('updated is required')
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.title
)
def generate(self):
"""Yields pieces of ATOM XML."""
base = ''
if self.xml_base:
base = ' xml:base="%s"' % escape(self.xml_base)
yield u'<entry%s>\n' % base
yield u' ' + _make_text_block('title', self.title, self.title_type)
yield u' <id>%s</id>\n' % escape(self.id)
yield u' <updated>%s</updated>\n' % format_iso8601(self.updated)
if self.published:
yield u' <published>%s</published>\n' % \
format_iso8601(self.published)
if self.url:
yield u' <link href="%s" />\n' % escape(self.url)
for author in self.author:
yield u' <author>\n'
yield u' <name>%s</name>\n' % escape(author['name'])
if 'uri' in author:
yield u' <uri>%s</uri>\n' % escape(author['uri'])
if 'email' in author:
yield u' <email>%s</email>\n' % escape(author['email'])
yield u' </author>\n'
for link in self.links:
yield u' <link %s/>\n' % ''.join('%s="%s" ' %
(k, escape(link[k])) for k in link)
for category in self.categories:
yield u' <category %s/>\n' % ''.join('%s="%s" ' %
(k, escape(category[k])) for k in category)
if self.summary:
yield u' ' + _make_text_block('summary', self.summary,
self.summary_type)
if self.content:
yield u' ' + _make_text_block('content', self.content,
self.content_type)
yield u'</entry>\n'
def to_string(self):
"""Convert the feed item into a unicode object."""
return u''.join(self.generate())
def __str__(self):
return self.to_string()
|
bsd-3-clause
|
nlholdem/icodoom
|
.venv/lib/python2.7/site-packages/tensorflow/contrib/seq2seq/python/ops/decoder_fn.py
|
19
|
10437
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Seq2seq loss operations for use in neural networks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import nest
__all__ = ["simple_decoder_fn_train",
"simple_decoder_fn_inference"]
def simple_decoder_fn_train(encoder_state, name=None):
""" Simple decoder function for a sequence-to-sequence model used in the
`dynamic_rnn_decoder`.
The `simple_decoder_fn_train` is a simple training function for a
sequence-to-sequence model. It should be used when `dynamic_rnn_decoder` is
in the training mode.
The `simple_decoder_fn_train` is called with a set of the user arguments and
returns the `decoder_fn`, which can be passed to the `dynamic_rnn_decoder`,
such that
```
dynamic_fn_train = simple_decoder_fn_train(encoder_state)
outputs_train, state_train = dynamic_rnn_decoder(
decoder_fn=dynamic_fn_train, ...)
```
Further usage can be found in the `kernel_tests/seq2seq_test.py`.
Args:
encoder_state: The encoded state to initialize the `dynamic_rnn_decoder`.
name: (default: `None`) NameScope for the decoder function;
defaults to "simple_decoder_fn_train"
Returns:
A decoder function with the required interface of `dynamic_rnn_decoder`
intended for training.
"""
with ops.name_scope(name, "simple_decoder_fn_train", [encoder_state]):
pass
def decoder_fn(time, cell_state, cell_input, cell_output, context_state):
""" Decoder function used in the `dynamic_rnn_decoder` with the purpose of
training.
Args:
time: positive integer constant reflecting the current timestep.
cell_state: state of RNNCell.
cell_input: input provided by `dynamic_rnn_decoder`.
cell_output: output of RNNCell.
context_state: context state provided by `dynamic_rnn_decoder`.
Returns:
A tuple (done, next state, next input, emit output, next context state)
where:
done: `None`, which is used by the `dynamic_rnn_decoder` to indicate
that `sequence_lengths` in `dynamic_rnn_decoder` should be used.
next state: `cell_state`, this decoder function does not modify the
given state.
next input: `cell_input`, this decoder function does not modify the
given input. The input could be modified when applying e.g. attention.
emit output: `cell_output`, this decoder function does not modify the
given output.
next context state: `context_state`, this decoder function does not
modify the given context state. The context state could be modified when
applying e.g. beam search.
"""
with ops.name_scope(name, "simple_decoder_fn_train",
[time, cell_state, cell_input, cell_output,
context_state]):
if cell_state is None: # first call, return encoder_state
return (None, encoder_state, cell_input, cell_output, context_state)
else:
return (None, cell_state, cell_input, cell_output, context_state)
return decoder_fn
def simple_decoder_fn_inference(output_fn, encoder_state, embeddings,
start_of_sequence_id, end_of_sequence_id,
maximum_length, num_decoder_symbols,
dtype=dtypes.int32, name=None):
""" Simple decoder function for a sequence-to-sequence model used in the
`dynamic_rnn_decoder`.
The `simple_decoder_fn_inference` is a simple inference function for a
sequence-to-sequence model. It should be used when `dynamic_rnn_decoder` is
in the inference mode.
The `simple_decoder_fn_inference` is called with a set of the user arguments
and returns the `decoder_fn`, which can be passed to the
`dynamic_rnn_decoder`, such that
```
dynamic_fn_inference = simple_decoder_fn_inference(...)
outputs_inference, state_inference = dynamic_rnn_decoder(
decoder_fn=dynamic_fn_inference, ...)
```
Further usage can be found in the `kernel_tests/seq2seq_test.py`.
Args:
output_fn: An output function to project your `cell_output` onto class
logits.
An example of an output function;
```
tf.variable_scope("decoder") as varscope
output_fn = lambda x: layers.linear(x, num_decoder_symbols,
scope=varscope)
outputs_train, state_train = seq2seq.dynamic_rnn_decoder(...)
logits_train = output_fn(outputs_train)
varscope.reuse_variables()
logits_inference, state_inference = seq2seq.dynamic_rnn_decoder(
output_fn=output_fn, ...)
```
If `None` is supplied it will act as an identity function, which
might be wanted when using the RNNCell `OutputProjectionWrapper`.
encoder_state: The encoded state to initialize the `dynamic_rnn_decoder`.
embeddings: The embeddings matrix used for the decoder sized
`[num_decoder_symbols, embedding_size]`.
start_of_sequence_id: The start of sequence ID in the decoder embeddings.
end_of_sequence_id: The end of sequence ID in the decoder embeddings.
maximum_length: The maximum allowed of time steps to decode.
num_decoder_symbols: The number of classes to decode at each time step.
dtype: (default: `dtypes.int32`) The default data type to use when
handling integer objects.
name: (default: `None`) NameScope for the decoder function;
defaults to "simple_decoder_fn_inference"
Returns:
A decoder function with the required interface of `dynamic_rnn_decoder`
intended for inference.
"""
with ops.name_scope(name, "simple_decoder_fn_inference",
[output_fn, encoder_state, embeddings,
start_of_sequence_id, end_of_sequence_id,
maximum_length, num_decoder_symbols, dtype]):
start_of_sequence_id = ops.convert_to_tensor(start_of_sequence_id, dtype)
end_of_sequence_id = ops.convert_to_tensor(end_of_sequence_id, dtype)
maximum_length = ops.convert_to_tensor(maximum_length, dtype)
num_decoder_symbols = ops.convert_to_tensor(num_decoder_symbols, dtype)
encoder_info = nest.flatten(encoder_state)[0]
batch_size = encoder_info.get_shape()[0].value
if output_fn is None:
output_fn = lambda x: x
if batch_size is None:
batch_size = array_ops.shape(encoder_info)[0]
def decoder_fn(time, cell_state, cell_input, cell_output, context_state):
""" Decoder function used in the `dynamic_rnn_decoder` with the purpose of
inference.
The main difference between this decoder function and the `decoder_fn` in
`simple_decoder_fn_train` is how `next_cell_input` is calculated. In this
decoder function we calculate the next input by applying an argmax across
the feature dimension of the output from the decoder. This is a
greedy-search approach. (Bahdanau et al., 2014) & (Sutskever et al., 2014)
use beam-search instead.
Args:
time: positive integer constant reflecting the current timestep.
cell_state: state of RNNCell.
cell_input: input provided by `dynamic_rnn_decoder`.
cell_output: output of RNNCell.
context_state: context state provided by `dynamic_rnn_decoder`.
Returns:
A tuple (done, next state, next input, emit output, next context state)
where:
done: A boolean vector to indicate which sentences has reached a
`end_of_sequence_id`. This is used for early stopping by the
`dynamic_rnn_decoder`. When `time>=maximum_length` a boolean vector with
all elements as `true` is returned.
next state: `cell_state`, this decoder function does not modify the
given state.
next input: The embedding from argmax of the `cell_output` is used as
`next_input`.
emit output: If `output_fn is None` the supplied `cell_output` is
returned, else the `output_fn` is used to update the `cell_output`
before calculating `next_input` and returning `cell_output`.
next context state: `context_state`, this decoder function does not
modify the given context state. The context state could be modified when
applying e.g. beam search.
"""
with ops.name_scope(name, "simple_decoder_fn_inference",
[time, cell_state, cell_input, cell_output,
context_state]):
if cell_input is not None:
raise ValueError("Expected cell_input to be None, but saw: %s" %
cell_input)
if cell_output is None:
# invariant that this is time == 0
next_input_id = array_ops.ones([batch_size,], dtype=dtype) * (
start_of_sequence_id)
done = array_ops.zeros([batch_size,], dtype=dtypes.bool)
cell_state = encoder_state
cell_output = array_ops.zeros([num_decoder_symbols],
dtype=dtypes.float32)
else:
cell_output = output_fn(cell_output)
next_input_id = math_ops.cast(
math_ops.argmax(cell_output, 1), dtype=dtype)
done = math_ops.equal(next_input_id, end_of_sequence_id)
next_input = array_ops.gather(embeddings, next_input_id)
# if time > maxlen, return all true vector
done = control_flow_ops.cond(math_ops.greater(time, maximum_length),
lambda: array_ops.ones([batch_size,], dtype=dtypes.bool),
lambda: done)
return (done, cell_state, next_input, cell_output, context_state)
return decoder_fn
|
gpl-3.0
|
esikachev/scenario
|
sahara/tests/unit/plugins/vanilla/v1_2_1/test_plugin.py
|
3
|
11886
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import testtools
from sahara import conductor as cond
from sahara import context
from sahara import exceptions as e
from sahara.plugins import base as pb
from sahara.plugins import exceptions as ex
from sahara.plugins.vanilla import plugin as p
from sahara.plugins.vanilla.v1_2_1 import config_helper as c_h
from sahara.plugins.vanilla.v1_2_1 import mysql_helper as m_h
from sahara.plugins.vanilla.v1_2_1 import versionhandler as v_h
from sahara.tests.unit import base
from sahara.tests.unit import testutils as tu
from sahara.utils import edp
conductor = cond.API
class VanillaPluginTest(base.SaharaWithDbTestCase):
def setUp(self):
super(VanillaPluginTest, self).setUp()
pb.setup_plugins()
self.pl = p.VanillaProvider()
def test_validate(self):
self.ng = []
self.ng.append(tu.make_ng_dict("nn", "f1", ["namenode"], 0))
self.ng.append(tu.make_ng_dict("jt", "f1", ["jobtracker"], 0))
self.ng.append(tu.make_ng_dict("tt", "f1", ["tasktracker"], 0))
self.ng.append(tu.make_ng_dict("oozie", "f1", ["oozie"], 0))
self._validate_case(1, 1, 10, 1)
with testtools.ExpectedException(ex.InvalidComponentCountException):
self._validate_case(0, 1, 10, 1)
with testtools.ExpectedException(ex.InvalidComponentCountException):
self._validate_case(2, 1, 10, 1)
with testtools.ExpectedException(ex.RequiredServiceMissingException):
self._validate_case(1, 0, 10, 1)
with testtools.ExpectedException(ex.InvalidComponentCountException):
self._validate_case(1, 2, 10, 1)
with testtools.ExpectedException(ex.InvalidComponentCountException):
self._validate_case(1, 1, 0, 2)
with testtools.ExpectedException(ex.RequiredServiceMissingException):
self._validate_case(1, 0, 0, 1)
def _validate_case(self, *args):
lst = []
for i in range(0, len(args)):
self.ng[i]['count'] = args[i]
lst.append(self.ng[i])
cl = tu.create_cluster("cluster1", "tenant1", "vanilla", "1.2.1", lst)
self.pl.validate(cl)
def test_get_configs(self):
cl_configs = self.pl.get_configs("1.2.1")
for cfg in cl_configs:
if cfg.config_type is "bool":
self.assertIsInstance(cfg.default_value, bool)
elif cfg.config_type is "int":
try:
self.assertIsInstance(cfg.default_value, int)
except AssertionError:
self.assertIsInstance(cfg.default_value, long)
else:
self.assertIsInstance(cfg.default_value, str)
self.assertNotIn(cfg.name, c_h.HIDDEN_CONFS)
def test_extract_environment_configs(self):
env_configs = {
"JobFlow": {
'Oozie Heap Size': 4000
},
"MapReduce": {
'Job Tracker Heap Size': 1000,
'Task Tracker Heap Size': "2000"
},
"HDFS": {
'Name Node Heap Size': 3000,
'Data Node Heap Size': "4000"
},
"Wrong-applicable-target": {
't1': 4
}}
self.assertEqual(c_h.extract_environment_confs(env_configs),
['CATALINA_OPTS -Xmx4000m',
'HADOOP_DATANODE_OPTS=\\"-Xmx4000m\\"',
'HADOOP_JOBTRACKER_OPTS=\\"-Xmx1000m\\"',
'HADOOP_NAMENODE_OPTS=\\"-Xmx3000m\\"',
'HADOOP_TASKTRACKER_OPTS=\\"-Xmx2000m\\"'])
def test_extract_xml_configs(self):
xml_configs = {
"HDFS": {
'dfs.replication': 3,
'fs.default.name': 'hdfs://',
'key': 'value'
},
"MapReduce": {
'io.sort.factor': 10,
'mapred.reduce.tasks': 2
},
"Wrong-applicable-target": {
'key': 'value'
}
}
self.assertEqual(c_h.extract_xml_confs(xml_configs),
[('dfs.replication', 3),
('fs.default.name', 'hdfs://'),
('io.sort.factor', 10),
('mapred.reduce.tasks', 2)])
def test_general_configs(self):
gen_config = {
c_h.ENABLE_SWIFT.name: {
'default_value': c_h.ENABLE_SWIFT.default_value,
'conf': {
'fs.swift.enabled': True
}
},
c_h.ENABLE_MYSQL.name: {
'default_value': c_h.ENABLE_MYSQL.default_value,
'conf': {
'oozie.service.JPAService.jdbc.username': 'oozie'
}
}
}
all_configured = {
'fs.swift.enabled': True,
'oozie.service.JPAService.jdbc.username': 'oozie'
}
configs = {
'general': {
'Enable Swift': True
}
}
cfg = c_h.generate_cfg_from_general({}, configs, gen_config)
self.assertEqual(cfg, all_configured)
configs['general'].update({'Enable MySQL': False})
cfg = c_h.generate_cfg_from_general({}, configs, gen_config)
self.assertEqual(cfg, {'fs.swift.enabled': True})
configs['general'].update({
'Enable Swift': False,
'Enable MySQL': False
})
cfg = c_h.generate_cfg_from_general({}, configs, gen_config)
self.assertEqual(cfg, {})
configs = {}
cfg = c_h.generate_cfg_from_general({}, configs, gen_config)
self.assertEqual(cfg, all_configured)
def test_get_mysql_configs(self):
cfg = m_h.get_required_mysql_configs(None, None)
self.assertEqual(cfg, m_h.get_oozie_mysql_configs())
cfg = m_h.get_required_mysql_configs("metastore_host", "passwd")
cfg_to_compare = m_h.get_oozie_mysql_configs()
cfg_to_compare.update(m_h.get_hive_mysql_configs(
"metastore_host", "passwd"))
self.assertEqual(cfg, cfg_to_compare)
@mock.patch('sahara.conductor.api.LocalApi.cluster_get')
def test_get_config_value(self, cond_get_cluster):
cluster = self._get_fake_cluster()
cond_get_cluster.return_value = cluster
self.assertEqual(
c_h.get_config_value('HDFS', 'fs.default.name', cluster),
'hdfs://inst1:8020')
self.assertEqual(
c_h.get_config_value('HDFS', 'spam', cluster), 'eggs')
self.assertEqual(
c_h.get_config_value('HDFS', 'dfs.safemode.extension'), 30000)
self.assertRaises(e.ConfigurationError,
c_h.get_config_value,
'MapReduce', 'spam', cluster)
@mock.patch('sahara.plugins.vanilla.v1_2_1.versionhandler.context')
@mock.patch('sahara.conductor.api.LocalApi.cluster_update')
def test_set_cluster_info(self, cond_cluster_update, context_mock):
cluster = self._get_fake_cluster()
v_h.VersionHandler()._set_cluster_info(cluster)
expected_info = {
'HDFS': {
'NameNode': 'hdfs://inst1:8020',
'Web UI': 'http://127.0.0.1:50070'
},
'MapReduce': {
'Web UI': 'http://127.0.0.1:50030',
'JobTracker': 'inst1:8021'
},
'JobFlow': {
'Oozie': 'http://127.0.0.1:11000'
}
}
cond_cluster_update.assert_called_with(context_mock.ctx(), cluster,
{'info': expected_info})
def _get_fake_cluster(self):
class FakeNG(object):
def __init__(self, name, flavor, processes, count, instances=None,
configuration=None, cluster_id=None):
self.name = name
self.flavor = flavor
self.node_processes = processes
self.count = count
self.instances = instances or []
self.ng_configuration = configuration
self.cluster_id = cluster_id
def configuration(self):
return self.ng_configuration
def storage_paths(self):
return ['/mnt']
class FakeCluster(object):
def __init__(self, name, tenant, plugin, version, node_groups):
self.name = name
self.tenant = tenant
self.plugin = plugin
self.version = version
self.node_groups = node_groups
class FakeInst(object):
def __init__(self, inst_name, inst_id, management_ip):
self.instance_name = inst_name
self.instance_id = inst_id
self.management_ip = management_ip
def hostname(self):
return self.instance_name
ms_inst = FakeInst('inst1', 'id1', '127.0.0.1')
wk_inst = FakeInst('inst2', 'id2', '127.0.0.1')
conf = {
"MapReduce": {},
"HDFS": {
"spam": "eggs"
},
"JobFlow": {},
}
ng1 = FakeNG('master', 'fl1', ['namenode', 'jobtracker', 'oozie'], 1,
[ms_inst], conf, 'id1')
ng2 = FakeNG('worker', 'fl1', ['datanode', 'tasktracker'], 1,
[wk_inst], conf, 'id1')
return FakeCluster('cl1', 'ten1', 'vanilla', '1.2.1', [ng1, ng2])
def test_get_hadoop_ssh_keys(self):
cluster_dict = {
'name': 'cluster1',
'plugin_name': 'mock_plugin',
'hadoop_version': 'mock_version',
'default_image_id': 'initial',
'node_groups': [tu.make_ng_dict("ng1", "f1", ["s1"], 1)],
'extra': {'test': '1'}}
cluster1 = conductor.cluster_create(context.ctx(), cluster_dict)
(private_key1, public_key1) = c_h.get_hadoop_ssh_keys(cluster1)
# should store keys for old cluster
cluster1 = conductor.cluster_get(context.ctx(), cluster1)
(private_key2, public_key2) = c_h.get_hadoop_ssh_keys(cluster1)
self.assertEqual(public_key1, public_key2)
self.assertEqual(private_key1, private_key2)
# should generate new keys for new cluster
cluster_dict.update({'name': 'cluster2'})
cluster2 = conductor.cluster_create(context.ctx(), cluster_dict)
(private_key3, public_key3) = c_h.get_hadoop_ssh_keys(cluster2)
self.assertNotEqual(public_key1, public_key3)
self.assertNotEqual(private_key1, private_key3)
@mock.patch('sahara.service.edp.hdfs_helper.create_dir_hadoop1')
def test_edp_calls_hadoop1_create_dir(self, create_dir):
cluster_dict = {
'name': 'cluster1',
'plugin_name': 'vanilla',
'hadoop_version': '1.2.1',
'default_image_id': 'image'}
cluster = conductor.cluster_create(context.ctx(), cluster_dict)
plugin = pb.PLUGINS.get_plugin(cluster.plugin_name)
plugin.get_edp_engine(cluster, edp.JOB_TYPE_PIG).create_hdfs_dir(
mock.Mock(), '/tmp')
self.assertEqual(1, create_dir.call_count)
|
apache-2.0
|
klmitch/nova
|
nova/tests/unit/image/test_glance.py
|
1
|
98337
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
from io import StringIO
import urllib.parse as urlparse
import cryptography
from cursive import exception as cursive_exception
import ddt
import glanceclient.exc
from glanceclient.v1 import images
from glanceclient.v2 import schemas
from keystoneauth1 import loading as ks_loading
import mock
from oslo_utils.fixture import uuidsentinel as uuids
import testtools
import nova.conf
from nova import context
from nova import exception
from nova.image import glance
from nova import objects
from nova import service_auth
from nova.storage import rbd_utils
from nova import test
CONF = nova.conf.CONF
NOW_GLANCE_FORMAT = "2010-10-11T10:30:22.000000"
class tzinfo(datetime.tzinfo):
@staticmethod
def utcoffset(*args, **kwargs):
return datetime.timedelta()
NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22, tzinfo=tzinfo())
class FakeSchema(object):
def __init__(self, raw_schema):
self.raw_schema = raw_schema
self.base_props = ('checksum', 'container_format', 'created_at',
'direct_url', 'disk_format', 'file', 'id',
'locations', 'min_disk', 'min_ram', 'name',
'owner', 'protected', 'schema', 'self', 'size',
'status', 'tags', 'updated_at', 'virtual_size',
'visibility')
def is_base_property(self, prop_name):
return prop_name in self.base_props
def raw(self):
return copy.deepcopy(self.raw_schema)
image_fixtures = {
'active_image_v1': {
'checksum': 'eb9139e4942121f22bbc2afc0400b2a4',
'container_format': 'ami',
'created_at': '2015-08-31T19:37:41Z',
'deleted': False,
'disk_format': 'ami',
'id': 'da8500d5-8b80-4b9c-8410-cc57fb8fb9d5',
'is_public': True,
'min_disk': 0,
'min_ram': 0,
'name': 'cirros-0.3.4-x86_64-uec',
'owner': 'ea583a4f34444a12bbe4e08c2418ba1f',
'properties': {
'kernel_id': 'f6ebd5f0-b110-4406-8c1e-67b28d4e85e7',
'ramdisk_id': '868efefc-4f2d-4ed8-82b1-7e35576a7a47'},
'protected': False,
'size': 25165824,
'status': 'active',
'updated_at': '2015-08-31T19:37:45Z'},
'active_image_v2': {
'checksum': 'eb9139e4942121f22bbc2afc0400b2a4',
'container_format': 'ami',
'created_at': '2015-08-31T19:37:41Z',
'direct_url': 'swift+config://ref1/glance/'
'da8500d5-8b80-4b9c-8410-cc57fb8fb9d5',
'disk_format': 'ami',
'file': '/v2/images/'
'da8500d5-8b80-4b9c-8410-cc57fb8fb9d5/file',
'id': 'da8500d5-8b80-4b9c-8410-cc57fb8fb9d5',
'kernel_id': 'f6ebd5f0-b110-4406-8c1e-67b28d4e85e7',
'locations': [
{'metadata': {},
'url': 'swift+config://ref1/glance/'
'da8500d5-8b80-4b9c-8410-cc57fb8fb9d5'}],
'min_disk': 0,
'min_ram': 0,
'name': 'cirros-0.3.4-x86_64-uec',
'owner': 'ea583a4f34444a12bbe4e08c2418ba1f',
'protected': False,
'ramdisk_id': '868efefc-4f2d-4ed8-82b1-7e35576a7a47',
'schema': '/v2/schemas/image',
'size': 25165824,
'status': 'active',
'tags': [],
'updated_at': '2015-08-31T19:37:45Z',
'virtual_size': None,
'visibility': 'public'},
'empty_image_v1': {
'created_at': '2015-09-01T22:37:32.000000',
'deleted': False,
'id': '885d1cb0-9f5c-4677-9d03-175be7f9f984',
'is_public': False,
'min_disk': 0,
'min_ram': 0,
'owner': 'ea583a4f34444a12bbe4e08c2418ba1f',
'properties': {},
'protected': False,
'size': 0,
'status': 'queued',
'updated_at': '2015-09-01T22:37:32.000000'
},
'empty_image_v2': {
'checksum': None,
'container_format': None,
'created_at': '2015-09-01T22:37:32Z',
'disk_format': None,
'file': '/v2/images/885d1cb0-9f5c-4677-9d03-175be7f9f984/file',
'id': '885d1cb0-9f5c-4677-9d03-175be7f9f984',
'locations': [],
'min_disk': 0,
'min_ram': 0,
'name': None,
'owner': 'ea583a4f34444a12bbe4e08c2418ba1f',
'protected': False,
'schema': '/v2/schemas/image',
'size': None,
'status': 'queued',
'tags': [],
'updated_at': '2015-09-01T22:37:32Z',
'virtual_size': None,
'visibility': 'private'
},
'custom_property_image_v1': {
'checksum': 'e533283e6aac072533d1d091a7d2e413',
'container_format': 'bare',
'created_at': '2015-09-02T00:31:16.000000',
'deleted': False,
'disk_format': 'qcow2',
'id': '10ca6b6b-48f4-43ac-8159-aa9e9353f5e4',
'is_public': False,
'min_disk': 0,
'min_ram': 0,
'name': 'fake_name',
'owner': 'ea583a4f34444a12bbe4e08c2418ba1f',
'properties': {'image_type': 'fake_image_type'},
'protected': False,
'size': 616,
'status': 'active',
'updated_at': '2015-09-02T00:31:17.000000'
},
'custom_property_image_v2': {
'checksum': 'e533283e6aac072533d1d091a7d2e413',
'container_format': 'bare',
'created_at': '2015-09-02T00:31:16Z',
'disk_format': 'qcow2',
'file': '/v2/images/10ca6b6b-48f4-43ac-8159-aa9e9353f5e4/file',
'id': '10ca6b6b-48f4-43ac-8159-aa9e9353f5e4',
'image_type': 'fake_image_type',
'min_disk': 0,
'min_ram': 0,
'name': 'fake_name',
'owner': 'ea583a4f34444a12bbe4e08c2418ba1f',
'protected': False,
'schema': '/v2/schemas/image',
'size': 616,
'status': 'active',
'tags': [],
'updated_at': '2015-09-02T00:31:17Z',
'virtual_size': None,
'visibility': 'private'
}
}
def fake_glance_response(data):
with mock.patch('glanceclient.common.utils._extract_request_id'):
return glanceclient.common.utils.RequestIdProxy([data, None])
class ImageV2(dict):
# Wrapper class that is used to comply with dual nature of
# warlock objects, that are inherited from dict and have 'schema'
# attribute.
schema = mock.MagicMock()
class TestConversions(test.NoDBTestCase):
def test_convert_timestamps_to_datetimes(self):
fixture = {'name': None,
'properties': {},
'status': None,
'is_public': None,
'created_at': NOW_GLANCE_FORMAT,
'updated_at': NOW_GLANCE_FORMAT,
'deleted_at': NOW_GLANCE_FORMAT}
result = glance._convert_timestamps_to_datetimes(fixture)
self.assertEqual(result['created_at'], NOW_DATETIME)
self.assertEqual(result['updated_at'], NOW_DATETIME)
self.assertEqual(result['deleted_at'], NOW_DATETIME)
def _test_extracting_missing_attributes(self, include_locations):
# Verify behavior from glance objects that are missing attributes
# TODO(jaypipes): Find a better way of testing this crappy
# glanceclient magic object stuff.
class MyFakeGlanceImage(object):
def __init__(self, metadata):
IMAGE_ATTRIBUTES = ['size', 'owner', 'id', 'created_at',
'updated_at', 'status', 'min_disk',
'min_ram', 'is_public']
raw = dict.fromkeys(IMAGE_ATTRIBUTES)
raw.update(metadata)
self.__dict__['raw'] = raw
def __getattr__(self, key):
try:
return self.__dict__['raw'][key]
except KeyError:
raise AttributeError(key)
def __setattr__(self, key, value):
try:
self.__dict__['raw'][key] = value
except KeyError:
raise AttributeError(key)
metadata = {
'id': 1,
'created_at': NOW_DATETIME,
'updated_at': NOW_DATETIME,
}
image = MyFakeGlanceImage(metadata)
observed = glance._extract_attributes(
image, include_locations=include_locations)
expected = {
'id': 1,
'name': None,
'is_public': None,
'size': 0,
'min_disk': None,
'min_ram': None,
'disk_format': None,
'container_format': None,
'checksum': None,
'created_at': NOW_DATETIME,
'updated_at': NOW_DATETIME,
'deleted_at': None,
'deleted': None,
'status': None,
'properties': {},
'owner': None
}
if include_locations:
expected['locations'] = None
expected['direct_url'] = None
self.assertEqual(expected, observed)
def test_extracting_missing_attributes_include_locations(self):
self._test_extracting_missing_attributes(include_locations=True)
def test_extracting_missing_attributes_exclude_locations(self):
self._test_extracting_missing_attributes(include_locations=False)
class TestExceptionTranslations(test.NoDBTestCase):
def test_client_forbidden_to_imagenotauthed(self):
in_exc = glanceclient.exc.Forbidden('123')
out_exc = glance._translate_image_exception('123', in_exc)
self.assertIsInstance(out_exc, exception.ImageNotAuthorized)
def test_client_httpforbidden_converts_to_imagenotauthed(self):
in_exc = glanceclient.exc.HTTPForbidden('123')
out_exc = glance._translate_image_exception('123', in_exc)
self.assertIsInstance(out_exc, exception.ImageNotAuthorized)
def test_client_notfound_converts_to_imagenotfound(self):
in_exc = glanceclient.exc.NotFound('123')
out_exc = glance._translate_image_exception('123', in_exc)
self.assertIsInstance(out_exc, exception.ImageNotFound)
def test_client_httpnotfound_converts_to_imagenotfound(self):
in_exc = glanceclient.exc.HTTPNotFound('123')
out_exc = glance._translate_image_exception('123', in_exc)
self.assertIsInstance(out_exc, exception.ImageNotFound)
def test_client_httpoverlimit_converts_to_imagequotaexceeded(self):
in_exc = glanceclient.exc.HTTPOverLimit('123')
out_exc = glance._translate_image_exception('123', in_exc)
self.assertIsInstance(out_exc, exception.ImageQuotaExceeded)
class TestGlanceSerializer(test.NoDBTestCase):
def test_serialize(self):
metadata = {'name': 'image1',
'is_public': True,
'foo': 'bar',
'properties': {
'prop1': 'propvalue1',
'mappings': [
{'virtual': 'aaa',
'device': 'bbb'},
{'virtual': 'xxx',
'device': 'yyy'}],
'block_device_mapping': [
{'virtual_device': 'fake',
'device_name': '/dev/fake'},
{'virtual_device': 'ephemeral0',
'device_name': '/dev/fake0'}]}}
# NOTE(tdurakov): Assertion of serialized objects won't work
# during using of random PYTHONHASHSEED. Assertion of
# serialized/deserialized object and initial one is enough
converted = glance._convert_to_string(metadata)
self.assertEqual(glance._convert_from_string(converted), metadata)
class TestGetImageService(test.NoDBTestCase):
@mock.patch.object(glance.GlanceClientWrapper, '__init__',
return_value=None)
def test_get_remote_service_from_id(self, gcwi_mocked):
id_or_uri = '123'
_ignored, image_id = glance.get_remote_image_service(
mock.sentinel.ctx, id_or_uri)
self.assertEqual(id_or_uri, image_id)
gcwi_mocked.assert_called_once_with()
@mock.patch.object(glance.GlanceClientWrapper, '__init__',
return_value=None)
def test_get_remote_service_from_href(self, gcwi_mocked):
id_or_uri = 'http://127.0.0.1/v1/images/123'
_ignored, image_id = glance.get_remote_image_service(
mock.sentinel.ctx, id_or_uri)
self.assertEqual('123', image_id)
gcwi_mocked.assert_called_once_with(context=mock.sentinel.ctx,
endpoint='http://127.0.0.1')
class TestCreateGlanceClient(test.NoDBTestCase):
@mock.patch.object(service_auth, 'get_auth_plugin')
@mock.patch.object(ks_loading, 'load_session_from_conf_options')
@mock.patch('glanceclient.Client')
def test_glanceclient_with_ks_session(self, mock_client, mock_load,
mock_get_auth):
session = "fake_session"
mock_load.return_value = session
auth = "fake_auth"
mock_get_auth.return_value = auth
ctx = context.RequestContext('fake', 'fake', global_request_id='reqid')
endpoint = "fake_endpoint"
mock_client.side_effect = ["a", "b"]
# Reset the cache, so we know its empty before we start
glance._SESSION = None
result1 = glance._glanceclient_from_endpoint(ctx, endpoint, 2)
result2 = glance._glanceclient_from_endpoint(ctx, endpoint, 2)
# Ensure that session is only loaded once.
mock_load.assert_called_once_with(glance.CONF, "glance")
self.assertEqual(session, glance._SESSION)
# Ensure new client created every time
client_call = mock.call(2, auth="fake_auth",
endpoint_override=endpoint, session=session,
global_request_id='reqid')
mock_client.assert_has_calls([client_call, client_call])
self.assertEqual("a", result1)
self.assertEqual("b", result2)
def test_generate_identity_headers(self):
ctx = context.RequestContext('user', 'tenant',
auth_token='token', roles=["a", "b"])
result = glance.generate_identity_headers(ctx, 'test')
expected = {
'X-Auth-Token': 'token',
'X-User-Id': 'user',
'X-Tenant-Id': 'tenant',
'X-Roles': 'a,b',
'X-Identity-Status': 'test',
}
self.assertDictEqual(expected, result)
class TestGlanceClientWrapperRetries(test.NoDBTestCase):
def setUp(self):
super(TestGlanceClientWrapperRetries, self).setUp()
self.ctx = context.RequestContext('fake', 'fake')
api_servers = [
'http://host1:9292',
'https://host2:9293',
'http://host3:9294'
]
self.flags(api_servers=api_servers, group='glance')
def assert_retry_attempted(self, sleep_mock, client, expected_url):
client.call(self.ctx, 1, 'get', args=('meow',))
sleep_mock.assert_called_once_with(1)
self.assertEqual(str(client.api_server), expected_url)
def assert_retry_not_attempted(self, sleep_mock, client):
self.assertRaises(exception.GlanceConnectionFailed,
client.call, self.ctx, 1, 'get', args=('meow',))
self.assertFalse(sleep_mock.called)
@mock.patch('time.sleep')
@mock.patch('nova.image.glance._glanceclient_from_endpoint')
def test_static_client_without_retries(self, create_client_mock,
sleep_mock):
side_effect = glanceclient.exc.ServiceUnavailable
self._mock_client_images_response(create_client_mock, side_effect)
self.flags(num_retries=0, group='glance')
client = self._get_static_client(create_client_mock)
self.assert_retry_not_attempted(sleep_mock, client)
@mock.patch('time.sleep')
@mock.patch('nova.image.glance._glanceclient_from_endpoint')
def test_static_client_with_retries(self, create_client_mock,
sleep_mock):
side_effect = [
glanceclient.exc.ServiceUnavailable,
None
]
self._mock_client_images_response(create_client_mock, side_effect)
self.flags(num_retries=1, group='glance')
client = self._get_static_client(create_client_mock)
self.assert_retry_attempted(sleep_mock, client, 'http://host4:9295')
@mock.patch('random.shuffle')
@mock.patch('time.sleep')
@mock.patch('nova.image.glance._glanceclient_from_endpoint')
def test_default_client_with_retries(self, create_client_mock,
sleep_mock, shuffle_mock):
side_effect = [
glanceclient.exc.ServiceUnavailable,
None
]
self._mock_client_images_response(create_client_mock, side_effect)
self.flags(num_retries=1, group='glance')
client = glance.GlanceClientWrapper()
self.assert_retry_attempted(sleep_mock, client, 'https://host2:9293')
@mock.patch('random.shuffle')
@mock.patch('time.sleep')
@mock.patch('nova.image.glance._glanceclient_from_endpoint')
def test_retry_works_with_generators(self, create_client_mock,
sleep_mock, shuffle_mock):
def some_generator(exception):
if exception:
raise glanceclient.exc.ServiceUnavailable('Boom!')
yield 'something'
side_effect = [
some_generator(exception=True),
some_generator(exception=False),
]
self._mock_client_images_response(create_client_mock, side_effect)
self.flags(num_retries=1, group='glance')
client = glance.GlanceClientWrapper()
self.assert_retry_attempted(sleep_mock, client, 'https://host2:9293')
@mock.patch('random.shuffle')
@mock.patch('time.sleep')
@mock.patch('nova.image.glance._glanceclient_from_endpoint')
def test_default_client_without_retries(self, create_client_mock,
sleep_mock, shuffle_mock):
side_effect = glanceclient.exc.ServiceUnavailable
self._mock_client_images_response(create_client_mock, side_effect)
self.flags(num_retries=0, group='glance')
client = glance.GlanceClientWrapper()
# Here we are testing the behaviour that calling client.call() twice
# when there are no retries will cycle through the api_servers and not
# sleep (which would be an indication of a retry)
self.assertRaises(exception.GlanceConnectionFailed,
client.call, self.ctx, 1, 'get', args=('meow',))
self.assertEqual(str(client.api_server), 'http://host1:9292')
self.assertFalse(sleep_mock.called)
self.assertRaises(exception.GlanceConnectionFailed,
client.call, self.ctx, 1, 'get', args=('meow',))
self.assertEqual(str(client.api_server), 'https://host2:9293')
self.assertFalse(sleep_mock.called)
def _get_static_client(self, create_client_mock):
version = 2
url = 'http://host4:9295'
client = glance.GlanceClientWrapper(context=self.ctx, endpoint=url)
create_client_mock.assert_called_once_with(self.ctx, mock.ANY, version)
return client
def _mock_client_images_response(self, create_client_mock, side_effect):
client_mock = mock.MagicMock(spec=glanceclient.Client)
images_mock = mock.MagicMock(spec=images.ImageManager)
images_mock.get.side_effect = side_effect
type(client_mock).images = mock.PropertyMock(return_value=images_mock)
create_client_mock.return_value = client_mock
class TestCommonPropertyNameConflicts(test.NoDBTestCase):
"""Tests that images that have common property names like "version" don't
cause an exception to be raised from the wacky GlanceClientWrapper magic
call() method.
:see https://bugs.launchpad.net/nova/+bug/1717547
"""
@mock.patch('nova.image.glance.GlanceClientWrapper._create_onetime_client')
def test_version_property_conflicts(self, mock_glance_client):
client = mock.MagicMock()
mock_glance_client.return_value = client
ctx = mock.sentinel.ctx
service = glance.GlanceImageServiceV2()
# Simulate the process of snapshotting a server that was launched with
# an image with the properties collection containing a (very
# commonly-named) "version" property.
image_meta = {
'id': 1,
'version': 'blows up',
}
# This call would blow up before the fix for 1717547
service.create(ctx, image_meta)
class TestDownloadNoDirectUri(test.NoDBTestCase):
"""Tests the download method of the GlanceImageServiceV2 when the
default of not allowing direct URI transfers is set.
"""
@mock.patch('builtins.open')
@mock.patch('nova.image.glance.GlanceImageServiceV2.show')
def test_download_no_data_no_dest_path_v2(self, show_mock, open_mock):
client = mock.MagicMock()
client.call.return_value = fake_glance_response(
mock.sentinel.image_chunks)
ctx = mock.sentinel.ctx
service = glance.GlanceImageServiceV2(client)
res = service.download(ctx, mock.sentinel.image_id)
self.assertFalse(show_mock.called)
self.assertFalse(open_mock.called)
client.call.assert_called_once_with(
ctx, 2, 'data', args=(mock.sentinel.image_id,))
self.assertEqual(mock.sentinel.image_chunks, res)
@mock.patch('builtins.open')
@mock.patch('nova.image.glance.GlanceImageServiceV2.show')
def test_download_data_no_dest_path_v2(self, show_mock, open_mock):
client = mock.MagicMock()
client.call.return_value = fake_glance_response([1, 2, 3])
ctx = mock.sentinel.ctx
data = mock.MagicMock()
service = glance.GlanceImageServiceV2(client)
res = service.download(ctx, mock.sentinel.image_id, data=data)
self.assertFalse(show_mock.called)
self.assertFalse(open_mock.called)
client.call.assert_called_once_with(
ctx, 2, 'data', args=(mock.sentinel.image_id,))
self.assertIsNone(res)
data.write.assert_has_calls(
[
mock.call(1),
mock.call(2),
mock.call(3)
]
)
self.assertFalse(data.close.called)
@mock.patch('builtins.open')
@mock.patch('nova.image.glance.GlanceImageServiceV2.show')
@mock.patch('nova.image.glance.GlanceImageServiceV2._safe_fsync')
def test_download_no_data_dest_path_v2(self, fsync_mock, show_mock,
open_mock):
client = mock.MagicMock()
client.call.return_value = fake_glance_response([1, 2, 3])
ctx = mock.sentinel.ctx
writer = mock.MagicMock()
open_mock.return_value = writer
service = glance.GlanceImageServiceV2(client)
res = service.download(ctx, mock.sentinel.image_id,
dst_path=mock.sentinel.dst_path)
self.assertFalse(show_mock.called)
client.call.assert_called_once_with(
ctx, 2, 'data', args=(mock.sentinel.image_id,))
open_mock.assert_called_once_with(mock.sentinel.dst_path, 'wb')
fsync_mock.assert_called_once_with(writer)
self.assertIsNone(res)
writer.write.assert_has_calls(
[
mock.call(1),
mock.call(2),
mock.call(3)
]
)
writer.close.assert_called_once_with()
@mock.patch('builtins.open')
@mock.patch('nova.image.glance.GlanceImageServiceV2.show')
def test_download_data_dest_path_v2(self, show_mock, open_mock):
# NOTE(jaypipes): This really shouldn't be allowed, but because of the
# horrible design of the download() method in GlanceImageServiceV2, no
# error is raised, and the dst_path is ignored...
# #TODO(jaypipes): Fix the aforementioned horrible design of
# the download() method.
client = mock.MagicMock()
client.call.return_value = fake_glance_response([1, 2, 3])
ctx = mock.sentinel.ctx
data = mock.MagicMock()
service = glance.GlanceImageServiceV2(client)
res = service.download(ctx, mock.sentinel.image_id, data=data)
self.assertFalse(show_mock.called)
self.assertFalse(open_mock.called)
client.call.assert_called_once_with(
ctx, 2, 'data', args=(mock.sentinel.image_id,))
self.assertIsNone(res)
data.write.assert_has_calls(
[
mock.call(1),
mock.call(2),
mock.call(3)
]
)
self.assertFalse(data.close.called)
@mock.patch('builtins.open')
@mock.patch('nova.image.glance.GlanceImageServiceV2.show')
def test_download_data_dest_path_write_fails_v2(
self, show_mock, open_mock):
client = mock.MagicMock()
client.call.return_value = fake_glance_response([1, 2, 3])
ctx = mock.sentinel.ctx
service = glance.GlanceImageServiceV2(client)
# NOTE(mikal): data is a file like object, which in our case always
# raises an exception when we attempt to write to the file.
class FakeDiskException(Exception):
pass
class Exceptionator(StringIO):
def write(self, _):
raise FakeDiskException('Disk full!')
self.assertRaises(FakeDiskException, service.download, ctx,
mock.sentinel.image_id, data=Exceptionator())
@mock.patch('builtins.open')
@mock.patch('nova.image.glance.GlanceImageServiceV2.show')
def test_download_no_returned_image_data_v2(
self, show_mock, open_mock):
"""Verify images with no data are handled correctly."""
client = mock.MagicMock()
client.call.return_value = fake_glance_response(None)
ctx = mock.sentinel.ctx
service = glance.GlanceImageServiceV2(client)
with testtools.ExpectedException(exception.ImageUnacceptable):
service.download(ctx, mock.sentinel.image_id)
# TODO(stephenfin): Drop this test since it's not possible to run in
# production
@mock.patch('os.path.getsize', return_value=1)
@mock.patch('builtins.open')
@mock.patch('nova.image.glance.GlanceImageServiceV2._get_transfer_method')
@mock.patch('nova.image.glance.GlanceImageServiceV2.show')
def test_download_direct_file_uri_v2(
self, show_mock, get_tran_mock, open_mock, getsize_mock):
self.flags(allowed_direct_url_schemes=['file'], group='glance')
show_mock.return_value = {
'locations': [
{
'url': 'file:///files/image',
'metadata': mock.sentinel.loc_meta
}
]
}
tran_mod = mock.MagicMock()
get_tran_mock.return_value = tran_mod
client = mock.MagicMock()
ctx = mock.sentinel.ctx
writer = mock.MagicMock()
open_mock.return_value = writer
service = glance.GlanceImageServiceV2(client)
res = service.download(ctx, mock.sentinel.image_id,
dst_path=mock.sentinel.dst_path)
self.assertIsNone(res)
self.assertFalse(client.call.called)
show_mock.assert_called_once_with(ctx,
mock.sentinel.image_id,
include_locations=True)
get_tran_mock.assert_called_once_with('file')
tran_mod.assert_called_once_with(ctx, mock.ANY,
mock.sentinel.dst_path,
mock.sentinel.loc_meta)
@mock.patch('glanceclient.common.utils.IterableWithLength')
@mock.patch('os.path.getsize', return_value=1)
@mock.patch('builtins.open')
@mock.patch('nova.image.glance.LOG')
@mock.patch('nova.image.glance.GlanceImageServiceV2._get_verifier')
@mock.patch('nova.image.glance.GlanceImageServiceV2._get_transfer_method')
@mock.patch('nova.image.glance.GlanceImageServiceV2.show')
def test_download_direct_rbd_uri_v2(
self, show_mock, get_tran_mock, get_verifier_mock, log_mock,
open_mock, getsize_mock, iterable_with_length_mock):
self.flags(enable_rbd_download=True, group='glance')
show_mock.return_value = {
'locations': [
{
'url': 'rbd://cluster_uuid/pool_name/image_uuid/snapshot',
'metadata': mock.sentinel.loc_meta
}
]
}
tran_mod = mock.MagicMock()
get_tran_mock.return_value = tran_mod
client = mock.MagicMock()
ctx = mock.sentinel.ctx
writer = mock.MagicMock()
open_mock.return_value = writer
iterable_with_length_mock.return_value = ["rbd1", "rbd2"]
service = glance.GlanceImageServiceV2(client)
verifier = mock.MagicMock()
get_verifier_mock.return_value = verifier
res = service.download(ctx, mock.sentinel.image_id,
dst_path=mock.sentinel.dst_path,
trusted_certs=mock.sentinel.trusted_certs)
self.assertIsNone(res)
show_mock.assert_called_once_with(ctx,
mock.sentinel.image_id,
include_locations=True)
tran_mod.assert_called_once_with(ctx, mock.ANY,
mock.sentinel.dst_path,
mock.sentinel.loc_meta)
open_mock.assert_called_once_with(mock.sentinel.dst_path, 'rb')
get_tran_mock.assert_called_once_with('rbd')
# no client call, chunks were read right after xfer_mod.download:
client.call.assert_not_called()
# verifier called with the value we got from rbd download
verifier.update.assert_has_calls(
[
mock.call("rbd1"),
mock.call("rbd2")
]
)
verifier.verify.assert_called()
log_mock.info.assert_has_calls(
[
mock.call('Successfully transferred using %s', 'rbd'),
mock.call(
'Image signature verification succeeded for image %s',
mock.sentinel.image_id)
]
)
# not opened for writing (already written)
self.assertFalse(open_mock(mock.sentinel.dst_path, 'rw').called)
# write not called (written by rbd download)
writer.write.assert_not_called()
@mock.patch('nova.image.glance.GlanceImageServiceV2._get_transfer_method')
@mock.patch('nova.image.glance.GlanceImageServiceV2.show')
@mock.patch('nova.image.glance.GlanceImageServiceV2._safe_fsync')
def test_download_direct_exception_fallback_v2(
self, fsync_mock, show_mock, get_tran_mock):
# Test that we fall back to downloading to the dst_path
# if the download method of the transfer module raised
# an exception.
self.flags(allowed_direct_url_schemes=['file'], group='glance')
show_mock.return_value = {
'locations': [
{
'url': 'file:///files/image',
'metadata': mock.sentinel.loc_meta
}
]
}
tran_method = mock.MagicMock()
tran_method.side_effect = Exception
get_tran_mock.return_value = tran_method
client = mock.MagicMock()
client.call.return_value = fake_glance_response([1, 2, 3])
ctx = mock.sentinel.ctx
writer = mock.MagicMock()
with mock.patch('builtins.open') as open_mock:
open_mock.return_value = writer
service = glance.GlanceImageServiceV2(client)
res = service.download(ctx, mock.sentinel.image_id,
dst_path=mock.sentinel.dst_path)
self.assertIsNone(res)
show_mock.assert_called_once_with(ctx,
mock.sentinel.image_id,
include_locations=True)
get_tran_mock.assert_called_once_with('file')
tran_method.assert_called_once_with(ctx, mock.ANY,
mock.sentinel.dst_path,
mock.sentinel.loc_meta)
client.call.assert_called_once_with(
ctx, 2, 'data', args=(mock.sentinel.image_id,))
fsync_mock.assert_called_once_with(writer)
# NOTE(jaypipes): log messages call open() in part of the
# download path, so here, we just check that the last open()
# call was done for the dst_path file descriptor.
open_mock.assert_called_with(mock.sentinel.dst_path, 'wb')
self.assertIsNone(res)
writer.write.assert_has_calls(
[
mock.call(1),
mock.call(2),
mock.call(3)
]
)
@mock.patch('nova.image.glance.GlanceImageServiceV2._get_transfer_method')
@mock.patch('nova.image.glance.GlanceImageServiceV2.show')
@mock.patch('nova.image.glance.GlanceImageServiceV2._safe_fsync')
def test_download_direct_no_mod_fallback(
self, fsync_mock, show_mock, get_tran_mock):
# Test that we fall back to downloading to the dst_path
# if no appropriate transfer module is found...
# an exception.
self.flags(allowed_direct_url_schemes=['funky'], group='glance')
show_mock.return_value = {
'locations': [
{
'url': 'file:///files/image',
'metadata': mock.sentinel.loc_meta
}
]
}
get_tran_mock.return_value = None
client = mock.MagicMock()
client.call.return_value = fake_glance_response([1, 2, 3])
ctx = mock.sentinel.ctx
writer = mock.MagicMock()
with mock.patch('builtins.open') as open_mock:
open_mock.return_value = writer
service = glance.GlanceImageServiceV2(client)
res = service.download(ctx, mock.sentinel.image_id,
dst_path=mock.sentinel.dst_path)
self.assertIsNone(res)
show_mock.assert_called_once_with(ctx,
mock.sentinel.image_id,
include_locations=True)
get_tran_mock.assert_called_once_with('file')
client.call.assert_called_once_with(
ctx, 2, 'data', args=(mock.sentinel.image_id,))
fsync_mock.assert_called_once_with(writer)
# NOTE(jaypipes): log messages call open() in part of the
# download path, so here, we just check that the last open()
# call was done for the dst_path file descriptor.
open_mock.assert_called_with(mock.sentinel.dst_path, 'wb')
self.assertIsNone(res)
writer.write.assert_has_calls(
[
mock.call(1),
mock.call(2),
mock.call(3)
]
)
writer.close.assert_called_once_with()
class TestDownloadSignatureVerification(test.NoDBTestCase):
class MockVerifier(object):
def update(self, data):
return
def verify(self):
return True
class BadVerifier(object):
def update(self, data):
return
def verify(self):
raise cryptography.exceptions.InvalidSignature(
'Invalid signature.'
)
def setUp(self):
super(TestDownloadSignatureVerification, self).setUp()
self.flags(verify_glance_signatures=True, group='glance')
self.fake_img_props = {
'properties': {
'img_signature': 'signature',
'img_signature_hash_method': 'SHA-224',
'img_signature_certificate_uuid': uuids.img_sig_cert_uuid,
'img_signature_key_type': 'RSA-PSS',
}
}
self.fake_img_data = ['A' * 256, 'B' * 256]
self.client = mock.MagicMock()
self.client.call.return_value = fake_glance_response(
self.fake_img_data)
@mock.patch('nova.image.glance.LOG')
@mock.patch('nova.image.glance.GlanceImageServiceV2.show')
@mock.patch('cursive.signature_utils.get_verifier')
def test_download_with_signature_verification_v2(self,
mock_get_verifier,
mock_show,
mock_log):
service = glance.GlanceImageServiceV2(self.client)
mock_get_verifier.return_value = self.MockVerifier()
mock_show.return_value = self.fake_img_props
image_id = None
res = service.download(context=None, image_id=image_id,
data=None, dst_path=None)
self.assertEqual(self.fake_img_data, res)
mock_get_verifier.assert_called_once_with(
context=None,
img_signature_certificate_uuid=uuids.img_sig_cert_uuid,
img_signature_hash_method='SHA-224',
img_signature='signature',
img_signature_key_type='RSA-PSS'
)
# trusted_certs is None and enable_certificate_validation is
# false, which causes the below debug message to occur
msg = ('Certificate validation was not performed. A list of '
'trusted image certificate IDs must be provided in '
'order to validate an image certificate.')
mock_log.debug.assert_called_once_with(msg)
msg = ('Image signature verification succeeded for image %s')
mock_log.info.assert_called_once_with(msg, image_id)
@mock.patch('builtins.open')
@mock.patch('nova.image.glance.LOG')
@mock.patch('nova.image.glance.GlanceImageServiceV2.show')
@mock.patch('cursive.signature_utils.get_verifier')
@mock.patch('nova.image.glance.GlanceImageServiceV2._safe_fsync')
def test_download_dst_path_signature_verification_v2(self,
mock_fsync,
mock_get_verifier,
mock_show,
mock_log,
mock_open):
service = glance.GlanceImageServiceV2(self.client)
mock_get_verifier.return_value = self.MockVerifier()
mock_show.return_value = self.fake_img_props
mock_dest = mock.MagicMock()
fake_path = 'FAKE_PATH'
mock_open.return_value = mock_dest
service.download(context=None, image_id=None,
data=None, dst_path=fake_path)
mock_get_verifier.assert_called_once_with(
context=None,
img_signature_certificate_uuid=uuids.img_sig_cert_uuid,
img_signature_hash_method='SHA-224',
img_signature='signature',
img_signature_key_type='RSA-PSS'
)
msg = ('Certificate validation was not performed. A list of '
'trusted image certificate IDs must be provided in '
'order to validate an image certificate.')
mock_log.debug.assert_called_once_with(msg)
msg = ('Image signature verification succeeded for image %s')
mock_log.info.assert_called_once_with(msg, None)
self.assertEqual(len(self.fake_img_data), mock_dest.write.call_count)
self.assertTrue(mock_dest.close.called)
mock_fsync.assert_called_once_with(mock_dest)
@mock.patch('nova.image.glance.LOG')
@mock.patch('nova.image.glance.GlanceImageServiceV2.show')
@mock.patch('cursive.signature_utils.get_verifier')
def test_download_with_get_verifier_failure_v2(self,
mock_get,
mock_show,
mock_log):
service = glance.GlanceImageServiceV2(self.client)
mock_get.side_effect = cursive_exception.SignatureVerificationError(
reason='Signature verification failed.'
)
mock_show.return_value = self.fake_img_props
self.assertRaises(cursive_exception.SignatureVerificationError,
service.download,
context=None, image_id=None,
data=None, dst_path=None)
mock_log.error.assert_called_once_with(mock.ANY, mock.ANY)
@mock.patch('nova.image.glance.LOG')
@mock.patch('nova.image.glance.GlanceImageServiceV2.show')
@mock.patch('cursive.signature_utils.get_verifier')
def test_download_with_invalid_signature_v2(self,
mock_get_verifier,
mock_show,
mock_log):
service = glance.GlanceImageServiceV2(self.client)
mock_get_verifier.return_value = self.BadVerifier()
mock_show.return_value = self.fake_img_props
self.assertRaises(cryptography.exceptions.InvalidSignature,
service.download,
context=None, image_id=None,
data=None, dst_path=None)
mock_log.error.assert_called_once_with(mock.ANY, mock.ANY)
@mock.patch('nova.image.glance.LOG')
@mock.patch('nova.image.glance.GlanceImageServiceV2.show')
def test_download_missing_signature_metadata_v2(self,
mock_show,
mock_log):
service = glance.GlanceImageServiceV2(self.client)
mock_show.return_value = {'properties': {}}
self.assertRaisesRegex(cursive_exception.SignatureVerificationError,
'Required image properties for signature '
'verification do not exist. Cannot verify '
'signature. Missing property: .*',
service.download,
context=None, image_id=None,
data=None, dst_path=None)
@mock.patch('builtins.open')
@mock.patch('cursive.signature_utils.get_verifier')
@mock.patch('nova.image.glance.LOG')
@mock.patch('nova.image.glance.GlanceImageServiceV2.show')
@mock.patch('nova.image.glance.GlanceImageServiceV2._safe_fsync')
def test_download_dst_path_signature_fail_v2(self, mock_fsync,
mock_show, mock_log,
mock_get_verifier,
mock_open):
service = glance.GlanceImageServiceV2(self.client)
mock_get_verifier.return_value = self.BadVerifier()
mock_dest = mock.MagicMock()
fake_path = 'FAKE_PATH'
mock_open.return_value = mock_dest
mock_show.return_value = self.fake_img_props
self.assertRaises(cryptography.exceptions.InvalidSignature,
service.download,
context=None, image_id=None,
data=None, dst_path=fake_path)
mock_log.error.assert_called_once_with(mock.ANY, mock.ANY)
mock_open.assert_called_once_with(fake_path, 'wb')
mock_fsync.assert_called_once_with(mock_dest)
mock_dest.truncate.assert_called_once_with(0)
self.assertTrue(mock_dest.close.called)
class TestDownloadCertificateValidation(test.NoDBTestCase):
"""Tests the download method of the GlanceImageServiceV2 when
certificate validation is enabled.
"""
def setUp(self):
super(TestDownloadCertificateValidation, self).setUp()
self.flags(enable_certificate_validation=True, group='glance')
self.fake_img_props = {
'properties': {
'img_signature': 'signature',
'img_signature_hash_method': 'SHA-224',
'img_signature_certificate_uuid': uuids.img_sig_cert_uuid,
'img_signature_key_type': 'RSA-PSS',
}
}
self.fake_img_data = ['A' * 256, 'B' * 256]
self.client = mock.MagicMock()
self.client.call.return_value = fake_glance_response(
self.fake_img_data)
@mock.patch('nova.image.glance.LOG')
@mock.patch('nova.image.glance.GlanceImageServiceV2.show')
@mock.patch('cursive.certificate_utils.verify_certificate')
@mock.patch('cursive.signature_utils.get_verifier')
def test_download_with_certificate_validation_v2(self,
mock_get_verifier,
mock_verify_certificate,
mock_show,
mock_log):
service = glance.GlanceImageServiceV2(self.client)
mock_show.return_value = self.fake_img_props
fake_cert = uuids.img_sig_cert_uuid
fake_trusted_certs = objects.TrustedCerts(ids=[fake_cert])
res = service.download(context=None, image_id=None,
data=None, dst_path=None,
trusted_certs=fake_trusted_certs)
self.assertEqual(self.fake_img_data, res)
mock_get_verifier.assert_called_once_with(
context=None,
img_signature_certificate_uuid=uuids.img_sig_cert_uuid,
img_signature_hash_method='SHA-224',
img_signature='signature',
img_signature_key_type='RSA-PSS'
)
mock_verify_certificate.assert_called_once_with(
context=None,
certificate_uuid=uuids.img_sig_cert_uuid,
trusted_certificate_uuids=[fake_cert]
)
msg = ('Image signature certificate validation succeeded '
'for certificate: %s')
mock_log.debug.assert_called_once_with(msg, uuids.img_sig_cert_uuid)
@mock.patch('nova.image.glance.LOG')
@mock.patch('nova.image.glance.GlanceImageServiceV2.show')
@mock.patch('cursive.certificate_utils.verify_certificate')
@mock.patch('cursive.signature_utils.get_verifier')
def test_download_with_trusted_certs_and_disabled_cert_validation_v2(
self,
mock_get_verifier,
mock_verify_certificate,
mock_show,
mock_log):
self.flags(enable_certificate_validation=False, group='glance')
service = glance.GlanceImageServiceV2(self.client)
mock_show.return_value = self.fake_img_props
fake_cert = uuids.img_sig_cert_uuid
fake_trusted_certs = objects.TrustedCerts(ids=[fake_cert])
res = service.download(context=None, image_id=None,
data=None, dst_path=None,
trusted_certs=fake_trusted_certs)
self.assertEqual(self.fake_img_data, res)
mock_get_verifier.assert_called_once_with(
context=None,
img_signature_certificate_uuid=uuids.img_sig_cert_uuid,
img_signature_hash_method='SHA-224',
img_signature='signature',
img_signature_key_type='RSA-PSS'
)
mock_verify_certificate.assert_called_once_with(
context=None,
certificate_uuid=uuids.img_sig_cert_uuid,
trusted_certificate_uuids=[fake_cert]
)
msg = ('Image signature certificate validation succeeded '
'for certificate: %s')
mock_log.debug.assert_called_once_with(msg, uuids.img_sig_cert_uuid)
@mock.patch('nova.image.glance.LOG')
@mock.patch('nova.image.glance.GlanceImageServiceV2.show')
@mock.patch('cursive.certificate_utils.verify_certificate')
@mock.patch('cursive.signature_utils.get_verifier')
def test_download_with_certificate_validation_failure_v2(
self,
mock_get_verifier,
mock_verify_certificate,
mock_show,
mock_log):
service = glance.GlanceImageServiceV2(self.client)
mock_verify_certificate.side_effect = \
cursive_exception.SignatureVerificationError(
reason='Invalid certificate.'
)
mock_show.return_value = self.fake_img_props
bad_trusted_certs = objects.TrustedCerts(ids=['bad_cert_id',
'other_bad_cert_id'])
self.assertRaises(exception.CertificateValidationFailed,
service.download,
context=None, image_id=None,
data=None, dst_path=None,
trusted_certs=bad_trusted_certs)
msg = ('Image signature certificate validation failed for '
'certificate: %s')
mock_log.warning.assert_called_once_with(msg,
uuids.img_sig_cert_uuid)
@mock.patch('nova.image.glance.LOG')
@mock.patch('nova.image.glance.GlanceImageServiceV2.show')
@mock.patch('cursive.signature_utils.get_verifier')
def test_download_without_trusted_certs_failure_v2(self,
mock_get_verifier,
mock_show,
mock_log):
# Signature verification needs to be enabled in order to reach the
# checkpoint for trusted_certs. Otherwise, all image signature
# validation will be skipped.
self.flags(verify_glance_signatures=True, group='glance')
service = glance.GlanceImageServiceV2(self.client)
mock_show.return_value = self.fake_img_props
self.assertRaises(exception.CertificateValidationFailed,
service.download,
context=None, image_id=None,
data=None, dst_path=None)
msg = ('Image signature certificate validation enabled, but no '
'trusted certificate IDs were provided. Unable to '
'validate the certificate used to verify the image '
'signature.')
mock_log.warning.assert_called_once_with(msg)
@mock.patch('nova.image.glance.LOG')
@mock.patch('nova.image.glance.GlanceImageServiceV2.show')
@mock.patch('cursive.signature_utils.get_verifier')
@mock.patch('cursive.certificate_utils.verify_certificate')
def test_get_verifier_without_trusted_certs_use_default_certs(
self, mock_verify_certificate, mock_get_verifier, mock_show,
mock_log):
"""Tests the scenario that trusted_certs is not provided, but
signature and cert verification are enabled, and there are default
certs to use.
"""
self.flags(verify_glance_signatures=True, group='glance')
self.flags(default_trusted_certificate_ids=[uuids.img_sig_cert_uuid],
group='glance')
service = glance.GlanceImageServiceV2(self.client)
mock_show.return_value = self.fake_img_props
service._get_verifier(
mock.sentinel.context, mock.sentinel.image_id, trusted_certs=None)
mock_verify_certificate.assert_called_once_with(
context=mock.sentinel.context,
certificate_uuid=uuids.img_sig_cert_uuid,
trusted_certificate_uuids=[uuids.img_sig_cert_uuid]
)
msg = ('Image signature certificate validation succeeded '
'for certificate: %s')
mock_log.debug.assert_called_once_with(msg, uuids.img_sig_cert_uuid)
class TestIsImageAvailable(test.NoDBTestCase):
"""Tests the internal _is_image_available function."""
class ImageSpecV2(object):
visibility = None
properties = None
def test_auth_token_override(self):
ctx = mock.MagicMock(auth_token=True)
img = mock.MagicMock()
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
self.assertFalse(img.called)
def test_admin_override(self):
ctx = mock.MagicMock(auth_token=False, is_admin=True)
img = mock.MagicMock()
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
self.assertFalse(img.called)
def test_v2_visibility(self):
ctx = mock.MagicMock(auth_token=False, is_admin=False)
# We emulate warlock validation that throws an AttributeError
# if you try to call is_public on an image model returned by
# a call to V2 image.get(). Here, the ImageSpecV2 does not have
# an is_public attribute and MagicMock will throw an AttributeError.
img = mock.MagicMock(visibility='PUBLIC',
spec=TestIsImageAvailable.ImageSpecV2)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
def test_project_is_owner(self):
ctx = mock.MagicMock(auth_token=False, is_admin=False,
project_id='123')
props = {
'owner_id': '123'
}
img = mock.MagicMock(visibility='private', properties=props,
spec=TestIsImageAvailable.ImageSpecV2)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
def test_project_context_matches_project_prop(self):
ctx = mock.MagicMock(auth_token=False, is_admin=False,
project_id='123')
props = {
'project_id': '123'
}
img = mock.MagicMock(visibility='private', properties=props,
spec=TestIsImageAvailable.ImageSpecV2)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
def test_no_user_in_props(self):
ctx = mock.MagicMock(auth_token=False, is_admin=False,
project_id='123')
props = {
}
img = mock.MagicMock(visibility='private', properties=props,
spec=TestIsImageAvailable.ImageSpecV2)
res = glance._is_image_available(ctx, img)
self.assertFalse(res)
def test_user_matches_context(self):
ctx = mock.MagicMock(auth_token=False, is_admin=False,
user_id='123')
props = {
'user_id': '123'
}
img = mock.MagicMock(visibility='private', properties=props,
spec=TestIsImageAvailable.ImageSpecV2)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
class TestRBDDownload(test.NoDBTestCase):
def setUp(self):
super(TestRBDDownload, self).setUp()
loc_url = "rbd://ce2d1ace/images/b86d6d06-faac/snap"
self.url_parts = urlparse.urlparse(loc_url)
self.image_uuid = "b86d6d06-faac"
self.pool_name = "images"
self.snapshot_name = "snap"
@mock.patch('nova.storage.rbd_utils.RBDDriver._check_for_import_failure',
new=mock.Mock())
@mock.patch.object(rbd_utils.RBDDriver, 'export_image')
@mock.patch.object(rbd_utils, 'rbd', new=mock.Mock())
def test_rbd_download_success(self, mock_export_image):
client = mock.MagicMock()
ctx = mock.sentinel.ctx
service = glance.GlanceImageServiceV2(client)
service.rbd_download(ctx, self.url_parts, mock.sentinel.dst_path)
# Assert that we attempt to export using the correct rbd pool, volume
# and snapshot given the provided URL
mock_export_image.assert_called_once_with(mock.sentinel.dst_path,
self.image_uuid,
self.snapshot_name,
self.pool_name)
def test_rbd_download_broken_url(self):
client = mock.MagicMock()
ctx = mock.sentinel.ctx
service = glance.GlanceImageServiceV2(client)
wrong_url = "http://www.example.com"
wrong_url_parts = urlparse.urlparse(wrong_url)
# Assert InvalidParameterValue is raised when we can't parse the URL
self.assertRaises(
exception.InvalidParameterValue, service.rbd_download, ctx,
wrong_url_parts, mock.sentinel.dst_path)
@mock.patch('nova.storage.rbd_utils.RBDDriver._check_for_import_failure',
new=mock.Mock())
@mock.patch('nova.storage.rbd_utils.RBDDriver.export_image')
@mock.patch.object(rbd_utils, 'rbd', new=mock.Mock())
def test_rbd_download_export_failure(self, mock_export_image):
client = mock.MagicMock()
ctx = mock.sentinel.ctx
service = glance.GlanceImageServiceV2(client)
mock_export_image.side_effect = Exception
# Assert CouldNotFetchImage is raised when the export fails
self.assertRaisesRegex(
exception.CouldNotFetchImage, self.image_uuid,
service.rbd_download, ctx, self.url_parts, mock.sentinel.dst_path)
class TestShow(test.NoDBTestCase):
"""Tests the show method of the GlanceImageServiceV2."""
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_show_success_v2(self, is_avail_mock, trans_from_mock):
is_avail_mock.return_value = True
trans_from_mock.return_value = {'mock': mock.sentinel.trans_from}
client = mock.MagicMock()
client.call.return_value = {}
ctx = mock.sentinel.ctx
service = glance.GlanceImageServiceV2(client)
info = service.show(ctx, mock.sentinel.image_id)
client.call.assert_called_once_with(
ctx, 2, 'get', args=(mock.sentinel.image_id,))
is_avail_mock.assert_called_once_with(ctx, {})
trans_from_mock.assert_called_once_with({}, include_locations=False)
self.assertIn('mock', info)
self.assertEqual(mock.sentinel.trans_from, info['mock'])
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_show_not_available_v2(self, is_avail_mock, trans_from_mock):
is_avail_mock.return_value = False
client = mock.MagicMock()
client.call.return_value = mock.sentinel.images_0
ctx = mock.sentinel.ctx
service = glance.GlanceImageServiceV2(client)
with testtools.ExpectedException(exception.ImageNotFound):
service.show(ctx, mock.sentinel.image_id)
client.call.assert_called_once_with(
ctx, 2, 'get', args=(mock.sentinel.image_id,))
is_avail_mock.assert_called_once_with(ctx, mock.sentinel.images_0)
self.assertFalse(trans_from_mock.called)
@mock.patch('nova.image.glance._reraise_translated_image_exception')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_show_client_failure_v2(self, is_avail_mock, trans_from_mock,
reraise_mock):
raised = exception.ImageNotAuthorized(image_id=123)
client = mock.MagicMock()
client.call.side_effect = glanceclient.exc.Forbidden
ctx = mock.sentinel.ctx
reraise_mock.side_effect = raised
service = glance.GlanceImageServiceV2(client)
with testtools.ExpectedException(exception.ImageNotAuthorized):
service.show(ctx, mock.sentinel.image_id)
client.call.assert_called_once_with(
ctx, 2, 'get', args=(mock.sentinel.image_id,))
self.assertFalse(is_avail_mock.called)
self.assertFalse(trans_from_mock.called)
reraise_mock.assert_called_once_with(mock.sentinel.image_id)
@mock.patch.object(schemas, 'Schema', side_effect=FakeSchema)
@mock.patch('nova.image.glance._is_image_available')
def test_show_queued_image_without_some_attrs_v2(self, is_avail_mock,
mocked_schema):
is_avail_mock.return_value = True
client = mock.MagicMock()
# fake image cls without disk_format, container_format, name attributes
class fake_image_cls(dict):
pass
glance_image = fake_image_cls(
id = 'b31aa5dd-f07a-4748-8f15-398346887584',
deleted = False,
protected = False,
min_disk = 0,
created_at = '2014-05-20T08:16:48',
size = 0,
status = 'queued',
visibility = 'private',
min_ram = 0,
owner = '980ec4870033453ead65c0470a78b8a8',
updated_at = '2014-05-20T08:16:48',
schema = '')
glance_image.id = glance_image['id']
glance_image.schema = ''
client.call.return_value = glance_image
ctx = mock.sentinel.ctx
service = glance.GlanceImageServiceV2(client)
image_info = service.show(ctx, glance_image.id)
client.call.assert_called_once_with(
ctx, 2, 'get', args=(glance_image.id,))
NOVA_IMAGE_ATTRIBUTES = set(['size', 'disk_format', 'owner',
'container_format', 'status', 'id',
'name', 'created_at', 'updated_at',
'deleted', 'deleted_at', 'checksum',
'min_disk', 'min_ram', 'is_public',
'properties'])
self.assertEqual(NOVA_IMAGE_ATTRIBUTES, set(image_info.keys()))
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_include_locations_success_v2(self, avail_mock, trans_from_mock):
locations = [mock.sentinel.loc1]
avail_mock.return_value = True
trans_from_mock.return_value = {'locations': locations}
client = mock.Mock()
client.call.return_value = mock.sentinel.image
service = glance.GlanceImageServiceV2(client)
ctx = mock.sentinel.ctx
image_id = mock.sentinel.image_id
info = service.show(ctx, image_id, include_locations=True)
client.call.assert_called_once_with(
ctx, 2, 'get', args=(image_id,))
avail_mock.assert_called_once_with(ctx, mock.sentinel.image)
trans_from_mock.assert_called_once_with(mock.sentinel.image,
include_locations=True)
self.assertIn('locations', info)
self.assertEqual(locations, info['locations'])
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_include_direct_uri_success_v2(self, avail_mock, trans_from_mock):
locations = [mock.sentinel.loc1]
avail_mock.return_value = True
trans_from_mock.return_value = {'locations': locations,
'direct_uri': mock.sentinel.duri}
client = mock.Mock()
client.call.return_value = mock.sentinel.image
service = glance.GlanceImageServiceV2(client)
ctx = mock.sentinel.ctx
image_id = mock.sentinel.image_id
info = service.show(ctx, image_id, include_locations=True)
client.call.assert_called_once_with(
ctx, 2, 'get', args=(image_id,))
expected = locations
expected.append({'url': mock.sentinel.duri, 'metadata': {}})
self.assertIn('locations', info)
self.assertEqual(expected, info['locations'])
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_do_not_show_deleted_images_v2(
self, is_avail_mock, trans_from_mock):
class fake_image_cls(dict):
id = 'b31aa5dd-f07a-4748-8f15-398346887584'
deleted = True
glance_image = fake_image_cls()
client = mock.MagicMock()
client.call.return_value = glance_image
ctx = mock.sentinel.ctx
service = glance.GlanceImageServiceV2(client)
with testtools.ExpectedException(exception.ImageNotFound):
service.show(ctx, glance_image.id, show_deleted=False)
client.call.assert_called_once_with(
ctx, 2, 'get', args=(glance_image.id,))
self.assertFalse(is_avail_mock.called)
self.assertFalse(trans_from_mock.called)
class TestDetail(test.NoDBTestCase):
"""Tests the detail method of the GlanceImageServiceV2."""
@mock.patch('nova.image.glance._extract_query_params_v2')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_detail_success_available_v2(self, is_avail_mock, trans_from_mock,
ext_query_mock):
params = {}
is_avail_mock.return_value = True
ext_query_mock.return_value = params
trans_from_mock.return_value = mock.sentinel.trans_from
client = mock.MagicMock()
client.call.return_value = [mock.sentinel.images_0]
ctx = mock.sentinel.ctx
service = glance.GlanceImageServiceV2(client)
images = service.detail(ctx, **params)
client.call.assert_called_once_with(ctx, 2, 'list', kwargs={})
is_avail_mock.assert_called_once_with(ctx, mock.sentinel.images_0)
trans_from_mock.assert_called_once_with(mock.sentinel.images_0)
self.assertEqual([mock.sentinel.trans_from], images)
@mock.patch('nova.image.glance._extract_query_params_v2')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_detail_success_unavailable_v2(
self, is_avail_mock, trans_from_mock, ext_query_mock):
params = {}
is_avail_mock.return_value = False
ext_query_mock.return_value = params
trans_from_mock.return_value = mock.sentinel.trans_from
client = mock.MagicMock()
client.call.return_value = [mock.sentinel.images_0]
ctx = mock.sentinel.ctx
service = glance.GlanceImageServiceV2(client)
images = service.detail(ctx, **params)
client.call.assert_called_once_with(ctx, 2, 'list', kwargs={})
is_avail_mock.assert_called_once_with(ctx, mock.sentinel.images_0)
self.assertFalse(trans_from_mock.called)
self.assertEqual([], images)
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_detail_params_passed_v2(self, is_avail_mock, _trans_from_mock):
client = mock.MagicMock()
client.call.return_value = [mock.sentinel.images_0]
ctx = mock.sentinel.ctx
service = glance.GlanceImageServiceV2(client)
service.detail(ctx, page_size=5, limit=10)
client.call.assert_called_once_with(
ctx, 2, 'list', kwargs=dict(filters={}, page_size=5, limit=10))
@mock.patch('nova.image.glance._reraise_translated_exception')
@mock.patch('nova.image.glance._extract_query_params_v2')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_detail_client_failure_v2(self, is_avail_mock, trans_from_mock,
ext_query_mock, reraise_mock):
params = {}
ext_query_mock.return_value = params
raised = exception.Forbidden()
client = mock.MagicMock()
client.call.side_effect = glanceclient.exc.Forbidden
ctx = mock.sentinel.ctx
reraise_mock.side_effect = raised
service = glance.GlanceImageServiceV2(client)
with testtools.ExpectedException(exception.Forbidden):
service.detail(ctx, **params)
client.call.assert_called_once_with(ctx, 2, 'list', kwargs={})
self.assertFalse(is_avail_mock.called)
self.assertFalse(trans_from_mock.called)
reraise_mock.assert_called_once_with()
class TestCreate(test.NoDBTestCase):
"""Tests the create method of the GlanceImageServiceV2."""
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._translate_to_glance')
def test_create_success_v2(
self, trans_to_mock, trans_from_mock):
translated = {
'name': mock.sentinel.name,
}
trans_to_mock.return_value = translated
trans_from_mock.return_value = mock.sentinel.trans_from
image_mock = {}
client = mock.MagicMock()
client.call.return_value = {'id': '123'}
ctx = mock.sentinel.ctx
service = glance.GlanceImageServiceV2(client)
image_meta = service.create(ctx, image_mock)
trans_to_mock.assert_called_once_with(image_mock)
# Verify that the 'id' element has been removed as a kwarg to
# the call to glanceclient's update (since the image ID is
# supplied as a positional arg), and that the
# purge_props default is True.
client.call.assert_called_once_with(
ctx, 2, 'create', kwargs=dict(name=mock.sentinel.name))
trans_from_mock.assert_called_once_with({'id': '123'})
self.assertEqual(mock.sentinel.trans_from, image_meta)
# Now verify that if we supply image data to the call,
# that the client is also called with the data kwarg
client.reset_mock()
client.call.return_value = {'id': mock.sentinel.image_id}
service.create(ctx, {}, data=mock.sentinel.data)
self.assertEqual(3, client.call.call_count)
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._translate_to_glance')
def test_create_success_v2_force_activate(
self, trans_to_mock, trans_from_mock):
"""Tests that creating an image with the v2 API with a size of 0 will
trigger a call to set the disk and container formats.
"""
translated = {
'name': mock.sentinel.name,
}
trans_to_mock.return_value = translated
trans_from_mock.return_value = mock.sentinel.trans_from
# size=0 will trigger force_activate=True
image_mock = {'size': 0}
client = mock.MagicMock()
client.call.return_value = {'id': '123'}
ctx = mock.sentinel.ctx
service = glance.GlanceImageServiceV2(client)
with mock.patch.object(service,
'_get_image_create_disk_format_default',
return_value='vdi'):
image_meta = service.create(ctx, image_mock)
trans_to_mock.assert_called_once_with(image_mock)
# Verify that the disk_format and container_format kwargs are passed.
create_call_kwargs = client.call.call_args_list[0][1]['kwargs']
self.assertEqual('vdi', create_call_kwargs['disk_format'])
self.assertEqual('bare', create_call_kwargs['container_format'])
trans_from_mock.assert_called_once_with({'id': '123'})
self.assertEqual(mock.sentinel.trans_from, image_meta)
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._translate_to_glance')
def test_create_success_v2_with_location(
self, trans_to_mock, trans_from_mock):
translated = {
'id': mock.sentinel.id,
'name': mock.sentinel.name,
'location': mock.sentinel.location
}
trans_to_mock.return_value = translated
trans_from_mock.return_value = mock.sentinel.trans_from
image_mock = {}
client = mock.MagicMock()
client.call.return_value = translated
ctx = mock.sentinel.ctx
service = glance.GlanceImageServiceV2(client)
image_meta = service.create(ctx, image_mock)
trans_to_mock.assert_called_once_with(image_mock)
self.assertEqual(2, client.call.call_count)
trans_from_mock.assert_called_once_with(translated)
self.assertEqual(mock.sentinel.trans_from, image_meta)
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._translate_to_glance')
def test_create_success_v2_with_sharing(
self, trans_to_mock, trans_from_mock):
"""Tests creating a snapshot image by one tenant that is shared with
the owner of the instance.
"""
translated = {
'name': mock.sentinel.name,
'visibility': 'shared'
}
trans_to_mock.return_value = translated
trans_from_mock.return_value = mock.sentinel.trans_from
image_meta = {
'name': mock.sentinel.name,
'visibility': 'shared',
'properties': {
# This triggers the image_members.create call to glance.
'instance_owner': uuids.instance_uuid
}
}
client = mock.MagicMock()
def fake_call(_ctxt, _version, method, controller=None, args=None,
kwargs=None):
if method == 'create':
if controller is None:
# Call to create the image.
translated['id'] = uuids.image_id
return translated
if controller == 'image_members':
self.assertIsNotNone(args)
self.assertEqual(
(uuids.image_id, uuids.instance_uuid), args)
# Call to share the image with the instance owner.
return mock.sentinel.member
self.fail('Unexpected glanceclient call %s.%s' %
(controller or 'images', method))
client.call.side_effect = fake_call
ctx = mock.sentinel.ctx
service = glance.GlanceImageServiceV2(client)
ret_image = service.create(ctx, image_meta)
translated_image_meta = copy.copy(image_meta)
# The instance_owner property should have been popped off and not sent
# to glance during the create() call.
translated_image_meta['properties'].pop('instance_owner', None)
trans_to_mock.assert_called_once_with(translated_image_meta)
# glanceclient should be called twice:
# - once for the image create
# - once for sharing the image with the instance owner
self.assertEqual(2, client.call.call_count)
trans_from_mock.assert_called_once_with(translated)
self.assertEqual(mock.sentinel.trans_from, ret_image)
@mock.patch('nova.image.glance._reraise_translated_exception')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._translate_to_glance')
def test_create_client_failure_v2(self, trans_to_mock, trans_from_mock,
reraise_mock):
translated = {}
trans_to_mock.return_value = translated
image_mock = mock.MagicMock(spec=dict)
raised = exception.Invalid()
client = mock.MagicMock()
client.call.side_effect = glanceclient.exc.BadRequest
ctx = mock.sentinel.ctx
reraise_mock.side_effect = raised
service = glance.GlanceImageServiceV2(client)
self.assertRaises(exception.Invalid, service.create, ctx, image_mock)
trans_to_mock.assert_called_once_with(image_mock)
self.assertFalse(trans_from_mock.called)
def _test_get_image_create_disk_format_default(self,
test_schema,
expected_disk_format):
mock_client = mock.MagicMock()
mock_client.call.return_value = test_schema
service = glance.GlanceImageServiceV2(mock_client)
disk_format = service._get_image_create_disk_format_default(
mock.sentinel.ctx)
self.assertEqual(expected_disk_format, disk_format)
mock_client.call.assert_called_once_with(
mock.sentinel.ctx, 2, 'get', args=('image',), controller='schemas')
def test_get_image_create_disk_format_default_no_schema(self):
"""Tests that if there is no disk_format schema we default to qcow2.
"""
test_schema = FakeSchema({'properties': {}})
self._test_get_image_create_disk_format_default(test_schema, 'qcow2')
def test_get_image_create_disk_format_default_single_entry(self):
"""Tests that if there is only a single supported disk_format then
we use that.
"""
test_schema = FakeSchema({
'properties': {
'disk_format': {
'enum': ['iso'],
}
}
})
self._test_get_image_create_disk_format_default(test_schema, 'iso')
def test_get_image_create_disk_format_default_multiple_entries(self):
"""Tests that if there are multiple supported disk_formats we look for
one in a preferred order.
"""
test_schema = FakeSchema({
'properties': {
'disk_format': {
# For this test we want to skip qcow2 since that's primary.
'enum': ['vhd', 'raw'],
}
}
})
self._test_get_image_create_disk_format_default(test_schema, 'vhd')
def test_get_image_create_disk_format_default_multiple_entries_no_match(
self):
"""Tests that if we can't match a supported disk_format to what we
prefer then we take the first supported disk_format in the list.
"""
test_schema = FakeSchema({
'properties': {
'disk_format': {
# For this test we want to skip qcow2 since that's primary.
'enum': ['aki', 'ari', 'ami'],
}
}
})
self._test_get_image_create_disk_format_default(test_schema, 'aki')
class TestUpdate(test.NoDBTestCase):
"""Tests the update method of the GlanceImageServiceV2."""
@mock.patch('nova.utils.tpool_execute',
side_effect=nova.utils.tpool_execute)
@mock.patch('nova.image.glance.GlanceImageServiceV2.show')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._translate_to_glance')
def test_update_success_v2(
self, trans_to_mock, trans_from_mock, show_mock, texec_mock):
image = {
'id': mock.sentinel.image_id,
'name': mock.sentinel.name,
'properties': {'prop_to_keep': '4'}
}
translated = {
'id': mock.sentinel.image_id,
'name': mock.sentinel.name,
'prop_to_keep': '4'
}
trans_to_mock.return_value = translated
trans_from_mock.return_value = mock.sentinel.trans_from
client = mock.MagicMock()
client.call.return_value = mock.sentinel.image_meta
ctx = mock.sentinel.ctx
show_mock.return_value = {
'image_id': mock.sentinel.image_id,
'properties': {'prop_to_remove': '1',
'prop_to_keep': '3'}
}
service = glance.GlanceImageServiceV2(client)
image_meta = service.update(
ctx, mock.sentinel.image_id, image, purge_props=True)
show_mock.assert_called_once_with(
mock.sentinel.ctx, mock.sentinel.image_id)
trans_to_mock.assert_called_once_with(image)
# Verify that the 'id' element has been removed as a kwarg to
# the call to glanceclient's update (since the image ID is
# supplied as a positional arg), and that the
# purge_props default is True.
client.call.assert_called_once_with(
ctx, 2, 'update', kwargs=dict(
image_id=mock.sentinel.image_id, name=mock.sentinel.name,
prop_to_keep='4', remove_props=['prop_to_remove'],
))
trans_from_mock.assert_called_once_with(mock.sentinel.image_meta)
self.assertEqual(mock.sentinel.trans_from, image_meta)
# Now verify that if we supply image data to the call,
# that the client is also called with the data kwarg
client.reset_mock()
client.call.return_value = {'id': mock.sentinel.image_id}
service.update(ctx, mock.sentinel.image_id, {},
data=mock.sentinel.data)
self.assertEqual(3, client.call.call_count)
texec_mock.assert_called_once_with(
client.call, ctx, 2, 'upload',
args=(mock.sentinel.image_id,
mock.sentinel.data))
@mock.patch('nova.image.glance.GlanceImageServiceV2.show')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._translate_to_glance')
def test_update_success_v2_with_location(
self, trans_to_mock, trans_from_mock, show_mock):
translated = {
'id': mock.sentinel.id,
'name': mock.sentinel.name,
'location': mock.sentinel.location
}
show_mock.return_value = {'image_id': mock.sentinel.image_id}
trans_to_mock.return_value = translated
trans_from_mock.return_value = mock.sentinel.trans_from
image_mock = mock.MagicMock(spec=dict)
client = mock.MagicMock()
client.call.return_value = translated
ctx = mock.sentinel.ctx
service = glance.GlanceImageServiceV2(client)
image_meta = service.update(ctx, mock.sentinel.image_id,
image_mock, purge_props=False)
trans_to_mock.assert_called_once_with(image_mock)
self.assertEqual(2, client.call.call_count)
trans_from_mock.assert_called_once_with(translated)
self.assertEqual(mock.sentinel.trans_from, image_meta)
@mock.patch('nova.image.glance.GlanceImageServiceV2.show')
@mock.patch('nova.image.glance._reraise_translated_image_exception')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._translate_to_glance')
def test_update_client_failure_v2(self, trans_to_mock, trans_from_mock,
reraise_mock, show_mock):
image = {
'id': mock.sentinel.image_id,
'name': mock.sentinel.name,
'properties': {'prop_to_keep': '4'}
}
translated = {
'id': mock.sentinel.image_id,
'name': mock.sentinel.name,
'prop_to_keep': '4'
}
trans_to_mock.return_value = translated
trans_from_mock.return_value = mock.sentinel.trans_from
raised = exception.ImageNotAuthorized(image_id=123)
client = mock.MagicMock()
client.call.side_effect = glanceclient.exc.Forbidden
ctx = mock.sentinel.ctx
reraise_mock.side_effect = raised
show_mock.return_value = {
'image_id': mock.sentinel.image_id,
'properties': {'prop_to_remove': '1',
'prop_to_keep': '3'}
}
service = glance.GlanceImageServiceV2(client)
self.assertRaises(exception.ImageNotAuthorized,
service.update, ctx, mock.sentinel.image_id,
image)
client.call.assert_called_once_with(
ctx, 2, 'update', kwargs=dict(
image_id=mock.sentinel.image_id,
name=mock.sentinel.name,
prop_to_keep='4',
remove_props=['prop_to_remove'],
))
reraise_mock.assert_called_once_with(mock.sentinel.image_id)
class TestDelete(test.NoDBTestCase):
"""Tests the delete method of the GlanceImageServiceV2."""
def test_delete_success_v2(self):
client = mock.MagicMock()
client.call.return_value = True
ctx = mock.sentinel.ctx
service = glance.GlanceImageServiceV2(client)
service.delete(ctx, mock.sentinel.image_id)
client.call.assert_called_once_with(
ctx, 2, 'delete', args=(mock.sentinel.image_id,))
def test_delete_client_failure_v2(self):
client = mock.MagicMock()
client.call.side_effect = glanceclient.exc.NotFound
ctx = mock.sentinel.ctx
service = glance.GlanceImageServiceV2(client)
self.assertRaises(exception.ImageNotFound, service.delete, ctx,
mock.sentinel.image_id)
def test_delete_client_conflict_failure_v2(self):
client = mock.MagicMock()
fake_details = 'Image %s is in use' % mock.sentinel.image_id
client.call.side_effect = glanceclient.exc.HTTPConflict(
details=fake_details)
ctx = mock.sentinel.ctx
service = glance.GlanceImageServiceV2(client)
self.assertRaises(exception.ImageDeleteConflict, service.delete, ctx,
mock.sentinel.image_id)
@ddt.ddt
class TestGlanceApiServers(test.NoDBTestCase):
def test_get_api_servers_multiple(self):
"""Test get_api_servers via `api_servers` conf option."""
glance_servers = ['http://10.0.1.1:9292',
'https://10.0.0.1:9293',
'http://10.0.2.2:9294']
expected_servers = set(glance_servers)
self.flags(api_servers=glance_servers, group='glance')
api_servers = glance.get_api_servers('context')
# In len(expected_servers) cycles, we should get all the endpoints
self.assertEqual(expected_servers,
{next(api_servers) for _ in expected_servers})
@ddt.data(['http://158.69.92.100/image/v2/',
'http://158.69.92.100/image/'],
['http://158.69.92.100/image/v2',
'http://158.69.92.100/image/'],
['http://158.69.92.100/image/v2.0/',
'http://158.69.92.100/image/'],
['http://158.69.92.100/image/',
'http://158.69.92.100/image/'],
['http://158.69.92.100/image',
'http://158.69.92.100/image'],
['http://158.69.92.100/v2',
'http://158.69.92.100/'],
['http://thing.novav2.0oh.v2.foo/image/v2/',
'http://thing.novav2.0oh.v2.foo/image/'])
@ddt.unpack
def test_get_api_servers_get_ksa_adapter(self, catalog_url, stripped):
"""Test get_api_servers via nova.utils.get_ksa_adapter()."""
self.flags(api_servers=None, group='glance')
with mock.patch('keystoneauth1.adapter.Adapter.'
'get_endpoint_data') as mock_epd:
mock_epd.return_value.catalog_url = catalog_url
api_servers = glance.get_api_servers(mock.Mock())
self.assertEqual(stripped, next(api_servers))
# Still get itertools.cycle behavior
self.assertEqual(stripped, next(api_servers))
mock_epd.assert_called_once_with()
@mock.patch('keystoneauth1.adapter.Adapter.get_endpoint_data')
def test_get_api_servers_get_ksa_adapter_endpoint_override(self,
mock_epd):
self.flags(endpoint_override='foo', group='glance')
api_servers = glance.get_api_servers(mock.Mock())
self.assertEqual('foo', next(api_servers))
self.assertEqual('foo', next(api_servers))
mock_epd.assert_not_called()
class TestUpdateGlanceImage(test.NoDBTestCase):
@mock.patch('nova.image.glance.GlanceImageServiceV2')
def test_start(self, mock_glance_image_service):
consumer = glance.UpdateGlanceImage(
'context', 'id', 'metadata', 'stream')
with mock.patch.object(glance, 'get_remote_image_service') as a_mock:
a_mock.return_value = (mock_glance_image_service, 'image_id')
consumer.start()
mock_glance_image_service.update.assert_called_with(
'context', 'image_id', 'metadata', 'stream', purge_props=False)
class TestExtractAttributes(test.NoDBTestCase):
@mock.patch.object(schemas, 'Schema', side_effect=FakeSchema)
def test_extract_image_attributes_active_images_with_locations(
self, mocked_schema):
image_v2 = ImageV2(image_fixtures['active_image_v2'])
image_v2_meta = glance._translate_from_glance(
image_v2, include_locations=True)
self.assertIn('locations', image_v2_meta)
self.assertIn('direct_url', image_v2_meta)
image_v2_meta = glance._translate_from_glance(
image_v2, include_locations=False)
self.assertNotIn('locations', image_v2_meta)
self.assertNotIn('direct_url', image_v2_meta)
class TestExtractQueryParams(test.NoDBTestCase):
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_detail_extract_query_params_v2(
self, is_avail_mock, _trans_from_mock):
client = mock.MagicMock()
client.call.return_value = [mock.sentinel.images_0]
ctx = mock.sentinel.ctx
service = glance.GlanceImageServiceV2(client)
input_filters = {
'property-kernel-id': 'some-id',
'changes-since': 'some-date',
'is_public': 'true',
'name': 'some-name'
}
service.detail(ctx, filters=input_filters, page_size=5, limit=10)
expected_filters_v1 = {'visibility': 'public',
'name': 'some-name',
'kernel-id': 'some-id',
'updated_at': 'gte:some-date'}
client.call.assert_called_once_with(
ctx, 2, 'list', kwargs=dict(
filters=expected_filters_v1,
page_size=5,
limit=10,
))
class TestTranslateToGlance(test.NoDBTestCase):
"""Test that image was translated correct to be accepted by Glance"""
def setUp(self):
self.fixture = {
'checksum': 'fb10c6486390bec8414be90a93dfff3b',
'container_format': 'bare',
'created_at': "",
'deleted': False,
'deleted_at': None,
'disk_format': 'raw',
'id': 'f8116538-309f-449c-8d49-df252a97a48d',
'is_public': True,
'min_disk': '0',
'min_ram': '0',
'name': 'tempest-image-1294122904',
'owner': 'd76b51cf8a44427ea404046f4c1d82ab',
'properties':
{'os_distro': 'value2', 'os_version': 'value1',
'base_image_ref': 'ea36315c-e527-4643-a46a-9fd61d027cc1',
'image_type': 'test',
'instance_uuid': 'ec1ea9c7-8c5e-498d-a753-6ccc2464123c',
'kernel_id': 'None',
'ramdisk_id': ' ',
'user_id': 'ca2ff78fd33042ceb45fbbe19012ef3f',
'boolean_prop': True},
'size': 1024,
'status': 'active',
'updated_at': ""}
super(TestTranslateToGlance, self).setUp()
def test_convert_to_v2(self):
expected_v2_image = {
'base_image_ref': 'ea36315c-e527-4643-a46a-9fd61d027cc1',
'boolean_prop': 'True',
'checksum': 'fb10c6486390bec8414be90a93dfff3b',
'container_format': 'bare',
'disk_format': 'raw',
'id': 'f8116538-309f-449c-8d49-df252a97a48d',
'image_type': 'test',
'instance_uuid': 'ec1ea9c7-8c5e-498d-a753-6ccc2464123c',
'min_disk': 0,
'min_ram': 0,
'name': 'tempest-image-1294122904',
'os_distro': 'value2',
'os_version': 'value1',
'owner': 'd76b51cf8a44427ea404046f4c1d82ab',
'user_id': 'ca2ff78fd33042ceb45fbbe19012ef3f',
'visibility': 'public'}
nova_image_dict = self.fixture
image_v2_dict = glance._translate_to_glance(nova_image_dict)
self.assertEqual(expected_v2_image, image_v2_dict)
@mock.patch('stat.S_ISSOCK')
@mock.patch('stat.S_ISFIFO')
@mock.patch('os.fsync')
@mock.patch('os.fstat')
class TestSafeFSync(test.NoDBTestCase):
"""Validate _safe_fsync."""
@staticmethod
def common(mock_isfifo, isfifo, mock_issock, issock, mock_fstat):
"""Execution & assertions common to all test cases."""
fh = mock.Mock()
mock_isfifo.return_value = isfifo
mock_issock.return_value = issock
glance.GlanceImageServiceV2._safe_fsync(fh)
fh.fileno.assert_called_once_with()
mock_fstat.assert_called_once_with(fh.fileno.return_value)
mock_isfifo.assert_called_once_with(mock_fstat.return_value.st_mode)
# Condition short-circuits, so S_ISSOCK is only called if !S_ISFIFO
if isfifo:
mock_issock.assert_not_called()
else:
mock_issock.assert_called_once_with(
mock_fstat.return_value.st_mode)
return fh
def test_fsync(self, mock_fstat, mock_fsync, mock_isfifo, mock_issock):
"""Validate path where fsync is called."""
fh = self.common(mock_isfifo, False, mock_issock, False, mock_fstat)
mock_fsync.assert_called_once_with(fh.fileno.return_value)
def test_fifo(self, mock_fstat, mock_fsync, mock_isfifo, mock_issock):
"""Validate fsync not called for pipe/fifo."""
self.common(mock_isfifo, True, mock_issock, False, mock_fstat)
mock_fsync.assert_not_called()
def test_sock(self, mock_fstat, mock_fsync, mock_isfifo, mock_issock):
"""Validate fsync not called for socket."""
self.common(mock_isfifo, False, mock_issock, True, mock_fstat)
mock_fsync.assert_not_called()
class TestImportCopy(test.NoDBTestCase):
"""Tests the image import/copy methods."""
def _test_import(self, exception=None):
client = mock.MagicMock()
if exception:
client.call.side_effect = exception
else:
client.call.return_value = True
ctx = mock.sentinel.ctx
service = glance.GlanceImageServiceV2(client)
service.image_import_copy(ctx, mock.sentinel.image_id,
[mock.sentinel.store])
return client
def test_image_import_copy_success(self):
client = self._test_import()
client.call.assert_called_once_with(
mock.sentinel.ctx, 2, 'image_import',
args=(mock.sentinel.image_id,),
kwargs={'method': 'copy-image',
'stores': [mock.sentinel.store]})
def test_image_import_copy_not_found(self):
self.assertRaises(exception.ImageNotFound,
self._test_import,
glanceclient.exc.NotFound)
def test_image_import_copy_not_authorized(self):
self.assertRaises(exception.ImageNotAuthorized,
self._test_import,
glanceclient.exc.HTTPForbidden)
def test_image_import_copy_failed_workflow(self):
self.assertRaises(exception.ImageImportImpossible,
self._test_import,
glanceclient.exc.HTTPConflict)
def test_image_import_copy_failed_already_imported(self):
self.assertRaises(exception.ImageBadRequest,
self._test_import,
glanceclient.exc.HTTPBadRequest)
def test_api(self):
api = glance.API()
with mock.patch.object(api, '_get_session_and_image_id') as g:
session = mock.MagicMock()
g.return_value = session, mock.sentinel.image_id
api.copy_image_to_store(mock.sentinel.ctx,
mock.sentinel.image_id,
mock.sentinel.store)
session.image_import_copy.assert_called_once_with(
mock.sentinel.ctx, mock.sentinel.image_id,
[mock.sentinel.store])
def test_api_to_client(self):
# Test all the way down to the client to test the interface between
# API and GlanceImageServiceV2
wrapper = mock.MagicMock()
client = glance.GlanceImageServiceV2(client=wrapper)
api = glance.API()
with mock.patch.object(api, '_get_session_and_image_id') as m:
m.return_value = (client, mock.sentinel.image_id)
api.copy_image_to_store(mock.sentinel.ctx,
mock.sentinel.image_id,
mock.sentinel.store)
wrapper.call.assert_called_once_with(
mock.sentinel.ctx, 2, 'image_import',
args=(mock.sentinel.image_id,),
kwargs={'method': 'copy-image',
'stores': [mock.sentinel.store]})
|
apache-2.0
|
cjqian/incubator-airflow
|
airflow/migrations/versions/f2ca10b85618_add_dag_stats_table.py
|
58
|
1334
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add dag_stats table
Revision ID: f2ca10b85618
Revises: 64de9cddf6c9
Create Date: 2016-07-20 15:08:28.247537
"""
# revision identifiers, used by Alembic.
revision = 'f2ca10b85618'
down_revision = '64de9cddf6c9'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table('dag_stats',
sa.Column('dag_id', sa.String(length=250), nullable=False),
sa.Column('state', sa.String(length=50), nullable=False),
sa.Column('count', sa.Integer(), nullable=False, default=0),
sa.Column('dirty', sa.Boolean(), nullable=False, default=False),
sa.PrimaryKeyConstraint('dag_id', 'state'))
def downgrade():
op.drop_table('dag_stats')
|
apache-2.0
|
gianlucaborello/linux
|
scripts/gdb/linux/symbols.py
|
467
|
6343
|
#
# gdb helper commands and functions for Linux kernel debugging
#
# load kernel and module symbols
#
# Copyright (c) Siemens AG, 2011-2013
#
# Authors:
# Jan Kiszka <[email protected]>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
import os
import re
from linux import modules
if hasattr(gdb, 'Breakpoint'):
class LoadModuleBreakpoint(gdb.Breakpoint):
def __init__(self, spec, gdb_command):
super(LoadModuleBreakpoint, self).__init__(spec, internal=True)
self.silent = True
self.gdb_command = gdb_command
def stop(self):
module = gdb.parse_and_eval("mod")
module_name = module['name'].string()
cmd = self.gdb_command
# enforce update if object file is not found
cmd.module_files_updated = False
# Disable pagination while reporting symbol (re-)loading.
# The console input is blocked in this context so that we would
# get stuck waiting for the user to acknowledge paged output.
show_pagination = gdb.execute("show pagination", to_string=True)
pagination = show_pagination.endswith("on.\n")
gdb.execute("set pagination off")
if module_name in cmd.loaded_modules:
gdb.write("refreshing all symbols to reload module "
"'{0}'\n".format(module_name))
cmd.load_all_symbols()
else:
cmd.load_module_symbols(module)
# restore pagination state
gdb.execute("set pagination %s" % ("on" if pagination else "off"))
return False
class LxSymbols(gdb.Command):
"""(Re-)load symbols of Linux kernel and currently loaded modules.
The kernel (vmlinux) is taken from the current working directly. Modules (.ko)
are scanned recursively, starting in the same directory. Optionally, the module
search path can be extended by a space separated list of paths passed to the
lx-symbols command."""
module_paths = []
module_files = []
module_files_updated = False
loaded_modules = []
breakpoint = None
def __init__(self):
super(LxSymbols, self).__init__("lx-symbols", gdb.COMMAND_FILES,
gdb.COMPLETE_FILENAME)
def _update_module_files(self):
self.module_files = []
for path in self.module_paths:
gdb.write("scanning for modules in {0}\n".format(path))
for root, dirs, files in os.walk(path):
for name in files:
if name.endswith(".ko"):
self.module_files.append(root + "/" + name)
self.module_files_updated = True
def _get_module_file(self, module_name):
module_pattern = ".*/{0}\.ko$".format(
module_name.replace("_", r"[_\-]"))
for name in self.module_files:
if re.match(module_pattern, name) and os.path.exists(name):
return name
return None
def _section_arguments(self, module):
try:
sect_attrs = module['sect_attrs'].dereference()
except gdb.error:
return ""
attrs = sect_attrs['attrs']
section_name_to_address = {
attrs[n]['name'].string(): attrs[n]['address']
for n in range(int(sect_attrs['nsections']))}
args = []
for section_name in [".data", ".data..read_mostly", ".rodata", ".bss"]:
address = section_name_to_address.get(section_name)
if address:
args.append(" -s {name} {addr}".format(
name=section_name, addr=str(address)))
return "".join(args)
def load_module_symbols(self, module):
module_name = module['name'].string()
module_addr = str(module['core_layout']['base']).split()[0]
module_file = self._get_module_file(module_name)
if not module_file and not self.module_files_updated:
self._update_module_files()
module_file = self._get_module_file(module_name)
if module_file:
gdb.write("loading @{addr}: {filename}\n".format(
addr=module_addr, filename=module_file))
cmdline = "add-symbol-file {filename} {addr}{sections}".format(
filename=module_file,
addr=module_addr,
sections=self._section_arguments(module))
gdb.execute(cmdline, to_string=True)
if module_name not in self.loaded_modules:
self.loaded_modules.append(module_name)
else:
gdb.write("no module object found for '{0}'\n".format(module_name))
def load_all_symbols(self):
gdb.write("loading vmlinux\n")
# Dropping symbols will disable all breakpoints. So save their states
# and restore them afterward.
saved_states = []
if hasattr(gdb, 'breakpoints') and not gdb.breakpoints() is None:
for bp in gdb.breakpoints():
saved_states.append({'breakpoint': bp, 'enabled': bp.enabled})
# drop all current symbols and reload vmlinux
gdb.execute("symbol-file", to_string=True)
gdb.execute("symbol-file vmlinux")
self.loaded_modules = []
module_list = modules.module_list()
if not module_list:
gdb.write("no modules found\n")
else:
[self.load_module_symbols(module) for module in module_list]
for saved_state in saved_states:
saved_state['breakpoint'].enabled = saved_state['enabled']
def invoke(self, arg, from_tty):
self.module_paths = [os.path.expanduser(p) for p in arg.split()]
self.module_paths.append(os.getcwd())
# enforce update
self.module_files = []
self.module_files_updated = False
self.load_all_symbols()
if hasattr(gdb, 'Breakpoint'):
if self.breakpoint is not None:
self.breakpoint.delete()
self.breakpoint = None
self.breakpoint = LoadModuleBreakpoint(
"kernel/module.c:do_init_module", self)
else:
gdb.write("Note: symbol update on module loading not supported "
"with this gdb version\n")
LxSymbols()
|
gpl-2.0
|
jwir3/andross
|
andross/console.py
|
1
|
1792
|
import argparse
import pkg_resources
import sys
from tosser import Tosser
def createParser():
"""
Create an ArgumentParser object for use within the main method of this
program for command-line use of this script.
"""
version = pkg_resources.require('andross')[0].version
parser = argparse.ArgumentParser(prog='andross', description='''
Move drawable resources already organized into a hierarchy into the
resources folder for a given android application
''', add_help=True)
parser.add_argument('-v', '--version', help='display the version information for %(prog)s', action='version', version='%(prog)s version ' + str(version))
parser.add_argument('-s', '--source', dest='srcPath', metavar='<source path>', default='.', help='path containing the drawable resources to be moved', action='store')
parser.add_argument('appPath', metavar='<app path>', help='path to the android application where the drawable resources should be added', action='store')
return parser
def printVersion():
"""
Use the ArgumentParser object created as part of createParser() to print the
version information about this program.
"""
parser = createParser()
parser.parse_args(['--version'])
def main():
"""
Parse arguments passed on the command line using an ArgumentParser,
construct a new Tosser object from these arguments, and use the
Tosser.tossDrawables() method to move drawable files from one
location in the file system to another.
"""
parser = createParser()
parsedArgs = parser.parse_args(sys.argv[1:])
if not parsedArgs.appPath:
parser.print_help()
tosser = Tosser(parsedArgs.srcPath, parsedArgs.appPath)
tosser.tossDrawables()
if __name__ == '__main__':
exit(main())
|
mpl-2.0
|
indictranstech/Das_Erpnext
|
erpnext/selling/page/sales_funnel/sales_funnel.py
|
56
|
1473
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
@frappe.whitelist()
def get_funnel_data(from_date, to_date):
active_leads = frappe.db.sql("""select count(*) from `tabLead`
where (date(`modified`) between %s and %s)
and status != "Do Not Contact" """, (from_date, to_date))[0][0]
active_leads += frappe.db.sql("""select count(distinct customer) from `tabContact`
where (date(`modified`) between %s and %s)
and status != "Passive" """, (from_date, to_date))[0][0]
opportunities = frappe.db.sql("""select count(*) from `tabOpportunity`
where docstatus = 1 and (date(`creation`) between %s and %s)
and status != "Lost" """, (from_date, to_date))[0][0]
quotations = frappe.db.sql("""select count(*) from `tabQuotation`
where docstatus = 1 and (date(`creation`) between %s and %s)
and status != "Lost" """, (from_date, to_date))[0][0]
sales_orders = frappe.db.sql("""select count(*) from `tabSales Order`
where docstatus = 1 and (date(`creation`) between %s and %s)""", (from_date, to_date))[0][0]
return [
{ "title": "Active Leads / Customers", "value": active_leads, "color": "#B03B46" },
{ "title": "Opportunities", "value": opportunities, "color": "#F09C00" },
{ "title": "Quotations", "value": quotations, "color": "#006685" },
{ "title": "Sales Orders", "value": sales_orders, "color": "#00AD65" }
]
|
agpl-3.0
|
aagallag/nexmon
|
buildtools/gcc-arm-none-eabi-5_4-2016q2-osx/arm-none-eabi/share/gdb/system-gdbinit/elinos.py
|
70
|
3080
|
# Copyright (C) 2011-2015 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Configure GDB using the ELinOS environment."""
import os
import glob
import gdb
def warn(msg):
print "warning: %s" % msg
def get_elinos_environment():
"""Return the ELinOS environment.
If the ELinOS environment is properly set up, return a dictionary
which contains:
* The path to the ELinOS project at key 'project';
* The path to the ELinOS CDK at key 'cdk';
* The ELinOS target name at key 'target' (Eg. 'i486-linux');
* A list of Xenomai install prefixes (which could be empty, if
the ELinOS project does not include Xenomai) at key 'xenomai'.
If one of these cannot be found, print a warning; the corresponding
value in the returned dictionary will be None.
"""
result = {}
for key in ("project", "cdk", "target"):
var = "ELINOS_" + key.upper()
if var in os.environ:
result[key] = os.environ[var]
else:
warn("%s not set" % var)
result[key] = None
if result["project"] is not None:
result["xenomai"] = glob.glob(result["project"] + "/xenomai-[0-9.]*")
else:
result["xenomai"] = []
return result
def elinos_init():
"""Initialize debugger environment for ELinOS.
Let the debugger know where to find the ELinOS libraries on host. This
assumes that an ELinOS environment is properly set up. If some environment
variables are missing, warn about which library may be missing.
"""
elinos_env = get_elinos_environment()
solib_dirs = []
# System libraries
if None in (elinos_env[key] for key in ("cdk", "target")):
warn("ELinOS system libraries will not be loaded")
else:
solib_prefix = "%s/%s" % (elinos_env["cdk"], elinos_env["target"])
solib_dirs += ["%s/%s" % (solib_prefix, "lib")]
gdb.execute("set solib-absolute-prefix %s" % solib_prefix)
# Xenomai libraries. Those are optional, so have a lighter warning
# if they cannot be located.
if elinos_env["project"] is None:
warn("Xenomai libraries may not be loaded")
else:
for dir in elinos_env['xenomai']:
solib_dirs += ["%s/%s"
% (dir, "xenomai-build/usr/realtime/lib")]
if len(solib_dirs) != 0:
gdb.execute("set solib-search-path %s" % ":".join(solib_dirs))
if __name__ == "__main__":
elinos_init()
|
gpl-3.0
|
EuropeanSocialInnovationDatabase/ESID-main
|
TextMining/Classifiers/Trainers/ANN_Trainer_Outputs.py
|
1
|
26833
|
import nltk
from os import listdir
from os.path import isfile, join,isdir
import csv
import re
import sklearn.metrics
from keras.callbacks import EarlyStopping
from keras import Input
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.metrics import precision_recall_fscore_support
from sklearn.model_selection import KFold
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Conv1D, MaxPooling1D, Flatten,Conv2D
from keras.preprocessing.text import Tokenizer
from keras.layers import Embedding
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
import gensim
import os
import numpy as np
import time
from keras import backend as K
def mcor(y_true, y_pred):
# matthews_correlation
y_pred_pos = K.round(K.clip(y_pred, 0, 1))
y_pred_neg = 1 - y_pred_pos
y_pos = K.round(K.clip(y_true, 0, 1))
y_neg = 1 - y_pos
tp = K.sum(y_pos * y_pred_pos)
tn = K.sum(y_neg * y_pred_neg)
fp = K.sum(y_neg * y_pred_pos)
fn = K.sum(y_pos * y_pred_neg)
numerator = (tp * tn - fp * fn)
denominator = K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
return numerator / (denominator + K.epsilon())
def precision(y_true, y_pred):
"""Precision metric.
Only computes a batch-wise average of precision.
Computes the precision, a metric for multi-label classification of
how many selected items are relevant.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def recall(y_true, y_pred):
"""Recall metric.
Only computes a batch-wise average of recall.
Computes the recall, a metric for multi-label classification of
how many relevant items are selected.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def f1(y_true, y_pred):
def recall(y_true, y_pred):
"""Recall metric.
Only computes a batch-wise average of recall.
Computes the recall, a metric for multi-label classification of
how many relevant items are selected.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision(y_true, y_pred):
"""Precision metric.
Only computes a batch-wise average of precision.
Computes the precision, a metric for multi-label classification of
how many selected items are relevant.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
precision = precision(y_true, y_pred)
recall = recall(y_true, y_pred)
return 2 * ((precision * recall) / (precision + recall))
class DataSet:
Annotators = []
def __init__(self):
self.Annotators = []
class Annotator:
files = []
documents = []
Name = ""
def __init__(self):
self.files = []
self.documents = []
self.Name = ""
class Document:
Lines = []
DocumentName = ""
DatabaseID = ""
Annotations = []
Text = ""
isSpam = False
Project_Mark_Objective_1A = 0
Project_Mark_Objective_1B = 0
Project_Mark_Objective_1C = 0
Project_Mark_Actors_2A = 0
Project_Mark_Actors_2B = 0
Project_Mark_Actors_2C = 0
Project_Mark_Outputs_3A = 0
Project_Mark_Innovativeness_3A = 0
isProjectObjectiveSatisfied = False
isProjectActorSatisfied = False
isProjectOutputSatisfied = False
isProjectInnovativenessSatisfied = False
isProjectObjectiveSatisfied_predicted = False
isProjectActorSatisfied_predicted = False
isProjectOutputSatisfied_predicted = False
isProjectInnovativenessSatisfied_predicted = False
def __init__(self):
self.Text = ""
self.Lines = []
self.DocumentName = ""
self.DatabaseID = ""
self.Annotations = []
self.isSpam = False
self.Project_Mark_Objective_1A = 0
self.Project_Mark_Objective_1B = 0
self.Project_Mark_Objective_1C = 0
self.Project_Mark_Actors_2A = 0
self.Project_Mark_Actors_2B = 0
self.Project_Mark_Actors_2C = 0
self.Project_Mark_Outputs_3A = 0
self.Project_Mark_Innovativeness_3A = 0
self.Project_Mark_Innovativeness_3A = 0
self.isProjectObjectiveSatisfied = False
self.isProjectActorSatisfied = False
self.isProjectOutputSatisfied = False
self.isProjectInnovativenessSatisfied = False
self.isProjectObjectiveSatisfied_predicted = False
self.isProjectActorSatisfied_predicted = False
self.isProjectOutputSatisfied_predicted = False
self.isProjectInnovativenessSatisfied_predicted = False
class Line:
StartSpan = 0
EndSpan = 0
Text = ""
Sentences = []
Tokens = []
Annotations = []
def __init__(self):
self.StartSpan = 0
self.EndSpan = 0
self.Text = ""
self.Sentences = []
self.Tokens = []
self.Annotations = []
class Sentence:
SentenceText = ""
StartSpan = -1
EndSpan = -1
Annotations = []
def __init__(self):
self.SentenceText = ""
self.StartSpan = -1
self.EndSpan = -1
self.Annotations = []
class Annotation:
FromFile = ""
FromAnnotator = ""
AnnotationText = ""
StartSpan = -1
EndSpan = -1
HighLevelClass = ""
LowLevelClass = ""
if __name__ == '__main__':
os.environ['PYTHONHASHSEED'] = '4'
np.random.seed(523)
max_words = 20000
batch_size = 32
epochs =20
GLOVE_DIR = "../../../Helpers/BratDataProcessing/Glove_dir"
MAX_SEQUENCE_LENGTH = 1100
EMBEDDING_DIM = 50
data_folder = "../../../Helpers/FullDataset_Alina"
ds = DataSet()
total_num_spam = 0
sentences = []
total_num_files = 0
#job = aetros.backend.start_job('nikolamilosevic86/GloveModel')
annotators = [f for f in listdir(data_folder) if isdir(join(data_folder, f))]
for ann in annotators:
folder = data_folder+"/"+ann
Annot = Annotator()
Annot.Name = ann
ds.Annotators.append(Annot)
onlyfiles = [f for f in listdir(folder) if (f.endswith(".txt"))]
for file in onlyfiles:
Annot.files.append(data_folder+"/"+ann+'/'+file)
doc = Document()
total_num_files = total_num_files + 1
doc.Lines = []
#doc.Annotations = []
doc.DocumentName= file
Annot.documents.append(doc)
if(file.startswith('a') or file.startswith('t')):
continue
print file
doc.DatabaseID = file.split("_")[1].split(".")[0]
fl = open(data_folder+"/"+ann+'/'+file,'r')
content = fl.read()
doc.Text = content
lines = content.split('\n')
line_index = 0
for line in lines:
l = Line()
l.StartSpan = line_index
l.EndSpan = line_index+len(line)
l.Text = line
line_index = line_index+len(line)+1
sentences.append(line)
doc.Lines.append(l)
an = open(data_folder+"/"+ann+'/'+file.replace(".txt",".ann"),'r')
annotations = an.readlines()
for a in annotations:
a = re.sub(r'\d+;\d+','',a).replace(' ',' ')
split_ann = a.split('\t')
if (split_ann[0].startswith("T")):
id = split_ann[0]
sp_split_ann = split_ann[1].split(' ')
low_level_ann = sp_split_ann[0]
if low_level_ann=="ProjectMark":
continue
span_start = sp_split_ann[1]
span_end = sp_split_ann[2]
ann_text = split_ann[2]
Ann = Annotation()
Ann.AnnotationText = ann_text
Ann.StartSpan = int(span_start)
Ann.EndSpan = int(span_end)
Ann.FromAnnotator = Annot.Name
Ann.FromFile = file
Ann.LowLevelClass = low_level_ann
if(low_level_ann == "SL_Outputs_3a"):
Ann.HighLevelClass = "Outputs"
if (low_level_ann == "SL_Objective_1a" or low_level_ann == "SL_Objective_1b" or low_level_ann == "SL_Objective_1c"):
Ann.HighLevelClass = "Objectives"
if (low_level_ann == "SL_Actors_2a" or low_level_ann == "SL_Actors_2b" or low_level_ann == "SL_Actors_2c"):
Ann.HighLevelClass = "Actors"
if (low_level_ann == "SL_Innovativeness_4a"):
Ann.HighLevelClass = "Innovativeness"
doc.Annotations.append(Ann)
for line in doc.Lines:
if line.StartSpan<=Ann.StartSpan and line.EndSpan>=Ann.EndSpan:
line.Annotations.append(Ann)
else:
id = split_ann[0]
sp_split_ann = split_ann[1].split(' ')
mark_name = sp_split_ann[0]
if (len(sp_split_ann)<=2):
continue
mark = sp_split_ann[2].replace('\n','')
if(mark_name=="DL_Outputs_3a"):
doc.Project_Mark_Outputs_3A = int(mark)
if int(mark)>=2:
doc.isProjectOutputSatisfied = True
if (mark_name == "DL_Objective_1a"):
doc.Project_Mark_Objective_1A = int(mark)
if int(mark)>=2:
doc.isProjectObjectiveSatisfied = True
if (mark_name == "DL_Objective_1b"):
doc.Project_Mark_Objective_1B = int(mark)
if int(mark)>=2:
doc.isProjectObjectiveSatisfied = True
if (mark_name == "DL_Objective_1c"):
doc.Project_Mark_Objective_1C = int(mark)
if int(mark)>=2:
doc.isProjectObjectiveSatisfied = True
if (mark_name == "DL_Innovativeness_4a"):
doc.Project_Mark_Innovativeness_3A = int(mark)
if int(mark)>=2:
doc.isProjectInnovativenessSatisfied = True
if (mark_name == "DL_Actors_2a"):
doc.Project_Mark_Actors_2A = int(mark)
if int(mark)>=2:
doc.isProjectActorSatisfied = True
if (mark_name == "DL_Actors_2b"):
doc.Project_Mark_Actors_2B = int(mark)
if int(mark)>=2:
doc.isProjectActorSatisfied = True
if (mark_name == "DL_Actors_2c"):
doc.Project_Mark_Actors_2C = int(mark)
if int(mark)>=2:
doc.isProjectActorSatisfied = True
if(doc.Project_Mark_Objective_1A==0 and doc.Project_Mark_Objective_1B == 0 and doc.Project_Mark_Objective_1C==0 and doc.Project_Mark_Actors_2A==0
and doc.Project_Mark_Actors_2B==0 and doc.Project_Mark_Actors_2B==0 and doc.Project_Mark_Actors_2C==0 and doc.Project_Mark_Outputs_3A == 0
and doc.Project_Mark_Innovativeness_3A==0):
doc.isSpam = True
total_num_spam = total_num_spam + 1
with open('annotations.csv', 'wb') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',',
quotechar='\"', quoting=csv.QUOTE_MINIMAL)
for ann in ds.Annotators:
for doc in ann.documents:
for annot in doc.Annotations:
spamwriter.writerow([annot.FromFile,annot.FromAnnotator,annot.AnnotationText,annot.LowLevelClass,annot.HighLevelClass,annot.StartSpan,annot.EndSpan])
i = 0
j = i+1
kappa_files = 0
done_documents = []
num_overlap_spam = 0
num_spam = 0
total_objectives = 0
total_outputs = 0
total_actors = 0
total_innovativeness = 0
ann1_annotations_objectives = []
ann2_annotations_objectives = []
ann1_annotations_actors = []
ann2_annotations_actors = []
ann1_annotations_outputs = []
ann2_annotations_outputs = []
ann1_annotations_innovativeness = []
ann2_annotations_innovativeness = []
match_objectives = 0
match_outputs = 0
match_actors = 0
match_innovativeness = 0
while i<len(ds.Annotators)-1:
while j<len(ds.Annotators):
annotator1 = ds.Annotators[i]
annotator2 = ds.Annotators[j]
for doc1 in annotator1.documents:
for doc2 in annotator2.documents:
if doc1.DocumentName == doc2.DocumentName and doc1.DocumentName not in done_documents:
done_documents.append(doc1.DocumentName)
line_num = 0
ann1_objective = [0] * len(doc1.Lines)
ann2_objective = [0] * len(doc2.Lines)
ann1_output = [0] * len(doc1.Lines)
ann2_output = [0] * len(doc2.Lines)
ann1_actor = [0] * len(doc1.Lines)
ann2_actor = [0] * len(doc2.Lines)
ann1_innovativeness = [0] * len(doc1.Lines)
ann2_innovativeness = [0] * len(doc2.Lines)
while line_num<len(doc1.Lines):
if len(doc1.Lines[line_num].Annotations)>0:
for a in doc1.Lines[line_num].Annotations:
if a.HighLevelClass == "Objectives":
ann1_objective[line_num] = 1
total_objectives = total_objectives + 1
if a.HighLevelClass == "Outputs":
ann1_output[line_num] = 1
total_outputs = total_outputs + 1
if a.HighLevelClass == "Actors":
ann1_actor[line_num] = 1
total_actors = total_actors + 1
if a.HighLevelClass == "Innovativeness":
ann1_innovativeness[line_num] = 1
total_innovativeness = total_innovativeness + 1
for a1 in doc2.Lines[line_num].Annotations:
if a1.HighLevelClass == a.HighLevelClass:
if a1.HighLevelClass == "Objectives":
match_objectives = match_objectives + 1
if a1.HighLevelClass == "Outputs":
match_outputs = match_outputs + 1
if a1.HighLevelClass == "Actors":
match_actors = match_actors + 1
if a1.HighLevelClass == "Innovativeness":
match_innovativeness = match_innovativeness + 1
if len(doc2.Lines[line_num].Annotations)>0:
for a in doc2.Lines[line_num].Annotations:
if a.HighLevelClass == "Objectives":
ann2_objective[line_num] = 1
total_objectives = total_objectives + 1
if a.HighLevelClass == "Outputs":
ann2_output[line_num] = 1
total_outputs = total_outputs + 1
if a.HighLevelClass == "Actors":
ann2_actor[line_num] = 1
total_actors = total_actors + 1
if a.HighLevelClass == "Innovativeness":
ann2_innovativeness[line_num] = 1
total_innovativeness = total_innovativeness + 1
line_num = line_num + 1
ann1_annotations_outputs.extend(ann1_output)
ann2_annotations_outputs.extend(ann2_output)
ann1_annotations_objectives.extend(ann1_objective)
ann2_annotations_objectives.extend(ann2_objective)
ann1_annotations_actors.extend(ann1_actor)
ann2_annotations_actors.extend(ann2_actor)
ann1_annotations_innovativeness.extend(ann1_innovativeness)
ann2_annotations_innovativeness.extend(ann2_innovativeness)
kappa_outputs = sklearn.metrics.cohen_kappa_score(ann1_output,ann2_output)
kappa_objectives = sklearn.metrics.cohen_kappa_score(ann1_objective, ann2_objective)
kappa_actors = sklearn.metrics.cohen_kappa_score(ann1_actor, ann2_actor)
kappa_innovativeness = sklearn.metrics.cohen_kappa_score(ann1_innovativeness, ann2_innovativeness)
print "Statistics for document:"+doc1.DocumentName
print "Annotators "+annotator1.Name+" and "+annotator2.Name
print "Spam by "+annotator1.Name+":"+str(doc1.isSpam)
print "Spam by " + annotator2.Name + ":" + str(doc2.isSpam)
if(doc1.isSpam == doc2.isSpam):
num_overlap_spam = num_overlap_spam+1
if doc1.isSpam:
num_spam = num_spam + 1
if doc2.isSpam:
num_spam = num_spam + 1
print "Cohen Kappa for class Objectives: "+str(kappa_objectives)
print "Cohen Kappa for class Actors: " + str(kappa_actors)
print "Cohen Kappa for class Outputs: " + str(kappa_outputs)
print "Cohen Kappa for class Innovativeness: " + str(kappa_innovativeness)
print "------------------------------------------------------------------"
kappa_files = kappa_files +1
j = j+1
i = i+1
j = i+1
print annotators
doc_array = []
text_array = []
objectives = []
actors = []
outputs = []
innovativeness = []
for ann in ds.Annotators:
for doc in ann.documents:
doc_array.append([doc.Text,doc.isProjectObjectiveSatisfied,doc.isProjectActorSatisfied,doc.isProjectOutputSatisfied,doc.isProjectInnovativenessSatisfied])
objectives.append(doc.isProjectObjectiveSatisfied)
actors.append(doc.isProjectActorSatisfied)
outputs.append(doc.isProjectOutputSatisfied)
innovativeness.append(doc.isProjectInnovativenessSatisfied)
text_array.append(doc.Text)
tokenizer = Tokenizer(num_words=max_words)
tokenizer.fit_on_texts(text_array)
sequences = tokenizer.texts_to_sequences(text_array)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
labels = to_categorical(np.asarray(outputs))
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
# split the data into a training set and a validation set
indices = np.arange(data.shape[0])
data = data[indices]
labels = labels[indices]
nb_validation_samples = int(0.1 * data.shape[0])
# x_train = data
# y_train = labels
total_precision = 0.0
total_recall = 0.0
total_fscore = 0.0
embeddings_index = {}
f = open(os.path.join(GLOVE_DIR, 'glove.6B.50d.txt'))
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Found %s word vectors.' % len(embeddings_index))
embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))
for word, i in word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
embedding_layer = Embedding(len(word_index) + 1,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)
Total_TP = 0
Total_FP = 0
Total_FN = 0
for i in range(1,10):
x_train = np.concatenate((data[0:(i-1)*nb_validation_samples],data[(i-1)*nb_validation_samples+nb_validation_samples:]), axis=0)
y_train = np.concatenate((labels[0:(i-1)*nb_validation_samples],labels[(i-1)*nb_validation_samples+nb_validation_samples:]), axis=0)
x_val = data[(i-1)*nb_validation_samples:(i-1)*nb_validation_samples+nb_validation_samples]
y_val = labels[(i-1)*nb_validation_samples:(i-1)*nb_validation_samples+nb_validation_samples]
print len(x_train)
early_stopping = EarlyStopping(monitor='binary_crossentropy', patience=5)
model = None
model = Sequential()
model.add(embedding_layer)
model.add(Conv1D(64,5,activation='relu'))
model.add(MaxPooling1D(20))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(2))
model.add(Activation('softmax'))
model.compile(loss='binary_crossentropy',
optimizer='nadam',
metrics=['accuracy',mcor,precision,recall, f1])
history = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_split=0.1,
callbacks=[early_stopping]
)
score = model.evaluate(x_val, y_val,
batch_size=batch_size, verbose=1)
score1 = score[0]
acc1 = score[1]
print('Test score:', score[0])
print('Test accuracy:', score[1])
predictions = model.predict(x_val,batch_size,1)
TP = y_val*predictions
TP_sum = 0
FP_sum = 0
FN_sum = 0
i = 0
for pred in predictions:
print "Prediction: "+str(pred)
print "Y valuation: "+str(y_val[i])
if pred[0] > 0.5 and y_val[i][0] == 1:
TP_sum = TP_sum + 1
if pred[0] > 0.5 and y_val[i][0]==0:
FP_sum = FP_sum + 1
if pred[0] < 0.5 and y_val[i][0]==1:
FN_sum = FN_sum + 1
i = i+1
number_samples = len(predictions)
print "Number of samples:"+str(number_samples)
print "True positives:"+str(TP_sum)
print "False positives:" + str(FP_sum)
print "False negatives:" + str(FN_sum)
Total_TP = Total_TP + TP_sum
Total_FP = Total_FP + FP_sum
Total_FN = Total_FN + FN_sum
precision_s = float(TP_sum)/float(TP_sum+FP_sum)
recall_s = float(TP_sum) / float(TP_sum + FN_sum)
F_score_s = 2.0*precision_s*recall_s/(precision_s+recall_s)
print "Precision: "+str(precision_s)
print "Recall: "+str(recall_s)
print "F1-score: "+str(F_score_s)
total_precision = total_precision + precision_s
total_recall = total_recall + recall_s
total_fscore = total_fscore + F_score_s
X = [""""""]
Y = [1, 0]
tokenizer = Tokenizer(num_words=max_words)
tokenizer.fit_on_texts(X)
sequences = tokenizer.texts_to_sequences(X)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
predictions = model.predict(x_val, batch_size, 1)
print predictions
x_train = data
y_train = labels
model = None
model = Sequential()
model.add(embedding_layer)
model.add(Conv1D(64,5,activation='relu'))
model.add(MaxPooling1D(20))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(2))
model.add(Activation('softmax'))
model.compile(loss='binary_crossentropy',
optimizer='nadam',
metrics=['accuracy',mcor,precision,recall, f1])
history = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_split=0.1,
#callbacks=[early_stopping]
)
model_json = model.to_json()
with open("../Models/model_outputs.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("../Models/model_outputs.h5")
print("Saved model to disk")
print "Overall results"
prec = total_precision/10
print "True positives: "+str(Total_TP)
print "False positives: "+str(Total_FP)
print "False negatives: "+str(Total_FN)
print "Precision:"+str(prec)
rec = total_recall/10
print "Recall:"+str(rec)
f1s = total_fscore/10
print "F1-score:"+str(f1s)
|
gpl-3.0
|
yhoshino11/pytest_example
|
.tox/flake8/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/packages/ordered_dict.py
|
2040
|
8935
|
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
|
mit
|
Tiryoh/mbed
|
workspace_tools/size.py
|
48
|
4171
|
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from os.path import join, abspath, dirname, exists, splitext
from subprocess import Popen, PIPE
import csv
from collections import defaultdict
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)
from workspace_tools.paths import BUILD_DIR, TOOLS_DATA
from workspace_tools.settings import GCC_ARM_PATH
from workspace_tools.tests import TEST_MAP
from workspace_tools.build_api import build_mbed_libs, build_project
SIZE = join(GCC_ARM_PATH, 'arm-none-eabi-size')
def get_size(path):
out = Popen([SIZE, path], stdout=PIPE).communicate()[0]
return map(int, out.splitlines()[1].split()[:4])
def get_percentage(before, after):
if before == 0:
return 0 if after == 0 else 100.0
return float(after - before) / float(before) * 100.0
def human_size(val):
if val>1024:
return "%.0fKb" % (float(val)/1024.0)
return "%d" % val
def print_diff(name, before, after):
print "%s: (%s -> %s) %.2f%%" % (name, human_size(before) , human_size(after) , get_percentage(before , after))
BENCHMARKS = [
("BENCHMARK_1", "CENV"),
("BENCHMARK_2", "PRINTF"),
("BENCHMARK_3", "FP"),
("BENCHMARK_4", "MBED"),
("BENCHMARK_5", "ALL"),
]
BENCHMARK_DATA_PATH = join(TOOLS_DATA, 'benchmarks.csv')
def benchmarks():
# CSV Data
csv_data = csv.writer(open(BENCHMARK_DATA_PATH, 'wb'))
csv_data.writerow(['Toolchain', "Target", "Benchmark", "code", "data", "bss", "flash"])
# Build
for toolchain in ['ARM', 'uARM', 'GCC_CR', 'GCC_CS', 'GCC_ARM']:
for mcu in ["LPC1768", "LPC11U24"]:
# Build Libraries
build_mbed_libs(mcu, toolchain)
# Build benchmarks
build_dir = join(BUILD_DIR, "benchmarks", mcu, toolchain)
for test_id, title in BENCHMARKS:
# Build Benchmark
try:
test = TEST_MAP[test_id]
path = build_project(test.source_dir, join(build_dir, test_id),
mcu, toolchain, test.dependencies)
base, ext = splitext(path)
# Check Size
code, data, bss, flash = get_size(base+'.elf')
csv_data.writerow([toolchain, mcu, title, code, data, bss, flash])
except Exception, e:
print "Unable to build %s for toolchain %s targeting %s" % (test_id, toolchain, mcu)
print e
def compare(t1, t2, target):
if not exists(BENCHMARK_DATA_PATH):
benchmarks()
else:
print "Loading: %s" % BENCHMARK_DATA_PATH
data = csv.reader(open(BENCHMARK_DATA_PATH, 'rb'))
benchmarks_data = defaultdict(dict)
for (toolchain, mcu, name, code, data, bss, flash) in data:
if target == mcu:
for t in [t1, t2]:
if toolchain == t:
benchmarks_data[name][t] = map(int, (code, data, bss, flash))
print "%s vs %s for %s" % (t1, t2, target)
for name, data in benchmarks_data.iteritems():
try:
# Check Size
code_a, data_a, bss_a, flash_a = data[t1]
code_u, data_u, bss_u, flash_u = data[t2]
print "\n=== %s ===" % name
print_diff("code", code_a , code_u)
print_diff("data", data_a , data_u)
print_diff("bss", bss_a , bss_u)
print_diff("flash", flash_a , flash_u)
except Exception, e:
print "No data for benchmark %s" % (name)
print e
if __name__ == '__main__':
compare("GCC_CR", "GCC_CS", "LPC1768")
|
apache-2.0
|
ioanadiana/Greengraph
|
greengraph/map.py
|
1
|
1523
|
import numpy as np
from StringIO import StringIO
from matplotlib import image as img
import requests
class Map(object):
def __init__(self, lat, long, satellite=True, zoom=10, size=(400,400), sensor=False):
base="http://maps.googleapis.com/maps/api/staticmap?"
params=dict(
sensor= str(sensor).lower(),
zoom= zoom,
size= "x".join(map(str, size)),
center= ",".join(map(str, (lat, long) )),
style="feature:all|element:labels|visibility:off"
)
if satellite:
params["maptype"]="satellite"
self.image = requests.get(base, params=params).content # Fetch our PNG image data
self.pixels= img.imread(StringIO(self.image)) # Parse our PNG image as a numpy array
def green(self, threshold):
# Use NumPy to build an element-by-element logical array
greener_than_red = self.pixels[:,:,1] > threshold* self.pixels[:,:,0]
greener_than_blue = self.pixels[:,:,1] > threshold*self.pixels[:,:,2]
green = np.logical_and(greener_than_red, greener_than_blue)
return green
def count_green(self, threshold = 1.1):
return np.sum(self.green(threshold))
def show_green(data, threshold = 1.1):
green = self.green(threshold)
out = green[:,:,np.newaxis]*array([0,1,0])[np.newaxis,np.newaxis,:]
buffer = StringIO()
result = img.imsave(buffer, out, format='png')
return buffer.getvalue()
|
gpl-2.0
|
magvugr/AT
|
EntVirtual/lib/python2.7/site-packages/pip/__init__.py
|
145
|
9450
|
#!/usr/bin/env python
import os
import optparse
import sys
import re
from pip.exceptions import InstallationError, CommandError, PipError
from pip.log import logger
from pip.util import get_installed_distributions, get_prog
from pip.vcs import git, mercurial, subversion, bazaar # noqa
from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip.commands import commands, get_summaries, get_similar_commands
# This fixes a peculiarity when importing via __import__ - as we are
# initialising the pip module, "from pip import cmdoptions" is recursive
# and appears not to work properly in that situation.
import pip.cmdoptions
cmdoptions = pip.cmdoptions
# The version as used in the setup.py and the docs conf.py
__version__ = "1.5.4"
def autocomplete():
"""Command and option completion for the main option parser (and options)
and its subcommands (and options).
Enable by sourcing one of the completion shell scripts (bash or zsh).
"""
# Don't complete if user hasn't sourced bash_completion file.
if 'PIP_AUTO_COMPLETE' not in os.environ:
return
cwords = os.environ['COMP_WORDS'].split()[1:]
cword = int(os.environ['COMP_CWORD'])
try:
current = cwords[cword - 1]
except IndexError:
current = ''
subcommands = [cmd for cmd, summary in get_summaries()]
options = []
# subcommand
try:
subcommand_name = [w for w in cwords if w in subcommands][0]
except IndexError:
subcommand_name = None
parser = create_main_parser()
# subcommand options
if subcommand_name:
# special case: 'help' subcommand has no options
if subcommand_name == 'help':
sys.exit(1)
# special case: list locally installed dists for uninstall command
if subcommand_name == 'uninstall' and not current.startswith('-'):
installed = []
lc = current.lower()
for dist in get_installed_distributions(local_only=True):
if dist.key.startswith(lc) and dist.key not in cwords[1:]:
installed.append(dist.key)
# if there are no dists installed, fall back to option completion
if installed:
for dist in installed:
print(dist)
sys.exit(1)
subcommand = commands[subcommand_name]()
options += [(opt.get_opt_string(), opt.nargs)
for opt in subcommand.parser.option_list_all
if opt.help != optparse.SUPPRESS_HELP]
# filter out previously specified options from available options
prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
options = [(x, v) for (x, v) in options if x not in prev_opts]
# filter options by current input
options = [(k, v) for k, v in options if k.startswith(current)]
for option in options:
opt_label = option[0]
# append '=' to options which require args
if option[1]:
opt_label += '='
print(opt_label)
else:
# show main parser options only when necessary
if current.startswith('-') or current.startswith('--'):
opts = [i.option_list for i in parser.option_groups]
opts.append(parser.option_list)
opts = (o for it in opts for o in it)
subcommands += [i.get_opt_string() for i in opts
if i.help != optparse.SUPPRESS_HELP]
print(' '.join([x for x in subcommands if x.startswith(current)]))
sys.exit(1)
def create_main_parser():
parser_kw = {
'usage': '\n%prog <command> [options]',
'add_help_option': False,
'formatter': UpdatingDefaultsHelpFormatter(),
'name': 'global',
'prog': get_prog(),
}
parser = ConfigOptionParser(**parser_kw)
parser.disable_interspersed_args()
pip_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
parser.version = 'pip %s from %s (python %s)' % (
__version__, pip_pkg_dir, sys.version[:3])
# add the general options
gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser)
parser.add_option_group(gen_opts)
parser.main = True # so the help formatter knows
# create command listing for description
command_summaries = get_summaries()
description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries]
parser.description = '\n'.join(description)
return parser
def parseopts(args):
parser = create_main_parser()
# Note: parser calls disable_interspersed_args(), so the result of this call
# is to split the initial args into the general options before the
# subcommand and everything else.
# For example:
# args: ['--timeout=5', 'install', '--user', 'INITools']
# general_options: ['--timeout==5']
# args_else: ['install', '--user', 'INITools']
general_options, args_else = parser.parse_args(args)
# --version
if general_options.version:
sys.stdout.write(parser.version)
sys.stdout.write(os.linesep)
sys.exit()
# pip || pip help -> print_help()
if not args_else or (args_else[0] == 'help' and len(args_else) == 1):
parser.print_help()
sys.exit()
# the subcommand name
cmd_name = args_else[0].lower()
#all the args without the subcommand
cmd_args = args[:]
cmd_args.remove(args_else[0].lower())
if cmd_name not in commands:
guess = get_similar_commands(cmd_name)
msg = ['unknown command "%s"' % cmd_name]
if guess:
msg.append('maybe you meant "%s"' % guess)
raise CommandError(' - '.join(msg))
return cmd_name, cmd_args
def main(initial_args=None):
if initial_args is None:
initial_args = sys.argv[1:]
autocomplete()
try:
cmd_name, cmd_args = parseopts(initial_args)
except PipError:
e = sys.exc_info()[1]
sys.stderr.write("ERROR: %s" % e)
sys.stderr.write(os.linesep)
sys.exit(1)
command = commands[cmd_name]()
return command.main(cmd_args)
def bootstrap():
"""
Bootstrapping function to be called from install-pip.py script.
"""
pkgs = ['pip']
try:
import setuptools
except ImportError:
pkgs.append('setuptools')
return main(['install', '--upgrade'] + pkgs + sys.argv[1:])
############################################################
## Writing freeze files
class FrozenRequirement(object):
def __init__(self, name, req, editable, comments=()):
self.name = name
self.req = req
self.editable = editable
self.comments = comments
_rev_re = re.compile(r'-r(\d+)$')
_date_re = re.compile(r'-(20\d\d\d\d\d\d)$')
@classmethod
def from_dist(cls, dist, dependency_links, find_tags=False):
location = os.path.normcase(os.path.abspath(dist.location))
comments = []
from pip.vcs import vcs, get_src_requirement
if vcs.get_backend_name(location):
editable = True
try:
req = get_src_requirement(dist, location, find_tags)
except InstallationError:
ex = sys.exc_info()[1]
logger.warn("Error when trying to get requirement for VCS system %s, falling back to uneditable format" % ex)
req = None
if req is None:
logger.warn('Could not determine repository location of %s' % location)
comments.append('## !! Could not determine repository location')
req = dist.as_requirement()
editable = False
else:
editable = False
req = dist.as_requirement()
specs = req.specs
assert len(specs) == 1 and specs[0][0] == '=='
version = specs[0][1]
ver_match = cls._rev_re.search(version)
date_match = cls._date_re.search(version)
if ver_match or date_match:
svn_backend = vcs.get_backend('svn')
if svn_backend:
svn_location = svn_backend(
).get_location(dist, dependency_links)
if not svn_location:
logger.warn(
'Warning: cannot find svn location for %s' % req)
comments.append('## FIXME: could not find svn URL in dependency_links for this package:')
else:
comments.append('# Installing as editable to satisfy requirement %s:' % req)
if ver_match:
rev = ver_match.group(1)
else:
rev = '{%s}' % date_match.group(1)
editable = True
req = '%s@%s#egg=%s' % (svn_location, rev, cls.egg_name(dist))
return cls(dist.project_name, req, editable, comments)
@staticmethod
def egg_name(dist):
name = dist.egg_name()
match = re.search(r'-py\d\.\d$', name)
if match:
name = name[:match.start()]
return name
def __str__(self):
req = self.req
if self.editable:
req = '-e %s' % req
return '\n'.join(list(self.comments) + [str(req)]) + '\n'
if __name__ == '__main__':
exit = main()
if exit:
sys.exit(exit)
|
gpl-3.0
|
mlaurenzano/PEBIL
|
scripts/event_slices.py
|
3
|
5788
|
#!/usr/bin/env python
import getopt
import sys
import os
import struct
import string
# set to 0 for unlimited
sliceLimit = 0
debug = False
verbose = False
uint64 = 'Q'
uint32 = 'I'
uint16 = 'H'
uint8 = 'B'
int64 = 'q'
int32 = 'i'
int16 = 'h'
int8 = 'b'
data_sizes = {}
data_sizes[uint64] = 8
data_sizes[int64] = 8
data_sizes[uint32] = 4
data_sizes[int32] = 4
data_sizes[uint16] = 2
data_sizes[int16] = 2
data_sizes[uint8] = 1
data_sizes[int8] = 1
def print_verbose(s):
if verbose == True:
print s
def print_debug(s):
if debug == True:
print 'debug: ' + s
def print_error(err):
print 'Error: ' + str(err)
sys.exit(1)
def print_usage(err):
print "usage : " + sys.argv[0]
print " --counters <app_slices_file> [--verbose] [--measures <app_measurements_file> --static <static_file>]"
print ""
print "example: " + sys.argv[0] + " --counters cg.B.4.slices_0000.step2 --verbose --measures raw_pmon.master --static NPB3.3-MPI/bin/cg.B.4.step2.static"
print_error(err)
def file_exists(filename):
if os.path.isfile(filename):
return True
return False
def read_unpack(fp, fmt):
ret = 0
try:
ret = struct.unpack(fmt, fp.read(data_sizes[fmt]))[0]
except struct.error, e:
raise EOFError
return ret
def diff_counts(a, b):
if len(a) != len(b):
return -1
for i in range(0,len(a),1):
if a[i] != b[i]:
return i+1
return 0
def merge_list_ranges(lst):
prev = -1
sub = []
merged = []
for item in lst:
if item-prev == 1 or prev == -1:
#print 'match ' + str(item)
# does nothing
prev = item
else:
#print 'no match ' + str(item)
if len(sub) == 1:
merged.append(str(sub[0]))
else:
merged.append(str(sub[0]) + '-' + str(sub[len(sub)-1]))
sub = []
sub.append(item)
prev = item
if len(sub) == 1:
merged.append(str(sub[0]))
elif len(sub) > 0:
merged.append(str(sub[0]) + '-' + str(sub[len(sub)-1]))
return merged
def main():
global verbose
try:
optlist, args = getopt.getopt(sys.argv[1:], '', ['counters=', 'verbose', 'measures=', 'static='])
except getopt.GetoptError, err:
print_usage(err)
sys.exit(1)
if len(args) > 0:
print_usage('extra arguments are invalid: ' + str(args))
sys.exit(1)
counterfile = ''
measurefile = ''
staticfile = ''
for i in range(0,len(optlist),1):
if optlist[i][0] == '--counters':
counterfile = optlist[i][1]
if optlist[i][0] == '--verbose':
verbose = True
if optlist[i][0] == '--measures':
measurefile = optlist[i][1]
if optlist[i][0] == '--static':
staticfile = optlist[i][1]
if counterfile == '':
print_usage('missing switch --ctrs')
if file_exists(counterfile) == False:
print_error('input file ' + counterfile + ' does not exist')
do_measure = False
if measurefile != '' or staticfile != '':
if not (measurefile != '' and staticfile != ''):
print_error('--measures and --static must be used simultaneously')
if file_exists(measurefile) == False:
print_error('input file ' + measurefile + ' does not exist')
if file_exists(staticfile) == False:
print_error('input file ' + staticfile + ' does not exist')
do_measure = True
f = open(counterfile, "rb")
numslices = 0
diffs = 0
diff_list = []
slices = []
try:
# read header
magic = read_unpack(f, uint32)
print_debug('magic number: ' + str(magic))
counters = read_unpack(f, uint32)
print_debug('number of counters: ' + str(counters))
for i in range(8,32):
read_unpack(f, uint8)
prev_counters = []
while True:
counter_counts = [read_unpack(f, uint64) for x in range(0,counters,1)]
counter_diff = diff_counts(prev_counters, counter_counts)
if counter_diff != 0:
diffs += 1
else:
diff_list.append(numslices)
if len(prev_counters) == 0:
slices.append(counter_counts)
else:
slices.append([counter_counts[i] - prev_counters[i] for i in range(len(counter_counts))])
print_verbose('1 ' + string.join([str(x) for x in slices[len(slices)-1]],' '))
prev_counters = counter_counts
numslices += 1
# not really an error... this is kind of a sloppy way of breaking out of the file read loop
except EOFError:
f.close()
print_debug('found ' + str(numslices) + ' slices of size ' + str(len(slices[0])) + ', ' + str(diffs) + ' are different than prev')
print_debug(merge_list_ranges(diff_list))
if do_measure == False:
return
assert(measurefile != '')
assert(staticfile != '')
measures = []
f = open(measurefile, 'r')
raw = f.readlines()
measures = [line.strip().split() for line in raw]
statics = []
f = open(staticfile, 'r')
raw = f.readlines()
for line in raw:
line = line.strip().split()
if len(line) > 0:
if not (line[0].startswith('#') or line[0].startswith('+')):
assert(int(line[0]) == len(statics))
try:
hashcode = int(line[1])
statics.append(hashcode)
except ValueError, e:
print_error('parser error in static file: ' + line)
#print statics
assert(len(statics) == len(slices[0]))
if __name__ == '__main__':
main()
|
gpl-3.0
|
yetu/repotools
|
win_toolchain/get_toolchain_if_necessary.py
|
4
|
9892
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Downloads and unpacks a toolchain for building on Windows. The contents are
matched by sha1 which will be updated when the toolchain is updated.
Having a toolchain script in depot_tools means that it's not versioned
directly with the source code. That is, if the toolchain is upgraded, but
you're trying to build an historical version of Chromium from before the
toolchain upgrade, this will cause you to build with a newer toolchain than
was available when that code was committed. This is done for a two main
reasons: 1) it would likely be annoying to have the up-to-date toolchain
removed and replaced by one without a service pack applied); 2) it would
require maintaining scripts that can build older not-up-to-date revisions of
the toolchain. This is likely to be a poorly tested code path that probably
won't be properly maintained. See http://crbug.com/323300.
This does not extend to major versions of the toolchain however, on the
assumption that there are more likely to be source incompatibilities between
major revisions. This script calls a subscript (currently, toolchain2013.py)
to do the main work. It is expected that toolchain2013.py will always be able
to acquire/build the most current revision of a VS2013-based toolchain. In the
future when a hypothetical VS2015 is released, the 2013 script will be
maintained, and a new 2015 script would be added.
"""
import hashlib
import json
import optparse
import os
import shutil
import subprocess
import sys
import time
BASEDIR = os.path.dirname(os.path.abspath(__file__))
DEPOT_TOOLS_PATH = os.path.join(BASEDIR, '..')
sys.path.append(DEPOT_TOOLS_PATH)
import download_from_google_storage
if sys.platform != 'cygwin':
import ctypes.wintypes
GetFileAttributes = ctypes.windll.kernel32.GetFileAttributesW
GetFileAttributes.argtypes = (ctypes.wintypes.LPWSTR,)
GetFileAttributes.restype = ctypes.wintypes.DWORD
FILE_ATTRIBUTE_HIDDEN = 0x2
FILE_ATTRIBUTE_SYSTEM = 0x4
def IsHidden(file_path):
"""Returns whether the given |file_path| has the 'system' or 'hidden'
attribute set."""
p = GetFileAttributes(file_path)
assert p != 0xffffffff
return bool(p & (FILE_ATTRIBUTE_HIDDEN | FILE_ATTRIBUTE_SYSTEM))
def GetFileList(root):
"""Gets a normalized list of files under |root|."""
assert not os.path.isabs(root)
assert os.path.normpath(root) == root
file_list = []
for base, _, files in os.walk(root):
paths = [os.path.join(base, f) for f in files]
file_list.extend(x.lower() for x in paths if not IsHidden(x))
return sorted(file_list)
def MakeTimestampsFileName(root):
return os.path.join(root, '..', '.timestamps')
def CalculateHash(root):
"""Calculates the sha1 of the paths to all files in the given |root| and the
contents of those files, and returns as a hex string."""
file_list = GetFileList(root)
# Check whether we previously saved timestamps in $root/../.timestamps. If
# we didn't, or they don't match, then do the full calculation, otherwise
# return the saved value.
timestamps_file = MakeTimestampsFileName(root)
timestamps_data = {'files': [], 'sha1': ''}
if os.path.exists(timestamps_file):
with open(timestamps_file, 'rb') as f:
try:
timestamps_data = json.load(f)
except ValueError:
# json couldn't be loaded, empty data will force a re-hash.
pass
matches = len(file_list) == len(timestamps_data['files'])
if matches:
for disk, cached in zip(file_list, timestamps_data['files']):
if disk != cached[0] or os.stat(disk).st_mtime != cached[1]:
matches = False
break
if matches:
return timestamps_data['sha1']
digest = hashlib.sha1()
for path in file_list:
digest.update(path)
with open(path, 'rb') as f:
digest.update(f.read())
return digest.hexdigest()
def SaveTimestampsAndHash(root, sha1):
"""Saves timestamps and the final hash to be able to early-out more quickly
next time."""
file_list = GetFileList(root)
timestamps_data = {
'files': [[f, os.stat(f).st_mtime] for f in file_list],
'sha1': sha1,
}
with open(MakeTimestampsFileName(root), 'wb') as f:
json.dump(timestamps_data, f)
def HaveSrcInternalAccess():
"""Checks whether access to src-internal is available."""
with open(os.devnull, 'w') as nul:
if subprocess.call(
['svn', 'ls', '--non-interactive',
'svn://svn.chromium.org/chrome-internal/trunk/src-internal/'],
shell=True, stdin=nul, stdout=nul, stderr=nul) == 0:
return True
return subprocess.call(
['git', '-c', 'core.askpass=true', 'remote', 'show',
'https://chrome-internal.googlesource.com/chrome/src-internal/'],
shell=True, stdin=nul, stdout=nul, stderr=nul) == 0
def LooksLikeGoogler():
"""Checks for a USERDOMAIN environment variable of 'GOOGLE', which
probably implies the current user is a Googler."""
return os.environ.get('USERDOMAIN').upper() == 'GOOGLE'
def CanAccessToolchainBucket():
"""Checks whether the user has access to gs://chrome-wintoolchain/."""
gsutil = download_from_google_storage.Gsutil(
download_from_google_storage.GSUTIL_DEFAULT_PATH, boto_path=None)
code, _, _ = gsutil.check_call('ls', 'gs://chrome-wintoolchain/')
return code == 0
def RequestGsAuthentication():
"""Requests that the user authenticate to be able to access gs:// as a
Googler. This allows much faster downloads, and pulling (old) toolchains
that match src/ revisions.
"""
print 'Access to gs://chrome-wintoolchain/ not configured.'
print '-----------------------------------------------------------------'
print
print 'You appear to be a Googler.'
print
print 'I\'m sorry for the hassle, but you need to do a one-time manual'
print 'authentication. Please run:'
print
print ' download_from_google_storage --config'
print
print 'and follow the instructions.'
print
print 'NOTE 1: Use your google.com credentials, not chromium.org.'
print 'NOTE 2: Just press Enter when asked for a "project-id".'
print
print '-----------------------------------------------------------------'
print
sys.stdout.flush()
sys.exit(1)
def DelayBeforeRemoving(target_dir):
"""A grace period before deleting the out of date toolchain directory."""
if (os.path.isdir(target_dir) and
not bool(int(os.environ.get('CHROME_HEADLESS', '0')))):
for i in range(9, 0, -1):
sys.stdout.write(
'\rRemoving old toolchain in %ds... (Ctrl-C to cancel)' % i)
sys.stdout.flush()
time.sleep(1)
print
def main():
if not sys.platform.startswith(('cygwin', 'win32')):
return 0
parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
parser.add_option('--output-json', metavar='FILE',
help='write information about toolchain to FILE')
options, args = parser.parse_args()
if sys.platform == 'cygwin':
# This script requires Windows Python, so invoke with depot_tools' Python.
def winpath(path):
return subprocess.check_output(['cygpath', '-w', path]).strip()
python = os.path.join(DEPOT_TOOLS_PATH, 'python.bat')
cmd = [python, winpath(__file__)]
if options.output_json:
cmd.extend(['--output-json', winpath(options.output_json)])
cmd.extend(args)
sys.exit(subprocess.call(cmd))
# We assume that the Pro hash is the first one.
desired_hashes = args
if len(desired_hashes) == 0:
sys.exit('Desired hashes are required.')
# Move to depot_tools\win_toolchain where we'll store our files, and where
# the downloader script is.
os.chdir(os.path.normpath(os.path.join(BASEDIR)))
toolchain_dir = '.'
target_dir = os.path.normpath(os.path.join(toolchain_dir, 'vs2013_files'))
# If the current hash doesn't match what we want in the file, nuke and pave.
# Typically this script is only run when the .sha1 one file is updated, but
# directly calling "gclient runhooks" will also run it, so we cache
# based on timestamps to make that case fast.
current_hash = CalculateHash(target_dir)
if current_hash not in desired_hashes:
should_use_gs = False
if HaveSrcInternalAccess() or LooksLikeGoogler():
should_use_gs = True
if not CanAccessToolchainBucket():
RequestGsAuthentication()
print('Windows toolchain out of date or doesn\'t exist, updating (%s)...' %
('Pro' if should_use_gs else 'Express'))
print(' current_hash: %s' % current_hash)
print(' desired_hashes: %s' % ', '.join(desired_hashes))
sys.stdout.flush()
DelayBeforeRemoving(target_dir)
# This stays resident and will make the rmdir below fail.
with open(os.devnull, 'wb') as nul:
subprocess.call(['taskkill', '/f', '/im', 'mspdbsrv.exe'],
stdin=nul, stdout=nul, stderr=nul)
if os.path.isdir(target_dir):
subprocess.check_call('rmdir /s/q "%s"' % target_dir, shell=True)
args = [sys.executable,
'toolchain2013.py',
'--targetdir', target_dir,
'--sha1', desired_hashes[0]]
if should_use_gs:
args.append('--use-gs')
else:
args.append('--express')
subprocess.check_call(args)
current_hash = CalculateHash(target_dir)
if current_hash not in desired_hashes:
print >> sys.stderr, (
'Got wrong hash after pulling a new toolchain. '
'Wanted one of \'%s\', got \'%s\'.' % (
', '.join(desired_hashes), current_hash))
return 1
SaveTimestampsAndHash(target_dir, current_hash)
if options.output_json:
shutil.copyfile(os.path.join(target_dir, '..', 'data.json'),
options.output_json)
return 0
if __name__ == '__main__':
sys.exit(main())
|
bsd-3-clause
|
jackrzhang/zulip
|
tools/documentation_crawler/documentation_crawler/spiders/check_help_documentation.py
|
3
|
3112
|
import os
from posixpath import basename
from urllib.parse import urlparse
from .common.spiders import BaseDocumentationSpider
from typing import Any, List, Set
def get_images_dir(images_path: str) -> str:
# Get index html file as start url and convert it to file uri
dir_path = os.path.dirname(os.path.realpath(__file__))
target_path = os.path.join(dir_path, os.path.join(*[os.pardir] * 4), images_path)
return os.path.realpath(target_path)
class UnusedImagesLinterSpider(BaseDocumentationSpider):
images_path = ""
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.static_images = set() # type: Set[str]
self.images_static_dir = get_images_dir(self.images_path) # type: str
def _is_external_url(self, url: str) -> bool:
is_external = url.startswith('http') and self.start_urls[0] not in url
if self._has_extension(url) and 'localhost:9981/{}'.format(self.images_path) in url:
self.static_images.add(basename(urlparse(url).path))
return is_external or self._has_extension(url)
def closed(self, *args: Any, **kwargs: Any) -> None:
unused_images = set(os.listdir(self.images_static_dir)) - self.static_images
if unused_images:
exception_message = "The following images are not used in documentation " \
"and can be removed: {}"
self._set_error_state()
unused_images_relatedpath = [
os.path.join(self.images_path, img) for img in unused_images]
raise Exception(exception_message.format(', '.join(unused_images_relatedpath)))
class HelpDocumentationSpider(UnusedImagesLinterSpider):
name = "help_documentation_crawler"
start_urls = ['http://localhost:9981/help']
deny_domains = [] # type: List[str]
deny = ['/privacy']
images_path = "static/images/help"
class APIDocumentationSpider(UnusedImagesLinterSpider):
name = 'api_documentation_crawler'
start_urls = ['http://localhost:9981/api']
deny_domains = [] # type: List[str]
images_path = "static/images/api"
class PorticoDocumentationSpider(BaseDocumentationSpider):
name = 'portico_documentation_crawler'
start_urls = ['http://localhost:9981/hello',
'http://localhost:9981/history',
'http://localhost:9981/plans',
'http://localhost:9981/team',
'http://localhost:9981/apps',
'http://localhost:9981/integrations',
'http://localhost:9981/terms',
'http://localhost:9981/privacy',
'http://localhost:9981/features',
'http://localhost:9981/why-zulip',
'http://localhost:9981/for/open-source',
'http://localhost:9981/for/companies',
'http://localhost:9981/for/working-groups-and-communities',
'http://localhost:9981/for/mystery-hunt',
'http://localhost:9981/security']
deny_domains = [] # type: List[str]
|
apache-2.0
|
maiklos-mirrors/jfx78
|
modules/web/src/main/native/Tools/Scripts/webkitpy/tool/steps/update.py
|
124
|
2342
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.tool.steps.abstractstep import AbstractStep
from webkitpy.tool.steps.options import Options
_log = logging.getLogger(__name__)
class Update(AbstractStep):
@classmethod
def options(cls):
return AbstractStep.options() + [
Options.non_interactive,
Options.update,
Options.quiet,
]
def run(self, state):
if not self._options.update:
return
_log.info("Updating working directory")
self._tool.executive.run_and_throw_if_fail(self._update_command(), quiet=self._options.quiet, cwd=self._tool.scm().checkout_root)
def _update_command(self):
update_command = self._tool.deprecated_port().update_webkit_command(self._options.non_interactive)
return update_command
|
gpl-2.0
|
elpaso/QGIS
|
python/plugins/processing/algs/grass7/ext/v_extrude.py
|
45
|
1461
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
v_extrude.py
------------
Date : March 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'March 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
def checkParameterValuesBeforeExecuting(alg, parameters, context):
""" Verify if we have the right parameters """
height = alg.parameterAsDouble(parameters, 'height', context)
height_column = alg.parameterAsString(parameters, 'height_column', context)
if (height and height_column) or (not height and not height_column):
return False, alg.tr("You need to set either a fixed height value or the height column!")
return True, None
|
gpl-2.0
|
xiangel/hue
|
desktop/core/ext-py/Django-1.6.10/tests/servers/test_basehttp.py
|
45
|
2207
|
import sys
from django.core.servers.basehttp import WSGIRequestHandler
from django.test import TestCase
from django.utils.six import BytesIO, StringIO
class Stub(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class WSGIRequestHandlerTestCase(TestCase):
def test_strips_underscore_headers(self):
"""WSGIRequestHandler ignores headers containing underscores.
This follows the lead of nginx and Apache 2.4, and is to avoid
ambiguity between dashes and underscores in mapping to WSGI environ,
which can have security implications.
"""
def test_app(environ, start_response):
"""A WSGI app that just reflects its HTTP environ."""
start_response('200 OK', [])
http_environ_items = sorted(
'%s:%s' % (k, v) for k, v in environ.items()
if k.startswith('HTTP_')
)
yield (','.join(http_environ_items)).encode('utf-8')
rfile = BytesIO()
rfile.write(b"GET / HTTP/1.0\r\n")
rfile.write(b"Some-Header: good\r\n")
rfile.write(b"Some_Header: bad\r\n")
rfile.write(b"Other_Header: bad\r\n")
rfile.seek(0)
# WSGIRequestHandler closes the output file; we need to make this a
# no-op so we can still read its contents.
class UnclosableBytesIO(BytesIO):
def close(self):
pass
wfile = UnclosableBytesIO()
def makefile(mode, *a, **kw):
if mode == 'rb':
return rfile
elif mode == 'wb':
return wfile
request = Stub(makefile=makefile)
server = Stub(base_environ={}, get_app=lambda: test_app)
# We don't need to check stderr, but we don't want it in test output
old_stderr = sys.stderr
sys.stderr = StringIO()
try:
# instantiating a handler runs the request as side effect
WSGIRequestHandler(request, '192.168.0.2', server)
finally:
sys.stderr = old_stderr
wfile.seek(0)
body = list(wfile.readlines())[-1]
self.assertEqual(body, b'HTTP_SOME_HEADER:good')
|
apache-2.0
|
coobas/pydons
|
doc/conf.py
|
1
|
8546
|
# -*- coding: utf-8 -*-
#
# pydons documentation build configuration file, created by
# sphinx-quickstart on Sat Aug 9 22:55:05 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import mock
# from http://blog.rtwilson.com/how-to-make-your-sphinx-documentation-compile-with-readthedocs-when-youre-using-numpy-and-scipy/
MOCK_MODULES = ['numpy', 'hdf5storage', 'h5py', 'hdf5storage.Marshallers',
'hdf5storage.utilities', 'hdf5storage.lowlevel', 'netCDF4']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pydons'
copyright = u'2014, Jakub Urban'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2.3'
# The full version, including alpha/beta/rc tags.
release = '0.2.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pydonsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'pydons.tex', u'pydons Documentation',
u'Jakub Urban', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pydons', u'pydons Documentation',
[u'Jakub Urban'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pydons', u'pydons Documentation',
u'Jakub Urban', 'pydons', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
mit
|
sjperkins/tensorflow
|
tensorflow/python/estimator/inputs/pandas_io.py
|
86
|
4503
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.estimator.inputs.queues import feeding_functions
try:
# pylint: disable=g-import-not-at-top
# pylint: disable=unused-import
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def pandas_input_fn(x,
y=None,
batch_size=128,
num_epochs=1,
shuffle=None,
queue_capacity=1000,
num_threads=1,
target_column='target'):
"""Returns input function that would feed Pandas DataFrame into the model.
Note: `y`'s index must match `x`'s index.
Args:
x: pandas `DataFrame` object.
y: pandas `Series` object. `None` if absent.
batch_size: int, size of batches to return.
num_epochs: int, number of epochs to iterate over data. If not `None`,
read attempts that would exceed this value will raise `OutOfRangeError`.
shuffle: bool, whether to read the records in random order.
queue_capacity: int, size of the read queue. If `None`, it will be set
roughly to the size of `x`.
num_threads: Integer, number of threads used for reading and enqueueing. In
order to have predicted and repeatable order of reading and enqueueing,
such as in prediction and evaluation mode, `num_threads` should be 1.
target_column: str, name to give the target column `y`.
Returns:
Function, that has signature of ()->(dict of `features`, `target`)
Raises:
ValueError: if `x` already contains a column with the same name as `y`, or
if the indexes of `x` and `y` don't match.
TypeError: `shuffle` is not bool.
"""
if not HAS_PANDAS:
raise TypeError(
'pandas_input_fn should not be called without pandas installed')
if not isinstance(shuffle, bool):
raise TypeError('shuffle must be explicitly set as boolean; '
'got {}'.format(shuffle))
x = x.copy()
if y is not None:
if target_column in x:
raise ValueError(
'Cannot use name %s for target column: DataFrame already has a '
'column with that name: %s' % (target_column, x.columns))
if not np.array_equal(x.index, y.index):
raise ValueError('Index for x and y are mismatched.\nIndex for x: %s\n'
'Index for y: %s\n' % (x.index, y.index))
x[target_column] = y
# TODO(mdan): These are memory copies. We probably don't need 4x slack space.
# The sizes below are consistent with what I've seen elsewhere.
if queue_capacity is None:
if shuffle:
queue_capacity = 4 * len(x)
else:
queue_capacity = len(x)
min_after_dequeue = max(queue_capacity / 4, 1)
def input_fn():
"""Pandas input function."""
queue = feeding_functions._enqueue_data( # pylint: disable=protected-access
x,
queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads,
enqueue_size=batch_size,
num_epochs=num_epochs)
if num_epochs is None:
features = queue.dequeue_many(batch_size)
else:
features = queue.dequeue_up_to(batch_size)
assert len(features) == len(x.columns) + 1, ('Features should have one '
'extra element for the index.')
features = features[1:]
features = dict(zip(list(x.columns), features))
if y is not None:
target = features.pop(target_column)
return features, target
return features
return input_fn
|
apache-2.0
|
ldotlopez/pypes
|
pypes/elements.py
|
1
|
6448
|
import pickle
from urllib.request import urlopen
from ldotcommons.utils import get_debugger
from .core import Element, Filter, Transformer, \
Empty, EOF
class Adder(Transformer):
def transform(self, x):
return x + self.kwargs.get('amount', 0)
class CustomTransformer(Transformer):
def __init__(self, func=None, *args, **kwargs):
super(CustomTransformer, self).__init__(*args, **kwargs)
if not callable(func):
raise ValueError('func is not a callable')
self.func = func
def transform(self, input):
return self.func(input)
class CustomFilter(Filter):
def __init__(self, func=None, *args, **kwargs):
super(CustomTransformer, self).__init__(*args, **kwargs)
if not callable(func):
raise ValueError('func is not a callable')
self.func = func
def filter(self, input):
return self.func(input)
class Debugger(Element):
def run(self):
get_debugger().set_trace()
try:
self.put(self.get())
return True
except Empty:
return False
except EOF:
self.finish()
class DictFixer(Transformer):
"""
Applies changes to dicts
Parameters:
- override: Boolean to control if values should be overriden (forced)
- values: dict with new values
"""
def transform(self, packet):
if not isinstance(packet, dict):
raise ValueError('Packet is not a dict object')
override = self.kwargs.get('override', False)
values = self.kwargs.get('values', {})
if not override:
values = {key: value for (key, value) in values.items() if key not in packet}
packet.update(values)
return packet
class DictFilter(Transformer):
def transform(self, packet):
if not isinstance(packet, dict):
raise ValueError('Packet is not a dict object')
keys = self.kwargs.get('keys', None)
if keys is None:
return packet
return {key: value for (key, value) in packet.items() if key in keys}
class FetcherProcessor(Transformer):
def transform(self, url):
return self.kwargs.get('fetcher').fetch(url)
class GeneratorSrc(Element):
def __init__(self, generator=None, iterations=-1, *args, **kwargs):
super(GeneratorSrc, self).__init__(*args, **kwargs)
self.g = generator
self.i = 0
self.n = iterations
def run(self):
if self.n >= 0 and self.i >= self.n:
self.finish()
try:
packet = next(self.g)
self.put(packet)
self.i += 1
except StopIteration:
self.finish()
class Head(Filter):
def __init__(self, n=-1, *args, **kwargs):
super(Head, self).__init__(*args, **kwargs)
self.n = n
self.i = 0
def filter(self, packet):
r = self.n == -1 or self.i < self.n
self.i += 1
return r
class HttpSrc(Element):
def run(self):
buff = urlopen(self.kwargs.get('url')).read()
self.put(buff)
self.finish()
class NullSrc(Element):
def run(self):
self.finish()
class NullSink(Element):
def run(self):
try:
self.get()
return True
except Empty:
return False
except EOF:
self.finish()
class Packer(Element):
def __init__(self, *args, **kwargs):
super(Packer, self).__init__(self, *args, **kwargs)
self._packets = []
def run(self):
try:
self._packets.append(self.get())
return True
except Empty:
return False
except EOF:
self.put(self._packets)
self.finish()
class PickleSrc(Element):
def __init__(self, filename=None):
fh = open(filename)
self.packets = pickle.load(fh)
fh.close()
def run(self):
try:
packet = self.packets.pop()
self.put(packet)
except IndexError:
self.finish()
class PickleSink(Element):
def __init__(self, **kwargs):
filename = kwargs.pop('filename', None)
super(PickleSink, self).__init__(**kwargs)
self.filename = filename
self.packets = []
def run(self):
try:
self.packets.append(self.get())
return True
except Empty:
return False
except EOF:
fh = open(self.filename, 'wb+')
pickle.dump(self.packets, fh)
fh.close()
self.finish()
class SampleSrc(Element):
def run(self):
try:
sample = self.kwargs.get('sample', [])
self.put(sample.pop(0))
return True
except IndexError:
self.finish()
class StoreSink(Element):
def __init__(self, *args, **kwargs):
super(StoreSink, self).__init__(*args, **kwargs)
self.packets = []
def run(self):
try:
self.packets.append(self.get())
return True
except Empty:
return False
except EOF:
self.finish()
class Tee(Element):
def __init__(self, *args, n_outputs=1, output_pattern='tee_%02d', **kwargs):
super(Tee, self).__init__(args, **kwargs)
self.n_outputs = n_outputs
self.output_pattern = output_pattern
def run(self):
try:
packet = self.get()
self.put(packet, self.output_pattern % 0)
for n in range(1, self.n_outputs):
copy = pickle.loads(pickle.dumps(packet))
self.put(copy, self.output_pattern % n)
return True
except Empty:
return False
except EOF:
self.finish()
class Zip(Element):
def __init__(self, n_inputs=1, input_pattern='zip_%02d'):
super(Zip, self).__init__(n_inputs=n_inputs, input_pattern=input_pattern)
self.inputs = [input_pattern % x for x in range(n_inputs)]
def run(self):
if not self.inputs:
self.finish()
input = self.inputs.pop(0)
try:
x = self.get(input)
self.put(x)
self.inputs.append(input)
return True
except Empty:
self.inputs.append(input)
return False
except EOF:
return False
|
gpl-2.0
|
stdevel/arsa
|
arsa.py
|
1
|
9705
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# arsa.py - a script for archiving and removing old
# Spacewalk, Red Hat Satellite 5.x or SUSE Manager actions.
#
# 2016 By Christian Stankowic
# <info at stankowic hyphen development dot net>
# https://github.com/stdevel
#
from optparse import OptionParser, OptionGroup
import logging
import xmlrpclib
import os
import stat
import getpass
import time
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
#set logger
LOGGER = logging.getLogger('arsa.py')
#list of supported API levels
supported_api = ["11.1","12","13","13.0","14","14.0","15","15.0","16","16.0","17","17.0"]
def clean_actions():
#clean _all_ the actions
#setup client and key depending on mode
client = xmlrpclib.Server(satellite_url, verbose=options.debug)
if options.authfile:
#use authfile
LOGGER.debug("Using authfile")
try:
#check filemode and read file
filemode = oct(stat.S_IMODE(os.lstat(options.authfile).st_mode))
if filemode == "0600":
LOGGER.debug("File permission ({0}) matches 0600".format(filemode))
fo = open(options.authfile, "r")
s_username=fo.readline().replace("\n", "")
s_password=fo.readline().replace("\n", "")
key = client.auth.login(s_username, s_password)
else:
LOGGER.error("File permission ({0}) not matching 0600!".format(filemode))
exit(1)
except OSError:
LOGGER.error("File non-existent or permissions not 0600!")
exit(1)
elif "SATELLITE_LOGIN" in os.environ and "SATELLITE_PASSWORD" in os.environ:
#shell variables
LOGGER.debug("Checking shell variables")
key = client.auth.login(os.environ["SATELLITE_LOGIN"], os.environ["SATELLITE_PASSWORD"])
else:
#prompt user
LOGGER.debug("Prompting for login credentials")
s_username = raw_input("Username: ")
s_password = getpass.getpass("Password: ")
key = client.auth.login(s_username, s_password)
#check whether the API version matches the minimum required
api_level = client.api.getVersion()
if not api_level in supported_api:
LOGGER.error("Your API version ({0}) does not support the needed calls. You need API version 1.8 (11.1) or higher!".format(api_level))
exit(1)
else:
LOGGER.debug("Supported API version ({0}) fround".format(api_level))
#retrieve completed, already archived and failed actions
to_archive = []
completed_actions = client.schedule.listCompletedActions(key)
archived_actions = client.schedule.listArchivedActions(key)
failed_actions = client.schedule.listFailedActions(key)
#what to consider as a system task
system_tasks = [ 'Show differences between', 'Activation Key Package Auto-Install', 'Package List Refresh', 'Hardware List Refresh' ]
#print actions
LOGGER.debug("Completed:\n{0}Archived:\n{1}".format(completed_actions, archived_actions))
#go through completed actions and remove them if wanted
if options.dry_run: LOGGER.info("Things I'd like to clean (completed):\n")
for entry in completed_actions:
if options.only_system_tasks:
for task in system_tasks:
if task in entry["name"]:
LOGGER.info("Found completed system task action #{0} ({1})...".format(entry['id'], entry['name']))
to_archive.append(entry["id"])
else:
LOGGER.info("Found completed action #{0} ({1})...".format(entry['id'], entry['name']))
to_archive.append(entry["id"])
#also clean-up already archived actions if wanted
if options.remove_all:
#remove archived actions
if options.dry_run: LOGGER.info("Things I'd like to remove (archived):\n")
for entry in archived_actions:
if options.only_system_tasks:
for task in system_tasks:
if task in entry["name"]:
LOGGER.info("Found archived system task action #{0} ({1})...".format(entry['id'], entry['name']))
to_archive.append(entry["id"])
else:
LOGGER.info("Found archvied action #{0} ({1})...".format(entry['id'], entry['name']))
to_archive.append(entry["id"])
#also clean-up failed actions if wanted
if options.include_failed:
#remove failed actions
if options.dry_run: LOGGER.info("Things I'd like to remove (failed):\n")
for entry in failed_actions:
if options.only_system_tasks:
for task in system_tasks:
if task in entry["name"]:
LOGGER.info("Found failed system task action #{0} ({1})...".format(entry['id'], entry['name']))
to_archive.append(entry["id"])
else:
LOGGER.info("Found failed action #{0} ({1})...".format(entry['id'], entry['name']))
to_archive.append(entry["id"])
#archive (and remove) actions if wanted
LOGGER.debug("\nto_archive: {0}".format(str(to_archive)))
#removing duplicate entries
to_archive = list(set(to_archive))
#remove actions if dry_run not set
if options.dry_run == False:
LOGGER.info("Archiving actions...")
#enable 100 actions-per-call workaround if we found hundreds of actions
if len(to_archive) > 100:
LOGGER.debug("Enabling workaround to archive/delete 100+ actions...")
temp_actions = []
for action in to_archive:
if len(temp_actions) != 100:
temp_actions.append(action)
else:
LOGGER.debug("Removing actions: {0}".format(str(temp_actions)))
client.schedule.archiveActions(key,temp_actions)
time.sleep(.5)
if options.remove_all:
client.schedule.deleteActions(key,temp_actions)
time.sleep(.5)
temp_actions = []
else:
client.schedule.archiveActions(key,to_archive)
if options.remove_all:
client.schedule.deleteActions(key,to_archive)
else:
LOGGER.info("Stopping here as we don't really want to clean things up")
#logout and exit
client.auth.logout(key)
if __name__ == "__main__":
#define description, version and load parser
desc='''%prog is used to archive completed actions and remove archived actions on Spacewalk, Red Hat Satellite 5.x and SUSE Manager. Login credentials are assigned using the following shell variables:
SATELLITE_LOGIN username
SATELLITE_PASSWORD password
It is also possible to create an authfile (permissions 0600) for usage with this script. The first line needs to contain the username, the second line should consist of the appropriate password.
If you're not defining variables or an authfile you will be prompted to enter your login information.
Checkout the GitHub page for updates: https://github.com/stdevel/arsa'''
parser = OptionParser(description=desc,version="%prog version 0.4.1")
#define option groups
gen_opts = OptionGroup(parser, "Generic Options")
sat_opts = OptionGroup(parser, "Satellite Options")
parser.add_option_group(gen_opts)
parser.add_option_group(sat_opts)
#-a / --authfile
sat_opts.add_option("-a", "--authfile", dest="authfile", metavar="FILE", default="", help="defines an auth file to use instead of shell variables")
#-s / --server
sat_opts.add_option("-s", "--server", dest="server", metavar="SERVER", default="localhost", help="defines the server to use (default: localhost)")
#-d / --debug
gen_opts.add_option("-d", "--debug", dest="debug", default=False, action="store_true", help="enable debugging outputs (default: no)")
#-r / --remove
sat_opts.add_option("-r", "--remove", dest="remove_all", default=False, action="store_true", help="archives completed actions and removes all archived actions (default: no)")
#-n / --dry-run
sat_opts.add_option("-n", "--dry-run", dest="dry_run", default=False, action="store_true", help="only lists actions that would be archived (default: no)")
#-f / --include-failed
sat_opts.add_option("-f", "--include-failed", dest="include_failed", default=False, action="store_true", help="also include failed actions (default: no)")
#-t / --system-tasks
sat_opts.add_option("-t", "--only-system-tasks", dest="only_system_tasks", default=False, action="store_true", help="only consider automated system tasks such as package list refresh (default: no)")
#parse arguments
(options, args) = parser.parse_args()
#define URL and login information
satellite_url = "http://"+options.server+"/rpc/api"
#set logger level
if options.debug:
logging.basicConfig(level=logging.DEBUG)
LOGGER.setLevel(logging.DEBUG)
else:
logging.basicConfig()
LOGGER.setLevel(logging.INFO)
#clean actions
clean_actions()
|
gpl-3.0
|
0359xiaodong/viewfinder
|
backend/base/secrets_tool.py
|
13
|
4089
|
#!/usr/bin/env python
#
# Copyright 2011 Viewfinder Inc. All Rights Reserved.
"""Command-line tool for creating and encrypting secrets using the
secrets_manager module.
% python -m viewfinder.backend.base.secrets_tool \
--secrets_mode={list_secrets, encrypt_secrets, get_secret,
put_secret, put_crypt_keyset}
"""
__author__ = '[email protected] (Spencer Kimball)'
import json
import logging
import sys
from tornado import ioloop, options
from viewfinder.backend.base import base_options # imported for option definitions
from viewfinder.backend.base import secrets, util
options.define('secrets_mode', 'list_secrets',
help='mode for command line operation; see help text in module')
options.define('secret', '', help='name of the secret to put or get')
options.define('shared', default=True,
help='work on the shared secrets manager. If false, use the user secrets manager')
def _GetSecretsManager():
if options.options.shared:
return secrets.GetSharedSecretsManager()
else:
return secrets.GetUserSecretsManager()
def _ListSecrets(io_loop):
"""Lists all secrets."""
for f in _GetSecretsManager().ListSecrets():
print ' %s' % f
io_loop.stop()
def _GetSecret(io_loop, secret):
"""Get a secret by name and output to stdout."""
print '%s:\n%s' % (secret, _GetSecretsManager().GetSecret(secret))
io_loop.stop()
def _PutSecret(io_loop, secret):
"""Reads the new secret from stdin and writes to secrets subdir."""
_GetSecretsManager().PutSecret(secret, sys.stdin.read())
io_loop.stop()
def _PutCryptKeyset(io_loop, secret):
"""Creates a new Keyczar crypt keyset used for encryption and decryption
and writes it to secrets subdir."""
_GetSecretsManager().PutSecret(secret, json.dumps(secrets.CreateCryptKeyset(secret)))
io_loop.stop()
def _PutSigningKeyset(io_loop, secret):
"""Creates a new Keyczar crypt keyset used for signing and signature
verification and writes it to secrets subdir."""
_GetSecretsManager().PutSecret(secret, json.dumps(secrets.CreateSigningKeyset(secret)))
io_loop.stop()
def _EncryptSecrets(io_loop):
"""Lists all secrets files and encrypts each in turn. The passphrase
for encryption is solicited twice for confirmation.
"""
print 'Initializing existing secrets manager...'
ex_sm = _GetSecretsManager()
print 'Initializing new secrets manager...'
if options.options.shared:
new_sm = secrets.SecretsManager('shared', options.options.domain, options.options.secrets_dir)
else:
new_sm = secrets.SecretsManager('user', options.options.domain, options.options.user_secrets_dir)
new_sm.Init(should_prompt=True, query_twice=True)
print 'Encrypting secrets...'
for secret in ex_sm.ListSecrets():
print ' %s' % secret
new_sm.PutSecret(secret, ex_sm.GetSecret(secret))
io_loop.stop()
def main():
"""Parses command line options and, if directed, executes some operation
to transform or create secrets from the command line.
"""
io_loop = ioloop.IOLoop.current()
options.parse_command_line()
def _OnException(type, value, traceback):
logging.error('failed %s' % options.options.secrets_mode, exc_info=(type, value, traceback))
io_loop.stop()
sys.exit(1)
with util.ExceptionBarrier(_OnException):
if options.options.secrets_mode == 'list_secrets':
_ListSecrets(io_loop)
elif options.options.secrets_mode == 'get_secret':
_GetSecret(io_loop, options.options.secret)
elif options.options.secrets_mode == 'put_secret':
_PutSecret(io_loop, options.options.secret)
elif options.options.secrets_mode == 'put_crypt_keyset':
_PutCryptKeyset(io_loop, options.options.secret)
elif options.options.secrets_mode == 'put_signing_keyset':
_PutSigningKeyset(io_loop, options.options.secret)
elif options.options.secrets_mode == 'encrypt_secrets':
_EncryptSecrets(io_loop)
else:
raise Exception('unknown secrets_mode: %s' % options.options.secrets_mode)
io_loop.start()
if __name__ == '__main__':
sys.exit(main())
|
apache-2.0
|
vitmod/enigma2-test
|
lib/python/Screens/About.py
|
2
|
19638
|
from Screen import Screen
from Components.ActionMap import ActionMap
from Components.Button import Button
from Components.Sources.StaticText import StaticText
from Components.Harddisk import Harddisk
from Components.NimManager import nimmanager
from Components.About import about
from Components.ScrollLabel import ScrollLabel
from Components.Console import Console
from enigma import eTimer, getEnigmaVersionString
from boxbranding import getBoxType, getMachineBrand, getMachineName, getImageVersion, getImageBuild, getDriverDate
from Components.Pixmap import MultiPixmap
from Components.Network import iNetwork
from Tools.StbHardware import getFPVersion
from os import path
from re import search
def getAboutText():
AboutText = ""
AboutText += _("Model:\t%s %s\n") % (getMachineBrand(), getMachineName())
if path.exists('/proc/stb/info/chipset'):
AboutText += _("Chipset:\t%s") % about.getChipSetString() + "\n"
cpuMHz = ""
if path.exists('/proc/cpuinfo'):
f = open('/proc/cpuinfo', 'r')
temp = f.readlines()
f.close()
try:
for lines in temp:
lisp = lines.split(': ')
if lisp[0].startswith('cpu MHz'):
#cpuMHz = " (" + lisp[1].replace('\n', '') + " MHz)"
cpuMHz = " (" + str(int(float(lisp[1].replace('\n', '')))) + " MHz)"
break
except:
pass
AboutText += _("CPU:\t%s") % about.getCPUString() + cpuMHz + "\n"
AboutText += _("Cores:\t%s") % about.getCpuCoresString() + "\n"
AboutText += _("Version:\t%s") % getImageVersion() + "\n"
AboutText += _("Build:\t%s") % getImageBuild() + "\n"
AboutText += _("Kernel:\t%s") % about.getKernelVersionString() + "\n"
string = getDriverDate()
year = string[0:4]
month = string[4:6]
day = string[6:8]
driversdate = '-'.join((year, month, day))
AboutText += _("Drivers:\t%s") % driversdate + "\n"
AboutText += _("Last update:\t%s") % getEnigmaVersionString() + "\n\n"
AboutText += _("GStreamer:\t%s") % about.getGStreamerVersionString() + "\n"
fp_version = getFPVersion()
if fp_version is None:
fp_version = ""
elif fp_version != 0:
fp_version = _("Frontprocessor version: %s") % fp_version
AboutText += fp_version + "\n"
tempinfo = ""
if path.exists('/proc/stb/sensors/temp0/value'):
f = open('/proc/stb/sensors/temp0/value', 'r')
tempinfo = f.read()
f.close()
elif path.exists('/proc/stb/fp/temp_sensor'):
f = open('/proc/stb/fp/temp_sensor', 'r')
tempinfo = f.read()
f.close()
if tempinfo and int(tempinfo.replace('\n', '')) > 0:
mark = str('\xc2\xb0')
AboutText += _("System temperature:\t%s") % tempinfo.replace('\n', '').replace(' ','') + mark + "C\n"
tempinfo = ""
if path.exists('/proc/stb/fp/temp_sensor_avs'):
f = open('/proc/stb/fp/temp_sensor_avs', 'r')
tempinfo = f.read()
f.close()
if tempinfo and int(tempinfo.replace('\n', '')) > 0:
mark = str('\xc2\xb0')
AboutText += _("Processor temperature:\t%s") % tempinfo.replace('\n', '').replace(' ','') + mark + "C\n"
AboutLcdText = AboutText.replace('\t', ' ')
return AboutText, AboutLcdText
class About(Screen):
def __init__(self, session):
Screen.__init__(self, session)
Screen.setTitle(self, _("Image Information"))
self.skinName = "AboutOE"
self.populate()
self["key_green"] = Button(_("Translations"))
self["actions"] = ActionMap(["SetupActions", "ColorActions", "TimerEditActions"],
{
"cancel": self.close,
"ok": self.close,
"log": self.showAboutReleaseNotes,
"up": self["AboutScrollLabel"].pageUp,
"down": self["AboutScrollLabel"].pageDown,
"green": self.showTranslationInfo,
})
def populate(self):
self["lab1"] = StaticText(_("openATV"))
self["lab2"] = StaticText(_("By openATV Image Team"))
model = None
self["lab3"] = StaticText(_("Support at") + " www.opena.tv")
AboutText = getAboutText()[0]
self["AboutScrollLabel"] = ScrollLabel(AboutText)
def showTranslationInfo(self):
self.session.open(TranslationInfo)
def showAboutReleaseNotes(self):
self.session.open(ViewGitLog)
def createSummary(self):
return AboutSummary
class Devices(Screen):
def __init__(self, session):
Screen.__init__(self, session)
Screen.setTitle(self, _("Device Information"))
self["TunerHeader"] = StaticText(_("Detected NIMs:"))
self["HDDHeader"] = StaticText(_("Detected Devices:"))
self["MountsHeader"] = StaticText(_("Network Servers:"))
self["nims"] = StaticText()
self["hdd"] = StaticText()
self["mounts"] = StaticText()
self.list = []
self.activityTimer = eTimer()
self.activityTimer.timeout.get().append(self.populate2)
self["actions"] = ActionMap(["SetupActions", "ColorActions", "TimerEditActions"],
{
"cancel": self.close,
"ok": self.close,
})
self.onLayoutFinish.append(self.populate)
def populate(self):
self.mountinfo = ''
self["actions"].setEnabled(False)
scanning = _("Wait please while scanning for devices...")
self["nims"].setText(scanning)
self["hdd"].setText(scanning)
self['mounts'].setText(scanning)
self.activityTimer.start(1)
def populate2(self):
self.activityTimer.stop()
self.Console = Console()
niminfo = ""
nims = nimmanager.nimList()
for count in range(len(nims)):
if niminfo:
niminfo += "\n"
niminfo += nims[count]
self["nims"].setText(niminfo)
self.list = []
list2 = []
f = open('/proc/partitions', 'r')
for line in f.readlines():
parts = line.strip().split()
if not parts:
continue
device = parts[3]
if not search('sd[a-z][1-9]', device):
continue
if device in list2:
continue
mount = '/dev/' + device
f = open('/proc/mounts', 'r')
for line in f.readlines():
if device in line:
parts = line.strip().split()
mount = str(parts[1])
break
f.close()
if not mount.startswith('/dev/'):
size = Harddisk(device).diskSize()
free = Harddisk(device).free()
if ((float(size) / 1024) / 1024) >= 1:
sizeline = _("Size: ") + str(round(((float(size) / 1024) / 1024), 2)) + _("TB")
elif (size / 1024) >= 1:
sizeline = _("Size: ") + str(round((float(size) / 1024), 2)) + _("GB")
elif size >= 1:
sizeline = _("Size: ") + str(size) + _("MB")
else:
sizeline = _("Size: ") + _("unavailable")
if ((float(free) / 1024) / 1024) >= 1:
freeline = _("Free: ") + str(round(((float(free) / 1024) / 1024), 2)) + _("TB")
elif (free / 1024) >= 1:
freeline = _("Free: ") + str(round((float(free) / 1024), 2)) + _("GB")
elif free >= 1:
freeline = _("Free: ") + str(free) + _("MB")
else:
freeline = _("Free: ") + _("full")
self.list.append(mount + '\t' + sizeline + ' \t' + freeline)
else:
self.list.append(mount + '\t' + _('Not mounted'))
list2.append(device)
self.list = '\n'.join(self.list)
self["hdd"].setText(self.list)
self.Console.ePopen("df -mh | grep -v '^Filesystem'", self.Stage1Complete)
def Stage1Complete(self, result, retval, extra_args=None):
result = result.replace('\n ', ' ').split('\n')
self.mountinfo = ""
for line in result:
self.parts = line.split()
if line and self.parts[0] and (self.parts[0].startswith('192') or self.parts[0].startswith('//192')):
line = line.split()
try:
ipaddress = line[0]
except:
ipaddress = ""
try:
mounttotal = line[1]
except:
mounttotal = ""
try:
mountfree = line[3]
except:
mountfree = ""
if self.mountinfo:
self.mountinfo += "\n"
self.mountinfo += "%s (%sB, %sB %s)" % (ipaddress, mounttotal, mountfree, _("free"))
if self.mountinfo:
self["mounts"].setText(self.mountinfo)
else:
self["mounts"].setText(_('none'))
self["actions"].setEnabled(True)
def createSummary(self):
return AboutSummary
class SystemMemoryInfo(Screen):
def __init__(self, session):
Screen.__init__(self, session)
Screen.setTitle(self, _("Memory Information"))
self.skinName = ["SystemMemoryInfo", "About"]
self["lab1"] = StaticText(_("openATV"))
self["lab2"] = StaticText(_("By openATV Image Team"))
self["AboutScrollLabel"] = ScrollLabel()
self["actions"] = ActionMap(["SetupActions", "ColorActions"],
{
"cancel": self.close,
"ok": self.close,
})
out_lines = file("/proc/meminfo").readlines()
self.AboutText = _("RAM") + '\n\n'
RamTotal = "-"
RamFree = "-"
for lidx in range(len(out_lines) - 1):
tstLine = out_lines[lidx].split()
if "MemTotal:" in tstLine:
MemTotal = out_lines[lidx].split()
self.AboutText += _("Total Memory:") + "\t" + MemTotal[1] + "\n"
if "MemFree:" in tstLine:
MemFree = out_lines[lidx].split()
self.AboutText += _("Free Memory:") + "\t" + MemFree[1] + "\n"
if "Buffers:" in tstLine:
Buffers = out_lines[lidx].split()
self.AboutText += _("Buffers:") + "\t" + Buffers[1] + "\n"
if "Cached:" in tstLine:
Cached = out_lines[lidx].split()
self.AboutText += _("Cached:") + "\t" + Cached[1] + "\n"
if "SwapTotal:" in tstLine:
SwapTotal = out_lines[lidx].split()
self.AboutText += _("Total Swap:") + "\t" + SwapTotal[1] + "\n"
if "SwapFree:" in tstLine:
SwapFree = out_lines[lidx].split()
self.AboutText += _("Free Swap:") + "\t" + SwapFree[1] + "\n\n"
self["actions"].setEnabled(False)
self.Console = Console()
self.Console.ePopen("df -mh / | grep -v '^Filesystem'", self.Stage1Complete)
def Stage1Complete(self, result, retval, extra_args=None):
flash = str(result).replace('\n', '')
flash = flash.split()
RamTotal = flash[1]
RamFree = flash[3]
self.AboutText += _("FLASH") + '\n\n'
self.AboutText += _("Total:") + "\t" + RamTotal + "\n"
self.AboutText += _("Free:") + "\t" + RamFree + "\n\n"
self["AboutScrollLabel"].setText(self.AboutText)
self["actions"].setEnabled(True)
def createSummary(self):
return AboutSummary
class SystemNetworkInfo(Screen):
def __init__(self, session):
Screen.__init__(self, session)
Screen.setTitle(self, _("Network Information"))
self.skinName = ["SystemNetworkInfo", "WlanStatus"]
self["LabelBSSID"] = StaticText()
self["LabelESSID"] = StaticText()
self["LabelQuality"] = StaticText()
self["LabelSignal"] = StaticText()
self["LabelBitrate"] = StaticText()
self["LabelEnc"] = StaticText()
self["BSSID"] = StaticText()
self["ESSID"] = StaticText()
self["quality"] = StaticText()
self["signal"] = StaticText()
self["bitrate"] = StaticText()
self["enc"] = StaticText()
self["IFtext"] = StaticText()
self["IF"] = StaticText()
self["Statustext"] = StaticText()
self["statuspic"] = MultiPixmap()
self["statuspic"].setPixmapNum(1)
self["statuspic"].show()
self.iface = None
self.createscreen()
self.iStatus = None
if iNetwork.isWirelessInterface(self.iface):
try:
from Plugins.SystemPlugins.WirelessLan.Wlan import iStatus
self.iStatus = iStatus
except:
pass
self.resetList()
self.onClose.append(self.cleanup)
self.updateStatusbar()
self["key_red"] = StaticText(_("Close"))
self["actions"] = ActionMap(["SetupActions", "ColorActions", "DirectionActions"],
{
"cancel": self.close,
"ok": self.close,
"up": self["AboutScrollLabel"].pageUp,
"down": self["AboutScrollLabel"].pageDown
})
def createscreen(self):
self.AboutText = ""
self.iface = "eth0"
eth0 = about.getIfConfig('eth0')
if eth0.has_key('addr'):
self.AboutText += _("IP:") + "\t" + eth0['addr'] + "\n"
if eth0.has_key('netmask'):
self.AboutText += _("Netmask:") + "\t" + eth0['netmask'] + "\n"
if eth0.has_key('hwaddr'):
self.AboutText += _("MAC:") + "\t" + eth0['hwaddr'] + "\n"
self.iface = 'eth0'
eth1 = about.getIfConfig('eth1')
if eth1.has_key('addr'):
self.AboutText += _("IP:") + "\t" + eth1['addr'] + "\n"
if eth1.has_key('netmask'):
self.AboutText += _("Netmask:") + "\t" + eth1['netmask'] + "\n"
if eth1.has_key('hwaddr'):
self.AboutText += _("MAC:") + "\t" + eth1['hwaddr'] + "\n"
self.iface = 'eth1'
ra0 = about.getIfConfig('ra0')
if ra0.has_key('addr'):
self.AboutText += _("IP:") + "\t" + ra0['addr'] + "\n"
if ra0.has_key('netmask'):
self.AboutText += _("Netmask:") + "\t" + ra0['netmask'] + "\n"
if ra0.has_key('hwaddr'):
self.AboutText += _("MAC:") + "\t" + ra0['hwaddr'] + "\n"
self.iface = 'ra0'
wlan0 = about.getIfConfig('wlan0')
if wlan0.has_key('addr'):
self.AboutText += _("IP:") + "\t" + wlan0['addr'] + "\n"
if wlan0.has_key('netmask'):
self.AboutText += _("Netmask:") + "\t" + wlan0['netmask'] + "\n"
if wlan0.has_key('hwaddr'):
self.AboutText += _("MAC:") + "\t" + wlan0['hwaddr'] + "\n"
self.iface = 'wlan0'
rx_bytes, tx_bytes = about.getIfTransferredData(self.iface)
self.AboutText += "\n" + _("Bytes received:") + "\t" + rx_bytes + "\n"
self.AboutText += _("Bytes sent:") + "\t" + tx_bytes + "\n"
hostname = file('/proc/sys/kernel/hostname').read()
self.AboutText += "\n" + _("Hostname:") + "\t" + hostname + "\n"
self["AboutScrollLabel"] = ScrollLabel(self.AboutText)
def cleanup(self):
if self.iStatus:
self.iStatus.stopWlanConsole()
def resetList(self):
if self.iStatus:
self.iStatus.getDataForInterface(self.iface, self.getInfoCB)
def getInfoCB(self, data, status):
self.LinkState = None
if data is not None:
if data is True:
if status is not None:
if self.iface == 'wlan0' or self.iface == 'ra0':
if status[self.iface]["essid"] == "off":
essid = _("No Connection")
else:
essid = status[self.iface]["essid"]
if status[self.iface]["accesspoint"] == "Not-Associated":
accesspoint = _("Not-Associated")
essid = _("No Connection")
else:
accesspoint = status[self.iface]["accesspoint"]
if self.has_key("BSSID"):
self.AboutText += _('Accesspoint:') + '\t' + accesspoint + '\n'
if self.has_key("ESSID"):
self.AboutText += _('SSID:') + '\t' + essid + '\n'
quality = status[self.iface]["quality"]
if self.has_key("quality"):
self.AboutText += _('Link Quality:') + '\t' + quality + '\n'
if status[self.iface]["bitrate"] == '0':
bitrate = _("Unsupported")
else:
bitrate = str(status[self.iface]["bitrate"]) + " Mb/s"
if self.has_key("bitrate"):
self.AboutText += _('Bitrate:') + '\t' + bitrate + '\n'
signal = status[self.iface]["signal"]
if self.has_key("signal"):
self.AboutText += _('Signal Strength:') + '\t' + signal + '\n'
if status[self.iface]["encryption"] == "off":
if accesspoint == "Not-Associated":
encryption = _("Disabled")
else:
encryption = _("Unsupported")
else:
encryption = _("Enabled")
if self.has_key("enc"):
self.AboutText += _('Encryption:') + '\t' + encryption + '\n'
if status[self.iface]["essid"] == "off" or status[self.iface]["accesspoint"] == "Not-Associated" or status[self.iface]["accesspoint"] is False:
self.LinkState = False
self["statuspic"].setPixmapNum(1)
self["statuspic"].show()
else:
self.LinkState = True
iNetwork.checkNetworkState(self.checkNetworkCB)
self["AboutScrollLabel"].setText(self.AboutText)
def exit(self):
self.close(True)
def updateStatusbar(self):
self["IFtext"].setText(_("Network:"))
self["IF"].setText(iNetwork.getFriendlyAdapterName(self.iface))
self["Statustext"].setText(_("Link:"))
if iNetwork.isWirelessInterface(self.iface):
try:
self.iStatus.getDataForInterface(self.iface, self.getInfoCB)
except:
self["statuspic"].setPixmapNum(1)
self["statuspic"].show()
else:
iNetwork.getLinkState(self.iface, self.dataAvail)
def dataAvail(self, data):
self.LinkState = None
for line in data.splitlines():
line = line.strip()
if 'Link detected:' in line:
if "yes" in line:
self.LinkState = True
else:
self.LinkState = False
if self.LinkState:
iNetwork.checkNetworkState(self.checkNetworkCB)
else:
self["statuspic"].setPixmapNum(1)
self["statuspic"].show()
def checkNetworkCB(self, data):
try:
if iNetwork.getAdapterAttribute(self.iface, "up") is True:
if self.LinkState is True:
if data <= 2:
self["statuspic"].setPixmapNum(0)
else:
self["statuspic"].setPixmapNum(1)
self["statuspic"].show()
else:
self["statuspic"].setPixmapNum(1)
self["statuspic"].show()
else:
self["statuspic"].setPixmapNum(1)
self["statuspic"].show()
except:
pass
def createSummary(self):
return AboutSummary
class AboutSummary(Screen):
def __init__(self, session, parent):
Screen.__init__(self, session, parent=parent)
self["selected"] = StaticText("openATV:" + getImageVersion())
AboutText = getAboutText()[1]
self["AboutText"] = StaticText(AboutText)
class ViewGitLog(Screen):
def __init__(self, session, args=None):
Screen.__init__(self, session)
self.skinName = "SoftwareUpdateChanges"
self.setTitle(_("OE Changes"))
self.logtype = 'oe'
self["text"] = ScrollLabel()
self['title_summary'] = StaticText()
self['text_summary'] = StaticText()
self["key_red"] = Button(_("Close"))
self["key_green"] = Button(_("OK"))
self["key_yellow"] = Button(_("Show E2 Log"))
self["myactions"] = ActionMap(['ColorActions', 'OkCancelActions', 'DirectionActions'],
{
'cancel': self.closeRecursive,
'green': self.closeRecursive,
"red": self.closeRecursive,
"yellow": self.changelogtype,
"left": self.pageUp,
"right": self.pageDown,
"down": self.pageDown,
"up": self.pageUp
},-1)
self.onLayoutFinish.append(self.getlog)
def changelogtype(self):
if self.logtype == 'e2':
self["key_yellow"].setText(_("Show E2 Log"))
self.setTitle(_("OE Changes"))
self.logtype = 'oe'
else:
self["key_yellow"].setText(_("Show OE Log"))
self.setTitle(_("Enigma2 Changes"))
self.logtype = 'e2'
self.getlog()
def pageUp(self):
self["text"].pageUp()
def pageDown(self):
self["text"].pageDown()
def getlog(self):
fd = open('/etc/' + self.logtype + '-git.log', 'r')
releasenotes = fd.read()
fd.close()
releasenotes = releasenotes.replace('\nopenatv: build', "\n\nopenatv: build")
self["text"].setText(releasenotes)
summarytext = releasenotes
try:
if self.logtype == 'e2':
self['title_summary'].setText(_("E2 Log"))
self['text_summary'].setText(_("Enigma2 Changes"))
else:
self['title_summary'].setText(_("OE Log"))
self['text_summary'].setText(_("OE Changes"))
except:
self['title_summary'].setText("")
self['text_summary'].setText("")
def unattendedupdate(self):
self.close((_("Unattended upgrade without GUI and reboot system"), "cold"))
def closeRecursive(self):
self.close((_("Cancel"), ""))
class TranslationInfo(Screen):
def __init__(self, session):
Screen.__init__(self, session)
Screen.setTitle(self, _("Translation Information"))
# don't remove the string out of the _(), or it can't be "translated" anymore.
# TRANSLATORS: Add here whatever should be shown in the "translator" about screen, up to 6 lines (use \n for newline)
info = _("TRANSLATOR_INFO")
if info == "TRANSLATOR_INFO":
info = ""
infolines = _("").split("\n")
infomap = {}
for x in infolines:
l = x.split(': ')
if len(l) != 2:
continue
(type, value) = l
infomap[type] = value
print infomap
self["TranslationInfo"] = StaticText(info)
translator_name = infomap.get("Language-Team", "none")
if translator_name == "none":
translator_name = infomap.get("Last-Translator", "")
self["TranslatorName"] = StaticText(translator_name)
self["actions"] = ActionMap(["SetupActions"],
{
"cancel": self.close,
"ok": self.close,
})
|
gpl-2.0
|
Lujeni/ansible
|
lib/ansible/modules/network/fortimanager/fmgr_ha.py
|
38
|
13484
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community"
}
DOCUMENTATION = '''
---
module: fmgr_ha
version_added: "2.8"
notes:
- Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/).
author:
- Luke Weighall (@lweighall)
- Andrew Welsh (@Ghilli3)
- Jim Huber (@p4r4n0y1ng)
short_description: Manages the High-Availability State of FortiManager Clusters and Nodes.
description: Change HA state or settings of FortiManager nodes (Standalone/Master/Slave).
options:
fmgr_ha_mode:
description:
- Sets the role of the FortiManager host for HA.
required: false
choices: ["standalone", "master", "slave"]
fmgr_ha_peer_ipv4:
description:
- Sets the IPv4 address of a HA peer.
required: false
fmgr_ha_peer_ipv6:
description:
- Sets the IPv6 address of a HA peer.
required: false
fmgr_ha_peer_sn:
description:
- Sets the HA Peer Serial Number.
required: false
fmgr_ha_peer_status:
description:
- Sets the peer status to enable or disable.
required: false
choices: ["enable", "disable"]
fmgr_ha_cluster_pw:
description:
- Sets the password for the HA cluster. Only required once. System remembers between HA mode switches.
required: false
fmgr_ha_cluster_id:
description:
- Sets the ID number of the HA cluster. Defaults to 1.
required: false
default: 1
fmgr_ha_hb_threshold:
description:
- Sets heartbeat lost threshold (1-255).
required: false
default: 3
fmgr_ha_hb_interval:
description:
- Sets the heartbeat interval (1-255).
required: false
default: 5
fmgr_ha_file_quota:
description:
- Sets the File quota in MB (2048-20480).
required: false
default: 4096
'''
EXAMPLES = '''
- name: SET FORTIMANAGER HA NODE TO MASTER
fmgr_ha:
fmgr_ha_mode: "master"
fmgr_ha_cluster_pw: "fortinet"
fmgr_ha_cluster_id: "1"
- name: SET FORTIMANAGER HA NODE TO SLAVE
fmgr_ha:
fmgr_ha_mode: "slave"
fmgr_ha_cluster_pw: "fortinet"
fmgr_ha_cluster_id: "1"
- name: SET FORTIMANAGER HA NODE TO STANDALONE
fmgr_ha:
fmgr_ha_mode: "standalone"
- name: ADD FORTIMANAGER HA PEER
fmgr_ha:
fmgr_ha_peer_ipv4: "192.168.1.254"
fmgr_ha_peer_sn: "FMG-VM1234567890"
fmgr_ha_peer_status: "enable"
- name: CREATE CLUSTER ON MASTER
fmgr_ha:
fmgr_ha_mode: "master"
fmgr_ha_cluster_pw: "fortinet"
fmgr_ha_cluster_id: "1"
fmgr_ha_hb_threshold: "10"
fmgr_ha_hb_interval: "15"
fmgr_ha_file_quota: "2048"
'''
RETURN = """
api_result:
description: full API response, includes status code and message
returned: always
type: str
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortimanager.fortimanager import FortiManagerHandler
from ansible.module_utils.network.fortimanager.common import FMGBaseException
from ansible.module_utils.network.fortimanager.common import FMGRCommon
from ansible.module_utils.network.fortimanager.common import FMGRMethods
from ansible.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def fmgr_set_ha_mode(fmgr, paramgram):
"""
:param fmgr: The fmgr object instance from fortimanager.py
:type fmgr: class object
:param paramgram: The formatted dictionary of options to process
:type paramgram: dict
:return: The response from the FortiManager
:rtype: dict
"""
# INIT A BASIC OBJECTS
response = DEFAULT_RESULT_OBJ
url = ""
datagram = {}
if paramgram["fmgr_ha_cluster_pw"] is not None and str(paramgram["fmgr_ha_mode"].lower()) != "standalone":
datagram = {
"mode": paramgram["fmgr_ha_mode"],
"file-quota": paramgram["fmgr_ha_file_quota"],
"hb-interval": paramgram["fmgr_ha_hb_interval"],
"hb-lost-threshold": paramgram["fmgr_ha_hb_threshold"],
"password": paramgram["fmgr_ha_cluster_pw"],
"clusterid": paramgram["fmgr_ha_cluster_id"]
}
elif str(paramgram["fmgr_ha_mode"].lower()) == "standalone":
datagram = {
"mode": paramgram["fmgr_ha_mode"],
"file-quota": paramgram["fmgr_ha_file_quota"],
"hb-interval": paramgram["fmgr_ha_hb_interval"],
"hb-lost-threshold": paramgram["fmgr_ha_hb_threshold"],
"clusterid": paramgram["fmgr_ha_cluster_id"]
}
url = '/cli/global/system/ha'
response = fmgr.process_request(url, datagram, FMGRMethods.SET)
return response
def fmgr_get_ha_peer_list(fmgr):
"""
:param fmgr: The fmgr object instance from fortimanager.py
:type fmgr: class object
:param paramgram: The formatted dictionary of options to process
:type paramgram: dict
:return: The response from the FortiManager
:rtype: dict
"""
# INIT A BASIC OBJECTS
response = DEFAULT_RESULT_OBJ
datagram = {}
paramgram = {}
url = '/cli/global/system/ha/peer/'
response = fmgr.process_request(url, datagram, FMGRMethods.GET)
return response
def fmgr_set_ha_peer(fmgr, paramgram):
"""
:param fmgr: The fmgr object instance from fortimanager.py
:type fmgr: class object
:param paramgram: The formatted dictionary of options to process
:type paramgram: dict
:return: The response from the FortiManager
:rtype: dict
"""
datagram = {
"ip": paramgram["fmgr_ha_peer_ipv4"],
"ip6": paramgram["fmgr_ha_peer_ipv6"],
"serial-number": paramgram["fmgr_ha_peer_sn"],
"status": paramgram["fmgr_ha_peer_status"],
"id": paramgram["peer_id"]
}
url = '/cli/global/system/ha/peer/'
response = fmgr.process_request(url, datagram, FMGRMethods.SET)
return response
def main():
argument_spec = dict(
fmgr_ha_mode=dict(required=False, type="str", choices=["standalone", "master", "slave"]),
fmgr_ha_cluster_pw=dict(required=False, type="str", no_log=True),
fmgr_ha_peer_status=dict(required=False, type="str", choices=["enable", "disable"]),
fmgr_ha_peer_sn=dict(required=False, type="str"),
fmgr_ha_peer_ipv4=dict(required=False, type="str"),
fmgr_ha_peer_ipv6=dict(required=False, type="str"),
fmgr_ha_hb_threshold=dict(required=False, type="int", default=3),
fmgr_ha_hb_interval=dict(required=False, type="int", default=5),
fmgr_ha_file_quota=dict(required=False, type="int", default=4096),
fmgr_ha_cluster_id=dict(required=False, type="int", default=1)
)
required_if = [
['fmgr_ha_peer_ipv4', 'present', ['fmgr_ha_peer_sn', 'fmgr_ha_peer_status']],
['fmgr_ha_peer_ipv6', 'present', ['fmgr_ha_peer_sn', 'fmgr_ha_peer_status']],
['fmgr_ha_mode', 'master', ['fmgr_ha_cluster_pw', 'fmgr_ha_cluster_id']],
['fmgr_ha_mode', 'slave', ['fmgr_ha_cluster_pw', 'fmgr_ha_cluster_id']],
]
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, required_if=required_if)
paramgram = {
"fmgr_ha_mode": module.params["fmgr_ha_mode"],
"fmgr_ha_cluster_pw": module.params["fmgr_ha_cluster_pw"],
"fmgr_ha_peer_status": module.params["fmgr_ha_peer_status"],
"fmgr_ha_peer_sn": module.params["fmgr_ha_peer_sn"],
"fmgr_ha_peer_ipv4": module.params["fmgr_ha_peer_ipv4"],
"fmgr_ha_peer_ipv6": module.params["fmgr_ha_peer_ipv6"],
"fmgr_ha_hb_threshold": module.params["fmgr_ha_hb_threshold"],
"fmgr_ha_hb_interval": module.params["fmgr_ha_hb_interval"],
"fmgr_ha_file_quota": module.params["fmgr_ha_file_quota"],
"fmgr_ha_cluster_id": module.params["fmgr_ha_cluster_id"],
}
module.paramgram = paramgram
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
fmgr = FortiManagerHandler(connection, module)
fmgr.tools = FMGRCommon()
else:
module.fail_json(**FAIL_SOCKET_MSG)
# INIT FLAGS AND COUNTERS
get_ha_peers = 0
results = DEFAULT_RESULT_OBJ
try:
if any(v is not None for v in (paramgram["fmgr_ha_peer_sn"], paramgram["fmgr_ha_peer_ipv4"],
paramgram["fmgr_ha_peer_ipv6"], paramgram["fmgr_ha_peer_status"])):
get_ha_peers = 1
except Exception as err:
raise FMGBaseException(err)
try:
# IF HA MODE IS NOT NULL, SWITCH THAT
if paramgram["fmgr_ha_mode"] is not None:
if (str.lower(paramgram["fmgr_ha_mode"]) != "standalone" and paramgram["fmgr_ha_cluster_pw"] is not None)\
or str.lower(paramgram["fmgr_ha_mode"]) == "standalone":
results = fmgr_set_ha_mode(fmgr, paramgram)
fmgr.govern_response(module=module, results=results, stop_on_success=False,
ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram))
elif str.lower(paramgram["fmgr_ha_mode"]) != "standalone" and\
paramgram["fmgr_ha_mode"] is not None and\
paramgram["fmgr_ha_cluster_pw"] is None:
module.exit_json(msg="If setting HA Mode of MASTER or SLAVE, you must specify a cluster password")
except Exception as err:
raise FMGBaseException(err)
# IF GET_HA_PEERS IS ENABLED, LETS PROCESS THE PEERS
try:
if get_ha_peers == 1:
# GET THE CURRENT LIST OF PEERS FROM THE NODE
peers = fmgr_get_ha_peer_list(fmgr)
# GET LENGTH OF RETURNED PEERS LIST AND ADD ONE FOR THE NEXT ID
paramgram["next_peer_id"] = len(peers[1]) + 1
# SET THE ACTUAL NUMBER OF PEERS
num_of_peers = len(peers[1])
# SET THE PEER ID FOR DISABLE METHOD
paramgram["peer_id"] = len(peers) - 1
# SET THE PEER LOOPCOUNT TO 1 TO START THE LOOP
peer_loopcount = 1
# LOOP THROUGH PEERS TO FIND THE SERIAL NUMBER MATCH TO GET THE RIGHT PEER ID
# IDEA BEING WE DON'T WANT TO SUBMIT A BAD peer_id THAT DOESN'T JIVE WITH CURRENT DB ON FMG
# SO LETS SEARCH FOR IT, AND IF WE FIND IT, WE WILL CHANGE THE PEER ID VARIABLES TO MATCH
# IF NOT FOUND, LIFE GOES ON AND WE ASSUME THAT WE'RE ADDING A PEER
# AT WHICH POINT THE next_peer_id VARIABLE WILL HAVE THE RIGHT PRIMARY KEY
if paramgram["fmgr_ha_peer_sn"] is not None:
while peer_loopcount <= num_of_peers:
# GET THE SERIAL NUMBER FOR CURRENT PEER IN LOOP TO COMPARE TO SN IN PLAYBOOK
try:
sn_compare = peers[1][peer_loopcount - 1]["serial-number"]
# IF THE SN IN THE PEERS MATCHES THE PLAYBOOK SN, SET THE IDS
if sn_compare == paramgram["fmgr_ha_peer_sn"]:
paramgram["peer_id"] = peer_loopcount
paramgram["next_peer_id"] = paramgram["peer_id"]
except Exception as err:
raise FMGBaseException(err)
# ADVANCE THE LOOP AND REPEAT UNTIL DONE
peer_loopcount += 1
# IF THE PEER STATUS ISN'T IN THE PLAYBOOK, ASSUME ITS ENABLE
if paramgram["fmgr_ha_peer_status"] is None:
paramgram["fmgr_ha_peer_status"] = "enable"
# IF THE PEER STATUS IS ENABLE, USE THE next_peer_id IN THE API CALL FOR THE ID
if paramgram["fmgr_ha_peer_status"] == "enable":
results = fmgr_set_ha_peer(fmgr, paramgram)
fmgr.govern_response(module=module, results=results, stop_on_success=True,
ansible_facts=fmgr.construct_ansible_facts(results,
module.params, paramgram))
# IF THE PEER STATUS IS DISABLE, WE HAVE TO HANDLE THAT A BIT DIFFERENTLY
# JUST USING TWO DIFFERENT peer_id 's HERE
if paramgram["fmgr_ha_peer_status"] == "disable":
results = fmgr_set_ha_peer(fmgr, paramgram)
fmgr.govern_response(module=module, results=results, stop_on_success=True,
ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram))
except Exception as err:
raise FMGBaseException(err)
return module.exit_json(**results[1])
if __name__ == "__main__":
main()
|
gpl-3.0
|
pupapaik/contrail-neutron-plugin
|
neutron_plugin_contrail/plugins/opencontrail/vnc_client/route_table_res_handler.py
|
7
|
7374
|
# Copyright 2015. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cfgm_common import exceptions as vnc_exc
from vnc_api import vnc_api
import contrail_res_handler as res_handler
class RouteTableMixin(object):
def _route_table_vnc_to_neutron(self, rt_obj):
rt_q_dict = self._vnc_lib.obj_to_dict(rt_obj)
# replace field names
rt_q_dict['id'] = rt_obj.uuid
rt_q_dict['tenant_id'] = self._project_id_vnc_to_neutron(
rt_obj.parent_uuid)
rt_q_dict['name'] = rt_obj.name
rt_q_dict['fq_name'] = rt_obj.fq_name
# get route table routes
rt_q_dict['routes'] = rt_q_dict.pop('routes', None)
if rt_q_dict['routes']:
for route in rt_q_dict['routes']['route']:
if route['next_hop_type']:
route['next_hop'] = route['next_hop_type']
return rt_q_dict
# end _route_table_vnc_to_neutron
class RouteTableBaseGet(res_handler.ResourceGetHandler):
resource_get_method = "route_table_read"
class RouteTableGetHandler(RouteTableBaseGet,
RouteTableMixin):
resource_list_method = "route_tables_list"
detail = False
def resource_get(self, context, rt_id, fields=None):
try:
rt_obj = self._resource_get(id=rt_id)
except vnc_exc.NoIdError:
# TODO() add route table specific exception
self._raise_contrail_exception(
'NetworkNotFound', net_id=rt_id, resource='route_table')
return self._route_table_vnc_to_neutron(rt_obj)
def resource_list_by_project(self, project_id):
try:
project_uuid = self._project_id_neutron_to_vnc(project_id)
except Exception:
print("Error in converting uuid %s" % (project_id))
resp_dict = self._resource_list(parent_id=project_uuid)
return resp_dict['route-tables']
def resource_list(self, context, filters=None, fields=None):
ret_list = []
# collect phase
all_rts = [] # all rts in all projects
if filters and 'tenant_id' in filters:
project_ids = self._validate_project_ids(
context,
filters['tenant_id'])
for p_id in project_ids:
project_rts = self.resource_list_by_project(p_id)
all_rts.append(project_rts)
elif filters and 'name' in filters:
p_id = self._project_id_neutron_to_vnc(context['tenant'])
project_rts = self.resource_list_by_project(p_id)
all_rts.append(project_rts)
else: # no filters
dom_projects = self._project_list_domain(None)
for project in dom_projects:
proj_id = project['uuid']
project_rts = self.resource_list_by_project(proj_id)
all_rts.append(project_rts)
# prune phase
for project_rts in all_rts:
for proj_rt in project_rts:
# TODO() implement same for name specified in filter
proj_rt_id = proj_rt['uuid']
if not self._filters_is_present(filters, 'id', proj_rt_id):
continue
rt_info = self.resource_get(proj_rt_id)
if not self._filters_is_present(filters, 'name',
rt_info['name']):
continue
ret_list.append(rt_info)
return ret_list
class RouteTableCreateHandler(res_handler.ResourceCreateHandler):
resource_create_method = "route_table_create"
def resource_create(self, context, rt_q):
project_id = self._project_id_neutron_to_vnc(rt_q['tenant_id'])
project_obj = self._project_read(proj_id=project_id)
rt_obj = vnc_api.RouteTable(name=rt_q['name'],
parent_obj=project_obj)
if rt_q['routes']:
for route in rt_q['routes']['route']:
try:
vm_obj = self._vnc_lib.virtual_machine_read(
id=route['next_hop'])
si_list = vm_obj.get_service_instance_refs()
if si_list:
fq_name = si_list[0]['to']
si_obj = self._vnc_lib.service_instance_read(
fq_name=fq_name)
route['next_hop'] = si_obj.get_fq_name_str()
rt_obj.set_routes(
vnc_api.RouteTableType.factory(**rt_q['routes']))
except Exception as e:
pass
try:
self._resource_create(rt_obj)
except vnc_exc.RefsExistError as e:
self._raise_contrail_exception(
'BadRequest',
resource='route_table', msg=str(e))
ret_rt_q = self._route_table_vnc_to_neutron(rt_obj)
return ret_rt_q
class RouteTableUpdateHandler(res_handler.ResourceUpdateHandler,
RouteTableBaseGet,
RouteTableMixin):
resource_update_method = "route_table_update"
def resource_update(self, context, rt_id, rt_q):
rt_q['id'] = rt_id
try:
rt_obj = self._resource_get(id=rt_q['id'])
except vnc_exc.NoIdError:
raise self._raise_contrail_exception(
'ResourceNotFound', id=rt_id, resource='route_table')
if rt_q['routes']:
for route in rt_q['routes']['route']:
try:
vm_obj = self._vnc_lib.virtual_machine_read(
id=route['next_hop'])
si_list = vm_obj.get_service_instance_refs()
if si_list:
fq_name = si_list[0]['to']
si_obj = self._vnc_lib.service_instance_read(
fq_name=fq_name)
route['next_hop'] = si_obj.get_fq_name_str()
rt_obj.set_routes(
vnc_api.RouteTableType.factory(**rt_q['routes']))
except Exception:
pass
self._resource_update(rt_obj)
return self._route_table_vnc_to_neutron(rt_obj)
class RouteTableDeleteHandler(res_handler.ResourceDeleteHandler):
resource_delete_method = "route_table_delete"
def resource_delete(self, context, rt_id):
try:
self._resource_delete(rt_id)
except vnc_exc.NoIdError:
raise self._raise_contrail_exception(
"ResourceNotFound", id=rt_id, resource='route_table')
class RouteTableHandler(RouteTableGetHandler,
RouteTableCreateHandler,
RouteTableUpdateHandler,
RouteTableDeleteHandler):
pass
|
apache-2.0
|
anryko/ansible
|
lib/ansible/modules/net_tools/nios/nios_naptr_record.py
|
68
|
5884
|
#!/usr/bin/python
# Copyright (c) 2018 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: nios_naptr_record
version_added: "2.7"
author: "Blair Rampling (@brampling)"
short_description: Configure Infoblox NIOS NAPTR records
description:
- Adds and/or removes instances of NAPTR record objects from
Infoblox NIOS servers. This module manages NIOS C(record:naptr) objects
using the Infoblox WAPI interface over REST.
requirements:
- infoblox_client
extends_documentation_fragment: nios
options:
name:
description:
- Specifies the fully qualified hostname to add or remove from
the system
required: true
view:
description:
- Sets the DNS view to associate this a record with. The DNS
view must already be configured on the system
required: true
default: default
aliases:
- dns_view
order:
description:
- Configures the order (0-65535) for this NAPTR record. This parameter
specifies the order in which the NAPTR rules are applied when
multiple rules are present.
required: true
preference:
description:
- Configures the preference (0-65535) for this NAPTR record. The
preference field determines the order NAPTR records are processed
when multiple records with the same order parameter are present.
required: true
replacement:
description:
- Configures the replacement field for this NAPTR record.
For nonterminal NAPTR records, this field specifies the
next domain name to look up.
required: true
services:
description:
- Configures the services field (128 characters maximum) for this
NAPTR record. The services field contains protocol and service
identifiers, such as "http+E2U" or "SIPS+D2T".
required: false
flags:
description:
- Configures the flags field for this NAPTR record. These control the
interpretation of the fields for an NAPTR record object. Supported
values for the flags field are "U", "S", "P" and "A".
required: false
regexp:
description:
- Configures the regexp field for this NAPTR record. This is the
regular expression-based rewriting rule of the NAPTR record. This
should be a POSIX compliant regular expression, including the
substitution rule and flags. Refer to RFC 2915 for the field syntax
details.
required: false
ttl:
description:
- Configures the TTL to be associated with this NAPTR record
extattrs:
description:
- Allows for the configuration of Extensible Attributes on the
instance of the object. This argument accepts a set of key / value
pairs for configuration.
comment:
description:
- Configures a text string comment to be associated with the instance
of this object. The provided text string will be configured on the
object instance.
state:
description:
- Configures the intended state of the instance of the object on
the NIOS server. When this value is set to C(present), the object
is configured on the device and when this value is set to C(absent)
the value is removed (if necessary) from the device.
default: present
choices:
- present
- absent
'''
EXAMPLES = '''
- name: configure a NAPTR record
nios_naptr_record:
name: '*.subscriber-100.ansiblezone.com'
order: 1000
preference: 10
replacement: replacement1.network.ansiblezone.com
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: add a comment to an existing NAPTR record
nios_naptr_record:
name: '*.subscriber-100.ansiblezone.com'
order: 1000
preference: 10
replacement: replacement1.network.ansiblezone.com
comment: this is a test comment
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: remove a NAPTR record from the system
nios_naptr_record:
name: '*.subscriber-100.ansiblezone.com'
order: 1000
preference: 10
replacement: replacement1.network.ansiblezone.com
state: absent
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.net_tools.nios.api import WapiModule
def main():
''' Main entry point for module execution
'''
ib_spec = dict(
name=dict(required=True, ib_req=True),
view=dict(default='default', aliases=['dns_view'], ib_req=True),
order=dict(type='int', ib_req=True),
preference=dict(type='int', ib_req=True),
replacement=dict(ib_req=True),
services=dict(),
flags=dict(),
regexp=dict(),
ttl=dict(type='int'),
extattrs=dict(type='dict'),
comment=dict(),
)
argument_spec = dict(
provider=dict(required=True),
state=dict(default='present', choices=['present', 'absent'])
)
argument_spec.update(ib_spec)
argument_spec.update(WapiModule.provider_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
wapi = WapiModule(module)
result = wapi.run('record:naptr', ib_spec)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
rkibria/yapyg
|
demo/main.py
|
1
|
6331
|
# Copyright (c) 2015 Raihan Kibria
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
SCREEN_LOGICAL_WIDTH = 480
SCREEN_LOGICAL_HEIGHT = 800
TILE_SIZE = 128
import yapyg.bootstrap
SCREEN_SCALE = 1.0
SCREEN_REAL_WIDTH = int(SCREEN_LOGICAL_WIDTH * SCREEN_SCALE)
SCREEN_REAL_HEIGHT = int(SCREEN_LOGICAL_HEIGHT * SCREEN_SCALE)
yapyg.bootstrap.initialize_screen(SCREEN_REAL_WIDTH, SCREEN_REAL_HEIGHT)
from kivy.app import App
from kivy.core.window import Window
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.stacklayout import StackLayout
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.spinner import Spinner
from kivy.uix.image import Image
from kivy.uix.checkbox import CheckBox
from kivy.uix.boxlayout import BoxLayout
from kivy import platform
from yapyg_widgets.screen_widget import ScreenWidget
DEFAULT_START_CHOICE = "demo_car"
class MenuWidget(FloatLayout):
def __init__(self, **kwargs):
super(MenuWidget, self).__init__(**kwargs)
self.choices = {
"demo_bounce": "Basic physics simulation",
"demo_breakout": "Breakout demo",
"demo_gauntlet": "Top down tile map game",
"demo_starship": "'Endless' scrolling background and animation",
"demo_text": "Text drawing",
"demo_tiles": "Tile map scrolling",
"demo_pinball": "Simple pinball demo",
"demo_squares": "Random set of falling physical objects",
"demo_car": "Top down car driving game",
"demo_sprites": "Draw a large amount of sprites",
}
layout = StackLayout(orientation="tb-lr", padding=[10, 20, 10, 20])
layout.add_widget(Image(source="assets/img/ui/logo.png", size_hint=(1, 0.4), allow_stretch = True,))
layout.add_widget(Label(text="Choose demo:", size_hint=(1, 0.1)))
self.spinner = Spinner(text=DEFAULT_START_CHOICE, values=[x for x in self.choices.iterkeys()], size_hint=(1, 0.1))
layout.add_widget(self.spinner)
self.spinner.bind(text=self.show_selected_value)
self.description_label = Label(text=self.choices[DEFAULT_START_CHOICE], valign="middle", halign="center", size_hint=(1, 0.2))
self.description_label.bind(size=self.description_label.setter("text_size"))
layout.add_widget(self.description_label)
run_button = Button(text="Run", size_hint=(1, 0.1))
run_button.bind(state=self.on_run)
layout.add_widget(run_button)
debug_layout = BoxLayout(orientation='horizontal', size_hint=(1, 0.1))
debug_layout.add_widget(Label(text=" "))
self.debug_checkbox = CheckBox()
self.debug_checkbox.active = False
debug_layout.add_widget(self.debug_checkbox)
debug_layout.add_widget(Label(text="Show debug info", valign="middle", halign="center"))
debug_layout.add_widget(Label(text=" "))
debug_layout.add_widget(Label(text=" "))
self.add_widget(debug_layout)
self.add_widget(layout)
if platform == 'win' or platform == 'linux' or platform == 'macosx':
Window.bind(on_key_up=self._on_keyboard_up)
def _on_keyboard_up(self, window, keycode, scancode):
self.on_run(None, None)
def show_selected_value(self, spinner, value):
self.description_label.text = self.choices[value]
def on_run(self, instance, value):
if self.parent:
parent = self.parent
parent.remove_widget(self)
state = None
module_name = self.spinner.text
global DEFAULT_START_CHOICE
DEFAULT_START_CHOICE = module_name
exec("import %s" % module_name)
exec("state = %s.create(SCREEN_LOGICAL_WIDTH, SCREEN_LOGICAL_HEIGHT, TILE_SIZE)" % self.spinner.text)
parent.add_widget(ScreenWidget(state,
self.on_exit_game,
self.debug_checkbox.active
)
)
def on_exit_game(self, state, parent_widget):
parent_widget.add_widget(MenuWidget())
class YapygDemoApp(App):
def build(self):
if True: # False to load a demo directly without the menu!
return MenuWidget()
else:
import demo_gauntlet
state = demo_gauntlet.create(SCREEN_LOGICAL_WIDTH, SCREEN_LOGICAL_HEIGHT, TILE_SIZE)
return ScreenWidget(state,
(float(Window.width) / SCREEN_LOGICAL_WIDTH),
None, False)
if __name__ == "__main__":
YapygDemoApp().run()
|
mit
|
krisys/django
|
tests/utils_tests/test_timezone.py
|
149
|
7857
|
import copy
import datetime
import pickle
import unittest
from django.test import override_settings
from django.utils import timezone
try:
import pytz
except ImportError:
pytz = None
requires_pytz = unittest.skipIf(pytz is None, "this test requires pytz")
if pytz is not None:
CET = pytz.timezone("Europe/Paris")
EAT = timezone.get_fixed_timezone(180) # Africa/Nairobi
ICT = timezone.get_fixed_timezone(420) # Asia/Bangkok
class TimezoneTests(unittest.TestCase):
def test_localtime(self):
now = datetime.datetime.utcnow().replace(tzinfo=timezone.utc)
local_tz = timezone.LocalTimezone()
local_now = timezone.localtime(now, local_tz)
self.assertEqual(local_now.tzinfo, local_tz)
def test_localtime_naive(self):
with self.assertRaises(ValueError):
timezone.localtime(datetime.datetime.now())
def test_localtime_out_of_range(self):
local_tz = timezone.LocalTimezone()
long_ago = datetime.datetime(1900, 1, 1, tzinfo=timezone.utc)
try:
timezone.localtime(long_ago, local_tz)
except (OverflowError, ValueError) as exc:
self.assertIn("install pytz", exc.args[0])
else:
raise unittest.SkipTest("Failed to trigger an OverflowError or ValueError")
def test_now(self):
with override_settings(USE_TZ=True):
self.assertTrue(timezone.is_aware(timezone.now()))
with override_settings(USE_TZ=False):
self.assertTrue(timezone.is_naive(timezone.now()))
def test_override(self):
default = timezone.get_default_timezone()
try:
timezone.activate(ICT)
with timezone.override(EAT):
self.assertIs(EAT, timezone.get_current_timezone())
self.assertIs(ICT, timezone.get_current_timezone())
with timezone.override(None):
self.assertIs(default, timezone.get_current_timezone())
self.assertIs(ICT, timezone.get_current_timezone())
timezone.deactivate()
with timezone.override(EAT):
self.assertIs(EAT, timezone.get_current_timezone())
self.assertIs(default, timezone.get_current_timezone())
with timezone.override(None):
self.assertIs(default, timezone.get_current_timezone())
self.assertIs(default, timezone.get_current_timezone())
finally:
timezone.deactivate()
def test_override_decorator(self):
default = timezone.get_default_timezone()
@timezone.override(EAT)
def func_tz_eat():
self.assertIs(EAT, timezone.get_current_timezone())
@timezone.override(None)
def func_tz_none():
self.assertIs(default, timezone.get_current_timezone())
try:
timezone.activate(ICT)
func_tz_eat()
self.assertIs(ICT, timezone.get_current_timezone())
func_tz_none()
self.assertIs(ICT, timezone.get_current_timezone())
timezone.deactivate()
func_tz_eat()
self.assertIs(default, timezone.get_current_timezone())
func_tz_none()
self.assertIs(default, timezone.get_current_timezone())
finally:
timezone.deactivate()
def test_copy(self):
self.assertIsInstance(copy.copy(timezone.UTC()), timezone.UTC)
self.assertIsInstance(copy.copy(timezone.LocalTimezone()), timezone.LocalTimezone)
def test_deepcopy(self):
self.assertIsInstance(copy.deepcopy(timezone.UTC()), timezone.UTC)
self.assertIsInstance(copy.deepcopy(timezone.LocalTimezone()), timezone.LocalTimezone)
def test_pickling_unpickling(self):
self.assertIsInstance(pickle.loads(pickle.dumps(timezone.UTC())), timezone.UTC)
self.assertIsInstance(pickle.loads(pickle.dumps(timezone.LocalTimezone())), timezone.LocalTimezone)
def test_is_aware(self):
self.assertTrue(timezone.is_aware(datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)))
self.assertFalse(timezone.is_aware(datetime.datetime(2011, 9, 1, 13, 20, 30)))
def test_is_naive(self):
self.assertFalse(timezone.is_naive(datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)))
self.assertTrue(timezone.is_naive(datetime.datetime(2011, 9, 1, 13, 20, 30)))
def test_make_aware(self):
self.assertEqual(
timezone.make_aware(datetime.datetime(2011, 9, 1, 13, 20, 30), EAT),
datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT))
with self.assertRaises(ValueError):
timezone.make_aware(datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), EAT)
def test_make_naive(self):
self.assertEqual(
timezone.make_naive(datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), EAT),
datetime.datetime(2011, 9, 1, 13, 20, 30))
self.assertEqual(
timezone.make_naive(datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT), EAT),
datetime.datetime(2011, 9, 1, 13, 20, 30))
with self.assertRaises(ValueError):
timezone.make_naive(datetime.datetime(2011, 9, 1, 13, 20, 30), EAT)
@requires_pytz
def test_make_aware2(self):
self.assertEqual(
timezone.make_aware(datetime.datetime(2011, 9, 1, 12, 20, 30), CET),
CET.localize(datetime.datetime(2011, 9, 1, 12, 20, 30)))
with self.assertRaises(ValueError):
timezone.make_aware(CET.localize(datetime.datetime(2011, 9, 1, 12, 20, 30)), CET)
@requires_pytz
def test_make_aware_pytz(self):
self.assertEqual(
timezone.make_naive(CET.localize(datetime.datetime(2011, 9, 1, 12, 20, 30)), CET),
datetime.datetime(2011, 9, 1, 12, 20, 30))
self.assertEqual(
timezone.make_naive(
pytz.timezone("Asia/Bangkok").localize(datetime.datetime(2011, 9, 1, 17, 20, 30)), CET
),
datetime.datetime(2011, 9, 1, 12, 20, 30))
with self.assertRaises(ValueError):
timezone.make_naive(datetime.datetime(2011, 9, 1, 12, 20, 30), CET)
@requires_pytz
def test_make_aware_pytz_ambiguous(self):
# 2:30 happens twice, once before DST ends and once after
ambiguous = datetime.datetime(2015, 10, 25, 2, 30)
with self.assertRaises(pytz.AmbiguousTimeError):
timezone.make_aware(ambiguous, timezone=CET)
std = timezone.make_aware(ambiguous, timezone=CET, is_dst=False)
dst = timezone.make_aware(ambiguous, timezone=CET, is_dst=True)
self.assertEqual(std - dst, datetime.timedelta(hours=1))
self.assertEqual(std.tzinfo.utcoffset(std), datetime.timedelta(hours=1))
self.assertEqual(dst.tzinfo.utcoffset(dst), datetime.timedelta(hours=2))
@requires_pytz
def test_make_aware_pytz_non_existent(self):
# 2:30 never happened due to DST
non_existent = datetime.datetime(2015, 3, 29, 2, 30)
with self.assertRaises(pytz.NonExistentTimeError):
timezone.make_aware(non_existent, timezone=CET)
std = timezone.make_aware(non_existent, timezone=CET, is_dst=False)
dst = timezone.make_aware(non_existent, timezone=CET, is_dst=True)
self.assertEqual(std - dst, datetime.timedelta(hours=1))
self.assertEqual(std.tzinfo.utcoffset(std), datetime.timedelta(hours=1))
self.assertEqual(dst.tzinfo.utcoffset(dst), datetime.timedelta(hours=2))
# round trip to UTC then back to CET
std = timezone.localtime(timezone.localtime(std, timezone.UTC()), CET)
dst = timezone.localtime(timezone.localtime(dst, timezone.UTC()), CET)
self.assertEqual((std.hour, std.minute), (3, 30))
self.assertEqual((dst.hour, dst.minute), (1, 30))
|
bsd-3-clause
|
san-bil/proxTV
|
prox_tv/demos/demo_filter_image_threads.py
|
2
|
1282
|
### Example script showing how to run proxTV solvers in multiple parallel threads
import prox_tv as ptv
import numpy as np
from pylab import *
import matplotlib.pyplot as plt
import time
import skimage as ski
from skimage import data, io, filters, color, util
# WRITE HERE YOUR NUMBER OF THREADS TO TEST
THREADS = [1, 2, 3, 4, 5, 6, 7, 8];
# Load image
X = io.imread('QRbig.png')
X = ski.img_as_float(X)
X = color.rgb2gray(X)
# Introduce noise
noiseLevel = 0.2
N = util.random_noise(X, mode='gaussian', var=noiseLevel)
# Iterate over number of threads
lam=50./255.
times = []
for threads in THREADS:
print('Filtering image with ' + str(threads) + ' threads...');
start = time.time()
F = ptv.tv1_2d(N, lam, n_threads=threads)
end = time.time()
times.append(end-start)
print('Elapsed time ' + str(end-start))
# Plot filtering results
plt.subplot(1, 3, 1)
io.imshow(X)
plt.xlabel('Original')
plt.subplot(1, 3, 2)
io.imshow(N)
plt.title('2D TVL1 filtering')
plt.xlabel('Noisy')
plt.subplot(1, 3, 3)
io.imshow(F)
plt.xlabel('Filtered')
show()
# Plot timing results
fig, ax = plt.subplots()
ax.bar(THREADS, times, color = 'g')
ax.set_xlabel('Number of threads')
ax.set_ylabel('Time (s)')
ax.set_title('Filtering times for increasing threads')
plt.show()
|
bsd-2-clause
|
mwmuni/LIGGGHTS_GUI
|
OpenGL/raw/GL/ARB/program_interface_query.py
|
9
|
4510
|
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_ARB_program_interface_query'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_ARB_program_interface_query',error_checker=_errors._error_checker)
GL_ACTIVE_RESOURCES=_C('GL_ACTIVE_RESOURCES',0x92F5)
GL_ACTIVE_VARIABLES=_C('GL_ACTIVE_VARIABLES',0x9305)
GL_ARRAY_SIZE=_C('GL_ARRAY_SIZE',0x92FB)
GL_ARRAY_STRIDE=_C('GL_ARRAY_STRIDE',0x92FE)
GL_ATOMIC_COUNTER_BUFFER=_C('GL_ATOMIC_COUNTER_BUFFER',0x92C0)
GL_ATOMIC_COUNTER_BUFFER_INDEX=_C('GL_ATOMIC_COUNTER_BUFFER_INDEX',0x9301)
GL_BLOCK_INDEX=_C('GL_BLOCK_INDEX',0x92FD)
GL_BUFFER_BINDING=_C('GL_BUFFER_BINDING',0x9302)
GL_BUFFER_DATA_SIZE=_C('GL_BUFFER_DATA_SIZE',0x9303)
GL_BUFFER_VARIABLE=_C('GL_BUFFER_VARIABLE',0x92E5)
GL_COMPATIBLE_SUBROUTINES=_C('GL_COMPATIBLE_SUBROUTINES',0x8E4B)
GL_COMPUTE_SUBROUTINE=_C('GL_COMPUTE_SUBROUTINE',0x92ED)
GL_COMPUTE_SUBROUTINE_UNIFORM=_C('GL_COMPUTE_SUBROUTINE_UNIFORM',0x92F3)
GL_FRAGMENT_SUBROUTINE=_C('GL_FRAGMENT_SUBROUTINE',0x92EC)
GL_FRAGMENT_SUBROUTINE_UNIFORM=_C('GL_FRAGMENT_SUBROUTINE_UNIFORM',0x92F2)
GL_GEOMETRY_SUBROUTINE=_C('GL_GEOMETRY_SUBROUTINE',0x92EB)
GL_GEOMETRY_SUBROUTINE_UNIFORM=_C('GL_GEOMETRY_SUBROUTINE_UNIFORM',0x92F1)
GL_IS_PER_PATCH=_C('GL_IS_PER_PATCH',0x92E7)
GL_IS_ROW_MAJOR=_C('GL_IS_ROW_MAJOR',0x9300)
GL_LOCATION=_C('GL_LOCATION',0x930E)
GL_LOCATION_INDEX=_C('GL_LOCATION_INDEX',0x930F)
GL_MATRIX_STRIDE=_C('GL_MATRIX_STRIDE',0x92FF)
GL_MAX_NAME_LENGTH=_C('GL_MAX_NAME_LENGTH',0x92F6)
GL_MAX_NUM_ACTIVE_VARIABLES=_C('GL_MAX_NUM_ACTIVE_VARIABLES',0x92F7)
GL_MAX_NUM_COMPATIBLE_SUBROUTINES=_C('GL_MAX_NUM_COMPATIBLE_SUBROUTINES',0x92F8)
GL_NAME_LENGTH=_C('GL_NAME_LENGTH',0x92F9)
GL_NUM_ACTIVE_VARIABLES=_C('GL_NUM_ACTIVE_VARIABLES',0x9304)
GL_NUM_COMPATIBLE_SUBROUTINES=_C('GL_NUM_COMPATIBLE_SUBROUTINES',0x8E4A)
GL_OFFSET=_C('GL_OFFSET',0x92FC)
GL_PROGRAM_INPUT=_C('GL_PROGRAM_INPUT',0x92E3)
GL_PROGRAM_OUTPUT=_C('GL_PROGRAM_OUTPUT',0x92E4)
GL_REFERENCED_BY_COMPUTE_SHADER=_C('GL_REFERENCED_BY_COMPUTE_SHADER',0x930B)
GL_REFERENCED_BY_FRAGMENT_SHADER=_C('GL_REFERENCED_BY_FRAGMENT_SHADER',0x930A)
GL_REFERENCED_BY_GEOMETRY_SHADER=_C('GL_REFERENCED_BY_GEOMETRY_SHADER',0x9309)
GL_REFERENCED_BY_TESS_CONTROL_SHADER=_C('GL_REFERENCED_BY_TESS_CONTROL_SHADER',0x9307)
GL_REFERENCED_BY_TESS_EVALUATION_SHADER=_C('GL_REFERENCED_BY_TESS_EVALUATION_SHADER',0x9308)
GL_REFERENCED_BY_VERTEX_SHADER=_C('GL_REFERENCED_BY_VERTEX_SHADER',0x9306)
GL_SHADER_STORAGE_BLOCK=_C('GL_SHADER_STORAGE_BLOCK',0x92E6)
GL_TESS_CONTROL_SUBROUTINE=_C('GL_TESS_CONTROL_SUBROUTINE',0x92E9)
GL_TESS_CONTROL_SUBROUTINE_UNIFORM=_C('GL_TESS_CONTROL_SUBROUTINE_UNIFORM',0x92EF)
GL_TESS_EVALUATION_SUBROUTINE=_C('GL_TESS_EVALUATION_SUBROUTINE',0x92EA)
GL_TESS_EVALUATION_SUBROUTINE_UNIFORM=_C('GL_TESS_EVALUATION_SUBROUTINE_UNIFORM',0x92F0)
GL_TOP_LEVEL_ARRAY_SIZE=_C('GL_TOP_LEVEL_ARRAY_SIZE',0x930C)
GL_TOP_LEVEL_ARRAY_STRIDE=_C('GL_TOP_LEVEL_ARRAY_STRIDE',0x930D)
GL_TRANSFORM_FEEDBACK_VARYING=_C('GL_TRANSFORM_FEEDBACK_VARYING',0x92F4)
GL_TYPE=_C('GL_TYPE',0x92FA)
GL_UNIFORM=_C('GL_UNIFORM',0x92E1)
GL_UNIFORM_BLOCK=_C('GL_UNIFORM_BLOCK',0x92E2)
GL_VERTEX_SUBROUTINE=_C('GL_VERTEX_SUBROUTINE',0x92E8)
GL_VERTEX_SUBROUTINE_UNIFORM=_C('GL_VERTEX_SUBROUTINE_UNIFORM',0x92EE)
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLenum,arrays.GLintArray)
def glGetProgramInterfaceiv(program,programInterface,pname,params):pass
@_f
@_p.types(_cs.GLuint,_cs.GLuint,_cs.GLenum,arrays.GLcharArray)
def glGetProgramResourceIndex(program,programInterface,name):pass
@_f
@_p.types(_cs.GLint,_cs.GLuint,_cs.GLenum,arrays.GLcharArray)
def glGetProgramResourceLocation(program,programInterface,name):pass
@_f
@_p.types(_cs.GLint,_cs.GLuint,_cs.GLenum,arrays.GLcharArray)
def glGetProgramResourceLocationIndex(program,programInterface,name):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLuint,_cs.GLsizei,arrays.GLsizeiArray,arrays.GLcharArray)
def glGetProgramResourceName(program,programInterface,index,bufSize,length,name):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLuint,_cs.GLsizei,arrays.GLuintArray,_cs.GLsizei,arrays.GLsizeiArray,arrays.GLintArray)
def glGetProgramResourceiv(program,programInterface,index,propCount,props,bufSize,length,params):pass
|
gpl-3.0
|
obonyojimmy/hospital
|
node_modules/node-gyp/gyp/pylib/gyp/easy_xml_test.py
|
2698
|
3270
|
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the easy_xml.py file. """
import gyp.easy_xml as easy_xml
import unittest
import StringIO
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO.StringIO()
def test_EasyXml_simple(self):
self.assertEqual(
easy_xml.XmlToString(['test']),
'<?xml version="1.0" encoding="utf-8"?><test/>')
self.assertEqual(
easy_xml.XmlToString(['test'], encoding='Windows-1252'),
'<?xml version="1.0" encoding="Windows-1252"?><test/>')
def test_EasyXml_simple_with_attributes(self):
self.assertEqual(
easy_xml.XmlToString(['test2', {'a': 'value1', 'b': 'value2'}]),
'<?xml version="1.0" encoding="utf-8"?><test2 a="value1" b="value2"/>')
def test_EasyXml_escaping(self):
original = '<test>\'"\r&\nfoo'
converted = '<test>\'"
&
foo'
converted_apos = converted.replace("'", ''')
self.assertEqual(
easy_xml.XmlToString(['test3', {'a': original}, original]),
'<?xml version="1.0" encoding="utf-8"?><test3 a="%s">%s</test3>' %
(converted, converted_apos))
def test_EasyXml_pretty(self):
self.assertEqual(
easy_xml.XmlToString(
['test3',
['GrandParent',
['Parent1',
['Child']
],
['Parent2']
]
],
pretty=True),
'<?xml version="1.0" encoding="utf-8"?>\n'
'<test3>\n'
' <GrandParent>\n'
' <Parent1>\n'
' <Child/>\n'
' </Parent1>\n'
' <Parent2/>\n'
' </GrandParent>\n'
'</test3>\n')
def test_EasyXml_complex(self):
# We want to create:
target = (
'<?xml version="1.0" encoding="utf-8"?>'
'<Project>'
'<PropertyGroup Label="Globals">'
'<ProjectGuid>{D2250C20-3A94-4FB9-AF73-11BC5B73884B}</ProjectGuid>'
'<Keyword>Win32Proj</Keyword>'
'<RootNamespace>automated_ui_tests</RootNamespace>'
'</PropertyGroup>'
'<Import Project="$(VCTargetsPath)\\Microsoft.Cpp.props"/>'
'<PropertyGroup '
'Condition="\'$(Configuration)|$(Platform)\'=='
'\'Debug|Win32\'" Label="Configuration">'
'<ConfigurationType>Application</ConfigurationType>'
'<CharacterSet>Unicode</CharacterSet>'
'</PropertyGroup>'
'</Project>')
xml = easy_xml.XmlToString(
['Project',
['PropertyGroup', {'Label': 'Globals'},
['ProjectGuid', '{D2250C20-3A94-4FB9-AF73-11BC5B73884B}'],
['Keyword', 'Win32Proj'],
['RootNamespace', 'automated_ui_tests']
],
['Import', {'Project': '$(VCTargetsPath)\\Microsoft.Cpp.props'}],
['PropertyGroup',
{'Condition': "'$(Configuration)|$(Platform)'=='Debug|Win32'",
'Label': 'Configuration'},
['ConfigurationType', 'Application'],
['CharacterSet', 'Unicode']
]
])
self.assertEqual(xml, target)
if __name__ == '__main__':
unittest.main()
|
mit
|
JJRcop/tgstation
|
tools/midi2piano/pyperclip/__init__.py
|
110
|
3343
|
"""
Pyperclip
A cross-platform clipboard module for Python. (only handles plain text for now)
By Al Sweigart [email protected]
BSD License
Usage:
import pyperclip
pyperclip.copy('The text to be copied to the clipboard.')
spam = pyperclip.paste()
if not pyperclip.copy:
print("Copy functionality unavailable!")
On Windows, no additional modules are needed.
On Mac, the module uses pbcopy and pbpaste, which should come with the os.
On Linux, install xclip or xsel via package manager. For example, in Debian:
sudo apt-get install xclip
Otherwise on Linux, you will need the gtk or PyQt4 modules installed.
gtk and PyQt4 modules are not available for Python 3,
and this module does not work with PyGObject yet.
"""
__version__ = '1.5.27'
import platform
import os
import subprocess
from .clipboards import (init_osx_clipboard,
init_gtk_clipboard, init_qt_clipboard,
init_xclip_clipboard, init_xsel_clipboard,
init_klipper_clipboard, init_no_clipboard)
from .windows import init_windows_clipboard
# `import PyQt4` sys.exit()s if DISPLAY is not in the environment.
# Thus, we need to detect the presence of $DISPLAY manually
# and not load PyQt4 if it is absent.
HAS_DISPLAY = os.getenv("DISPLAY", False)
CHECK_CMD = "where" if platform.system() == "Windows" else "which"
def _executable_exists(name):
return subprocess.call([CHECK_CMD, name],
stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0
def determine_clipboard():
# Determine the OS/platform and set
# the copy() and paste() functions accordingly.
if 'cygwin' in platform.system().lower():
# FIXME: pyperclip currently does not support Cygwin,
# see https://github.com/asweigart/pyperclip/issues/55
pass
elif os.name == 'nt' or platform.system() == 'Windows':
return init_windows_clipboard()
if os.name == 'mac' or platform.system() == 'Darwin':
return init_osx_clipboard()
if HAS_DISPLAY:
# Determine which command/module is installed, if any.
try:
import gtk # check if gtk is installed
except ImportError:
pass
else:
return init_gtk_clipboard()
try:
import PyQt4 # check if PyQt4 is installed
except ImportError:
pass
else:
return init_qt_clipboard()
if _executable_exists("xclip"):
return init_xclip_clipboard()
if _executable_exists("xsel"):
return init_xsel_clipboard()
if _executable_exists("klipper") and _executable_exists("qdbus"):
return init_klipper_clipboard()
return init_no_clipboard()
def set_clipboard(clipboard):
global copy, paste
clipboard_types = {'osx': init_osx_clipboard,
'gtk': init_gtk_clipboard,
'qt': init_qt_clipboard,
'xclip': init_xclip_clipboard,
'xsel': init_xsel_clipboard,
'klipper': init_klipper_clipboard,
'windows': init_windows_clipboard,
'no': init_no_clipboard}
copy, paste = clipboard_types[clipboard]()
copy, paste = determine_clipboard()
__all__ = ["copy", "paste"]
|
agpl-3.0
|
evilnet/x3
|
src/plugins/hangman/plugin.py
|
3
|
6602
|
# anoy module
import _svc
import re
import fileinput
import random
# HANGMAN !!!!
# /---
# | o
# | /|\
# | / \
# =======
class Game:
target = '' #channel or user's nick who we are playing with
word = ''
maskchar = '*'
man = 0
dictionary = "/usr/share/dict/words"
def __init__(self, irc, target, length=0):
self.irc = irc
self.target = target
length = int(length)
if(length > 3 and length < 100):
self.length = length
else:
self.length = random.randrange(5, 9)
# What constitutes a valid word?
self.valid = re.compile(r"^[a-zA-Z]+$")
self.guesses = {}
if(self.length < 3):
self.reply("You can only play with 3 or more letters")
self.man = 9999
return
if(self.newword(self.length)):
self.reply("HANGMAN is starting!")
self.printstatus()
else:
self.reply("Aborting game")
self.man = 9999
return
def validword(self):
if(len(self.word) == self.length and self.valid.search(self.word)):
return True
return False
def newword(self, length):
numlines = 0
for line in open(self.dictionary, "r"):
numlines += 1
tries = 0
if(numlines < 100):
raise Exception("Dictionary has too few words")
while((not self.validword())): #failsafe dont loop forever...
tries += 1
if(tries > 10):
self.reply("Error finding a %s letter word"%length)
return False
#raise(Exception("DictError", "Unable to find %s letter word"%length))
i = 0
randline = random.randrange(1, numlines-1)
for line in open(self.dictionary, 'r'):
if(i >= randline):
self.word = line.rstrip()
if(not self.validword() and i < randline + 50):
continue
else:
break # give up on this block and try again
i += 1
if(len(self.word) < 3):
self.reply("Unable to find a word in the dictionary!")
return False
return True
def maskedword(self):
mask = []
for i in self.word:
if(i in self.guesses or not i.isalpha()):
mask.append(i)
else:
mask.append(self.maskchar)
return(''.join(mask))
def manpart(self, part, num):
if(self.man >= num):
return part
else:
return " "
def printstatus(self):
print("DEBUG: the word is '%s'"%self.word)
self.reply(" /---%s "%( self.manpart(",", 1 )) )
self.reply(" | %s Make "%( self.manpart("o",2)) )
self.reply(" | %s%s%s your "%( self.manpart("/",4), self.manpart("|",3), self.manpart("\\", 5) ) )
self.reply(" | %s %s guess! "%( self.manpart("/",6), self.manpart("\\",7) ))
self.reply(" ====")
self.reply(self.maskedword())
if(self.won() == True):
self.reply("YOU WON! FOR NOW!!")
elif(self.won() == False):
self.reply("Your DEAD! DEAAAAAAAD!")
def won(self):
if(self.man >= 7):
return False
for i in self.word:
if(not i in self.guesses.keys()):
return None
return True
def guess(self, irc, letter):
self.irc = irc
if(self.won() != None):
self.reply("This game is over. Start another!")
return
if(len(letter) > 1):
self.reply("Guess a single letter only, please.")
return
if(not letter.isalpha()):
self.reply("Letters only. Punctuation will be filled in for you.")
return
if(letter in self.guesses):
self.reply("Pay attention! %s has already been guessed! I'm hanging you anyway!"%letter)
self.man += 1
self.printstatus()
return
self.guesses[letter] = True
if(self.won() != None):
pass
elif(self.word.find(letter) >= 0):
self.reply("YOU GOT ONE! But I'll hang you yet!!")
else:
self.reply("NO! MuaHaHaHaHa!")
self.man += 1
self.printstatus()
def reply(self, msg):
self.irc.send_target_privmsg(self.irc.service, self.target, msg)
class Hangman:
config = {}
def __init__(self, handler, irc):
self.handler = handler
self.name = "hangman"
handler.addcommand(self.name, "start", self.start)
handler.addcommand(self.name, "end", self.end)
handler.addcommand(self.name, "guess", self.guess)
self.games = {} # list of game objects
def target(self, irc):
if(len(irc.target)):
return irc.target
else:
return irc.caller
def start(self, irc, arg):
playwith = self.target(irc)
if(playwith in self.games.keys() and self.games[playwith].won() == None):
irc.reply("There is a game is in progress here, End it before you start another.")
return
if(arg.isdigit()):
self.games[playwith] = Game(irc, playwith, arg)
else:
self.games[playwith] = Game(irc, playwith)
def end(self, irc, unused):
playwith = self.target(irc)
if(self.target(irc) in self.games.keys()):
self.games[playwith].reply("Game ended by %s"%irc.caller)
del(self.games[playwith])
else:
irc.reply("No game here to end")
def guess(self, irc, arg):
playwith = self.target(irc)
if(self.target(irc) in self.games.keys()):
self.games[playwith].guess(irc, arg)
else:
irc.reply("No game here in progress. Start one!")
def dance(self, irc, args):
nick = irc.caller
user = _svc.get_user(nick)
reply = "Ok,"
if(user and "account" in user):
reply += " Mr. %s"%user["account"]
reply += " we can dance"
if(len(args)):
reply += " "
reply += args
reply += "."
irc.reply(reply)
def nickof(self, irc, bot):
info = _svc.get_info()
if(bot and bot in info.keys()):
irc.reply("%s has nick %s"%(bot, info[bot]))
else:
irc.reply("I dunno. Try %s"%str(info.keys()))
Class = Hangman
|
gpl-3.0
|
pombredanne/tahoe-lafs
|
src/allmydata/scripts/create_node.py
|
1
|
7744
|
import os, sys
from allmydata.scripts.common import BasedirOptions, NoDefaultBasedirOptions
from allmydata.scripts.default_nodedir import _default_nodedir
from allmydata.util.assertutil import precondition
from allmydata.util.encodingutil import listdir_unicode, argv_to_unicode, quote_local_unicode_path
from allmydata.util import fileutil
dummy_tac = """
import sys
print("Nodes created by Tahoe-LAFS v1.11.0 or later cannot be run by")
print("releases of Tahoe-LAFS before v1.10.0.")
sys.exit(1)
"""
def write_tac(basedir, nodetype):
fileutil.write(os.path.join(basedir, "tahoe-%s.tac" % (nodetype,)), dummy_tac)
class _CreateBaseOptions(BasedirOptions):
optParameters = [
# we provide 'create-node'-time options for the most common
# configuration knobs. The rest can be controlled by editing
# tahoe.cfg before node startup.
("nickname", "n", None, "Specify the nickname for this node."),
("introducer", "i", None, "Specify the introducer FURL to use."),
("webport", "p", "tcp:3456:interface=127.0.0.1",
"Specify which TCP port to run the HTTP interface on. Use 'none' to disable."),
("basedir", "C", None, "Specify which Tahoe base directory should be used. This has the same effect as the global --node-directory option. [default: %s]"
% quote_local_unicode_path(_default_nodedir)),
]
# This is overridden in order to ensure we get a "Wrong number of
# arguments." error when more than one argument is given.
def parseArgs(self, basedir=None):
BasedirOptions.parseArgs(self, basedir)
class CreateClientOptions(_CreateBaseOptions):
synopsis = "[options] [NODEDIR]"
description = "Create a client-only Tahoe-LAFS node (no storage server)."
class CreateNodeOptions(CreateClientOptions):
optFlags = [
("no-storage", None, "Do not offer storage service to other nodes."),
]
synopsis = "[options] [NODEDIR]"
description = "Create a full Tahoe-LAFS node (client+server)."
class CreateIntroducerOptions(NoDefaultBasedirOptions):
subcommand_name = "create-introducer"
description = "Create a Tahoe-LAFS introducer."
def write_node_config(c, config):
# this is shared between clients and introducers
c.write("# -*- mode: conf; coding: utf-8 -*-\n")
c.write("\n")
c.write("# This file controls the configuration of the Tahoe node that\n")
c.write("# lives in this directory. It is only read at node startup.\n")
c.write("# For details about the keys that can be set here, please\n")
c.write("# read the 'docs/configuration.rst' file that came with your\n")
c.write("# Tahoe installation.\n")
c.write("\n\n")
c.write("[node]\n")
nickname = argv_to_unicode(config.get("nickname") or "")
c.write("nickname = %s\n" % (nickname.encode('utf-8'),))
# TODO: validate webport
webport = argv_to_unicode(config.get("webport") or "none")
if webport.lower() == "none":
webport = ""
c.write("web.port = %s\n" % (webport.encode('utf-8'),))
c.write("web.static = public_html\n")
c.write("#tub.port =\n")
c.write("#tub.location = \n")
c.write("#log_gatherer.furl =\n")
c.write("#timeout.keepalive =\n")
c.write("#timeout.disconnect =\n")
c.write("#ssh.port = 8022\n")
c.write("#ssh.authorized_keys_file = ~/.ssh/authorized_keys\n")
c.write("\n")
def create_node(config, out=sys.stdout, err=sys.stderr):
basedir = config['basedir']
# This should always be called with an absolute Unicode basedir.
precondition(isinstance(basedir, unicode), basedir)
if os.path.exists(basedir):
if listdir_unicode(basedir):
print >>err, "The base directory %s is not empty." % quote_local_unicode_path(basedir)
print >>err, "To avoid clobbering anything, I am going to quit now."
print >>err, "Please use a different directory, or empty this one."
return -1
# we're willing to use an empty directory
else:
os.mkdir(basedir)
write_tac(basedir, "client")
c = open(os.path.join(basedir, "tahoe.cfg"), "w")
write_node_config(c, config)
c.write("[client]\n")
c.write("# Which services should this client connect to?\n")
c.write("introducer.furl = %s\n" % config.get("introducer", ""))
c.write("helper.furl =\n")
c.write("#stats_gatherer.furl =\n")
c.write("\n")
c.write("# Encoding parameters this client will use for newly-uploaded files\n")
c.write("# This can be changed at any time: the encoding is saved in\n")
c.write("# each filecap, and we can download old files with any encoding\n")
c.write("# settings\n")
c.write("#shares.needed = 3\n")
c.write("#shares.happy = 7\n")
c.write("#shares.total = 10\n")
c.write("\n")
boolstr = {True:"true", False:"false"}
c.write("[storage]\n")
c.write("# Shall this node provide storage service?\n")
storage_enabled = not config.get("no-storage", None)
c.write("enabled = %s\n" % boolstr[storage_enabled])
c.write("#readonly =\n")
c.write("reserved_space = 1G\n")
c.write("#expire.enabled =\n")
c.write("#expire.mode =\n")
c.write("\n")
c.write("[helper]\n")
c.write("# Shall this node run a helper service that clients can use?\n")
c.write("enabled = false\n")
c.write("\n")
c.write("[drop_upload]\n")
c.write("# Shall this node automatically upload files created or modified in a local directory?\n")
c.write("enabled = false\n")
c.write("# To specify the target of uploads, a mutable directory writecap URI must be placed\n"
"# in 'private/drop_upload_dircap'.\n")
c.write("local.directory = ~/drop_upload\n")
c.write("\n")
c.close()
from allmydata.util import fileutil
fileutil.make_dirs(os.path.join(basedir, "private"), 0700)
print >>out, "Node created in %s" % quote_local_unicode_path(basedir)
if not config.get("introducer", ""):
print >>out, " Please set [client]introducer.furl= in tahoe.cfg!"
print >>out, " The node cannot connect to a grid without it."
if not config.get("nickname", ""):
print >>out, " Please set [node]nickname= in tahoe.cfg"
return 0
def create_client(config, out=sys.stdout, err=sys.stderr):
config['no-storage'] = True
return create_node(config, out=out, err=err)
def create_introducer(config, out=sys.stdout, err=sys.stderr):
basedir = config['basedir']
# This should always be called with an absolute Unicode basedir.
precondition(isinstance(basedir, unicode), basedir)
if os.path.exists(basedir):
if listdir_unicode(basedir):
print >>err, "The base directory %s is not empty." % quote_local_unicode_path(basedir)
print >>err, "To avoid clobbering anything, I am going to quit now."
print >>err, "Please use a different directory, or empty this one."
return -1
# we're willing to use an empty directory
else:
os.mkdir(basedir)
write_tac(basedir, "introducer")
c = open(os.path.join(basedir, "tahoe.cfg"), "w")
write_node_config(c, config)
c.close()
print >>out, "Introducer created in %s" % quote_local_unicode_path(basedir)
return 0
subCommands = [
["create-node", None, CreateNodeOptions, "Create a node that acts as a client, server or both."],
["create-client", None, CreateClientOptions, "Create a client node (with storage initially disabled)."],
["create-introducer", None, CreateIntroducerOptions, "Create an introducer node."],
]
dispatch = {
"create-node": create_node,
"create-client": create_client,
"create-introducer": create_introducer,
}
|
gpl-2.0
|
Metaswitch/calico-nova
|
nova/tests/unit/virt/xenapi/client/test_session.py
|
11
|
6573
|
# Copyright (c) 2014 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import socket
import mock
from nova import exception
from nova.tests.unit.virt.xenapi import stubs
from nova import version
from nova.virt.xenapi.client import session
class SessionTestCase(stubs.XenAPITestBaseNoDB):
@mock.patch.object(session.XenAPISession, '_create_session')
@mock.patch.object(session.XenAPISession, '_get_product_version_and_brand')
@mock.patch.object(session.XenAPISession, '_verify_plugin_version')
def test_session_passes_version(self, mock_verify, mock_version,
create_session):
sess = mock.Mock()
create_session.return_value = sess
mock_version.return_value = ('version', 'brand')
session.XenAPISession('url', 'username', 'password')
expected_version = '%s %s %s' % (version.vendor_string(),
version.product_string(),
version.version_string_with_package())
sess.login_with_password.assert_called_with('username', 'password',
expected_version,
'OpenStack')
class ApplySessionHelpersTestCase(stubs.XenAPITestBaseNoDB):
def setUp(self):
super(ApplySessionHelpersTestCase, self).setUp()
self.session = mock.Mock()
session.apply_session_helpers(self.session)
def test_apply_session_helpers_add_VM(self):
self.session.VM.get_X("ref")
self.session.call_xenapi.assert_called_once_with("VM.get_X", "ref")
def test_apply_session_helpers_add_SR(self):
self.session.SR.get_X("ref")
self.session.call_xenapi.assert_called_once_with("SR.get_X", "ref")
def test_apply_session_helpers_add_VDI(self):
self.session.VDI.get_X("ref")
self.session.call_xenapi.assert_called_once_with("VDI.get_X", "ref")
def test_apply_session_helpers_add_VBD(self):
self.session.VBD.get_X("ref")
self.session.call_xenapi.assert_called_once_with("VBD.get_X", "ref")
def test_apply_session_helpers_add_PBD(self):
self.session.PBD.get_X("ref")
self.session.call_xenapi.assert_called_once_with("PBD.get_X", "ref")
def test_apply_session_helpers_add_PIF(self):
self.session.PIF.get_X("ref")
self.session.call_xenapi.assert_called_once_with("PIF.get_X", "ref")
def test_apply_session_helpers_add_VLAN(self):
self.session.VLAN.get_X("ref")
self.session.call_xenapi.assert_called_once_with("VLAN.get_X", "ref")
def test_apply_session_helpers_add_host(self):
self.session.host.get_X("ref")
self.session.call_xenapi.assert_called_once_with("host.get_X", "ref")
def test_apply_session_helpers_add_network(self):
self.session.network.get_X("ref")
self.session.call_xenapi.assert_called_once_with("network.get_X",
"ref")
def test_apply_session_helpers_add_pool(self):
self.session.pool.get_X("ref")
self.session.call_xenapi.assert_called_once_with("pool.get_X", "ref")
class CallPluginTestCase(stubs.XenAPITestBaseNoDB):
def _get_fake_xapisession(self):
class FakeXapiSession(session.XenAPISession):
def __init__(self, **kwargs):
"Skip the superclass's dirty init"
self.XenAPI = mock.MagicMock()
return FakeXapiSession()
def setUp(self):
super(CallPluginTestCase, self).setUp()
self.session = self._get_fake_xapisession()
def test_serialized_with_retry_socket_error_conn_reset(self):
exc = socket.error
exc.errno = errno.ECONNRESET
plugin = 'glance'
fn = 'download_vhd'
num_retries = 1
callback = None
retry_cb = mock.Mock()
with mock.patch.object(self.session, 'call_plugin_serialized',
autospec=True) as call_plugin_serialized:
call_plugin_serialized.side_effect = exc
self.assertRaises(exception.PluginRetriesExceeded,
self.session.call_plugin_serialized_with_retry, plugin, fn,
num_retries, callback, retry_cb)
call_plugin_serialized.assert_called_with(plugin, fn)
self.assertEqual(2, call_plugin_serialized.call_count)
self.assertEqual(2, retry_cb.call_count)
def test_serialized_with_retry_socket_error_reraised(self):
exc = socket.error
exc.errno = errno.ECONNREFUSED
plugin = 'glance'
fn = 'download_vhd'
num_retries = 1
callback = None
retry_cb = mock.Mock()
with mock.patch.object(self.session, 'call_plugin_serialized',
autospec=True) as call_plugin_serialized:
call_plugin_serialized.side_effect = exc
self.assertRaises(socket.error,
self.session.call_plugin_serialized_with_retry, plugin, fn,
num_retries, callback, retry_cb)
call_plugin_serialized.assert_called_once_with(plugin, fn)
self.assertEqual(0, retry_cb.call_count)
def test_serialized_with_retry_socket_reset_reraised(self):
exc = socket.error
exc.errno = errno.ECONNRESET
plugin = 'glance'
fn = 'download_vhd'
num_retries = 1
callback = None
retry_cb = mock.Mock()
with mock.patch.object(self.session, 'call_plugin_serialized',
autospec=True) as call_plugin_serialized:
call_plugin_serialized.side_effect = exc
self.assertRaises(exception.PluginRetriesExceeded,
self.session.call_plugin_serialized_with_retry, plugin, fn,
num_retries, callback, retry_cb)
call_plugin_serialized.assert_called_with(plugin, fn)
self.assertEqual(2, call_plugin_serialized.call_count)
|
apache-2.0
|
kenorb-contrib/BitTorrent
|
twisted/test/test_app.py
|
16
|
4746
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for twisted.internet.app.
"""
from twisted.trial import unittest, util
from twisted.internet import app, protocol, error
from twisted.internet.defer import succeed, fail, SUCCESS, FAILURE
from twisted.python import log
import warnings
class AppTestCase(unittest.TestCase):
suppress = [util.suppress(message='twisted.internet.app is deprecated',
category=DeprecationWarning)]
def testListenUnlistenTCP(self):
a = app.Application("foo")
f = protocol.ServerFactory()
a.listenTCP(9999, f)
a.listenTCP(9998, f)
self.assertEquals(len(a.tcpPorts), 2)
a.unlistenTCP(9999)
self.assertEquals(len(a.tcpPorts), 1)
a.listenTCP(9999, f, interface='127.0.0.1')
self.assertEquals(len(a.tcpPorts), 2)
a.unlistenTCP(9999, '127.0.0.1')
self.assertEquals(len(a.tcpPorts), 1)
a.unlistenTCP(9998)
self.assertEquals(len(a.tcpPorts), 0)
def testListenUnlistenUDP(self):
a = app.Application("foo")
f = protocol.DatagramProtocol()
a.listenUDP(9999, f)
a.listenUDP(9998, f)
self.assertEquals(len(a.udpPorts), 2)
a.unlistenUDP(9999)
self.assertEquals(len(a.udpPorts), 1)
a.listenUDP(9999, f, interface='127.0.0.1')
self.assertEquals(len(a.udpPorts), 2)
a.unlistenUDP(9999, '127.0.0.1')
self.assertEquals(len(a.udpPorts), 1)
a.unlistenUDP(9998)
self.assertEquals(len(a.udpPorts), 0)
def testListenUnlistenUNIX(self):
a = app.Application("foo")
f = protocol.ServerFactory()
a.listenUNIX("xxx", f)
self.assertEquals(len(a.unixPorts), 1)
a.unlistenUNIX("xxx")
self.assertEquals(len(a.unixPorts), 0)
def testIllegalUnlistens(self):
a = app.Application("foo")
self.assertRaises(error.NotListeningError, a.unlistenTCP, 1010)
self.assertRaises(error.NotListeningError, a.unlistenUNIX, '1010')
self.assertRaises(error.NotListeningError, a.unlistenSSL, 1010)
self.assertRaises(error.NotListeningError, a.unlistenUDP, 1010)
class ServiceTestCase(unittest.TestCase):
def testRegisterService(self):
a = app.Application("foo")
svc = app.ApplicationService("service", a)
self.assertEquals(a.getServiceNamed("service"), svc)
self.assertEquals(a, svc.serviceParent)
testRegisterService.suppress = [util.suppress(message='twisted.internet.app is deprecated',
category=DeprecationWarning)]
class StopError(Exception): pass
class StoppingService(app.ApplicationService):
def __init__(self, name, succeed):
app.ApplicationService.__init__(self, name)
self.succeed = succeed
def stopService(self):
if self.succeed:
return succeed("yay!")
else:
return fail(StopError('boo'))
class StoppingServiceII(app.ApplicationService):
def stopService(self):
# The default stopService returns None.
return None # return app.ApplicationService.stopService(self)
class MultiServiceTestCase(unittest.TestCase):
def setUp(self):
self.callbackRan = 0
def testDeferredStopService(self):
ms = app.MultiService("MultiService")
self.s1 = StoppingService("testService", 0)
self.s2 = StoppingService("testService2", 1)
ms.addService(self.s1)
ms.addService(self.s2)
ms.stopService().addCallback(self.woohoo)
log.flushErrors (StopError)
def woohoo(self, res):
self.callbackRan = 1
self.assertEqual(res[self.s1][0], 0)
self.assertEqual(res[self.s2][0], 1)
def testStopServiceNone(self):
"""MultiService.stopService returns Deferred when service returns None.
"""
ms = app.MultiService("MultiService")
self.s1 = StoppingServiceII("testService")
ms.addService(self.s1)
d = ms.stopService()
d.addCallback(self.cb_nonetest)
log.flushErrors (StopError)
def cb_nonetest(self, res):
self.callbackRan = 1
self.assertEqual((SUCCESS, None), res[self.s1])
def testEmptyStopService(self):
"""MutliService.stopService returns Deferred when empty."""
ms = app.MultiService("MultiService")
d = ms.stopService()
d.addCallback(self.cb_emptytest)
def cb_emptytest(self, res):
self.callbackRan = 1
self.assertEqual(len(res), 0)
def tearDown(self):
log.flushErrors (StopError)
self.failUnless(self.callbackRan, "Callback was never run.")
|
gpl-3.0
|
shimpe/frescobaldi
|
frescobaldi_app/logtool/__init__.py
|
1
|
3833
|
# This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
The log dockwindow.
"""
from __future__ import unicode_literals
from PyQt4.QtCore import QSettings, Qt
from PyQt4.QtGui import QAction, QKeySequence
import actioncollection
import actioncollectionmanager
import app
import panel
class LogTool(panel.Panel):
"""A dockwidget showing the log of running Jobs."""
def __init__(self, mainwindow):
super(LogTool, self).__init__(mainwindow)
self.hide()
self.toggleViewAction().setShortcut(QKeySequence("Meta+Alt+L"))
ac = self.actionCollection = Actions()
ac.log_next_error.triggered.connect(self.slotNextError)
ac.log_previous_error.triggered.connect(self.slotPreviousError)
actioncollectionmanager.manager(mainwindow).addActionCollection(ac)
mainwindow.addDockWidget(Qt.BottomDockWidgetArea, self)
app.jobStarted.connect(self.slotJobStarted)
app.jobFinished.connect(self.slotJobFinished)
def translateUI(self):
self.setWindowTitle(_("LilyPond Log"))
self.toggleViewAction().setText(_("LilyPond &Log"))
def createWidget(self):
from . import logwidget
return logwidget.LogWidget(self)
def slotJobStarted(self, doc, job):
"""Called whenever job starts, decides whether to follow it and show the log."""
import jobattributes
jattrs = jobattributes.get(job)
if doc == self.mainwindow().currentDocument() or self.mainwindow() == jattrs.mainwindow:
self.widget().switchDocument(doc)
if not jattrs.hidden and QSettings().value("log/show_on_start", True, bool):
self.show()
def slotJobFinished(self, document, job, success):
import jobattributes
if (not success and not job.is_aborted()
and not jobattributes.get(job).hidden
and document == self.mainwindow().currentDocument()):
self.show()
def slotNextError(self):
"""Jumps to the position pointed to by the next error message."""
self.activate()
self.widget().gotoError(1)
def slotPreviousError(self):
"""Jumps to the position pointed to by the next error message."""
self.activate()
self.widget().gotoError(-1)
class Actions(actioncollection.ActionCollection):
name = "logtool"
def createActions(self, parent=None):
self.log_next_error = QAction(parent)
self.log_previous_error = QAction(parent)
self.log_next_error.setShortcut(QKeySequence("Ctrl+E"))
self.log_previous_error.setShortcut(QKeySequence("Ctrl+Shift+E"))
def translateUI(self):
self.log_next_error.setText(_("Next Error Message"))
self.log_previous_error.setText(_("Previous Error Message"))
# log errors by initializing Errors instance
@app.jobStarted.connect
def _log_errors(document):
from . import errors
errors.errors(document)
|
gpl-2.0
|
xfournet/intellij-community
|
python/lib/Lib/site-packages/django/contrib/admin/widgets.py
|
73
|
11754
|
"""
Form Widget classes specific to the Django admin site.
"""
import django.utils.copycompat as copy
from django import forms
from django.forms.widgets import RadioFieldRenderer
from django.forms.util import flatatt
from django.utils.html import escape
from django.utils.text import truncate_words
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
from django.utils.encoding import force_unicode
from django.conf import settings
from django.core.urlresolvers import reverse, NoReverseMatch
class FilteredSelectMultiple(forms.SelectMultiple):
"""
A SelectMultiple with a JavaScript filter interface.
Note that the resulting JavaScript assumes that the jsi18n
catalog has been loaded in the page
"""
class Media:
js = (settings.ADMIN_MEDIA_PREFIX + "js/core.js",
settings.ADMIN_MEDIA_PREFIX + "js/SelectBox.js",
settings.ADMIN_MEDIA_PREFIX + "js/SelectFilter2.js")
def __init__(self, verbose_name, is_stacked, attrs=None, choices=()):
self.verbose_name = verbose_name
self.is_stacked = is_stacked
super(FilteredSelectMultiple, self).__init__(attrs, choices)
def render(self, name, value, attrs=None, choices=()):
if attrs is None: attrs = {}
attrs['class'] = 'selectfilter'
if self.is_stacked: attrs['class'] += 'stacked'
output = [super(FilteredSelectMultiple, self).render(name, value, attrs, choices)]
output.append(u'<script type="text/javascript">addEvent(window, "load", function(e) {')
# TODO: "id_" is hard-coded here. This should instead use the correct
# API to determine the ID dynamically.
output.append(u'SelectFilter.init("id_%s", "%s", %s, "%s"); });</script>\n' % \
(name, self.verbose_name.replace('"', '\\"'), int(self.is_stacked), settings.ADMIN_MEDIA_PREFIX))
return mark_safe(u''.join(output))
class AdminDateWidget(forms.DateInput):
class Media:
js = (settings.ADMIN_MEDIA_PREFIX + "js/calendar.js",
settings.ADMIN_MEDIA_PREFIX + "js/admin/DateTimeShortcuts.js")
def __init__(self, attrs={}, format=None):
super(AdminDateWidget, self).__init__(attrs={'class': 'vDateField', 'size': '10'}, format=format)
class AdminTimeWidget(forms.TimeInput):
class Media:
js = (settings.ADMIN_MEDIA_PREFIX + "js/calendar.js",
settings.ADMIN_MEDIA_PREFIX + "js/admin/DateTimeShortcuts.js")
def __init__(self, attrs={}, format=None):
super(AdminTimeWidget, self).__init__(attrs={'class': 'vTimeField', 'size': '8'}, format=format)
class AdminSplitDateTime(forms.SplitDateTimeWidget):
"""
A SplitDateTime Widget that has some admin-specific styling.
"""
def __init__(self, attrs=None):
widgets = [AdminDateWidget, AdminTimeWidget]
# Note that we're calling MultiWidget, not SplitDateTimeWidget, because
# we want to define widgets.
forms.MultiWidget.__init__(self, widgets, attrs)
def format_output(self, rendered_widgets):
return mark_safe(u'<p class="datetime">%s %s<br />%s %s</p>' % \
(_('Date:'), rendered_widgets[0], _('Time:'), rendered_widgets[1]))
class AdminRadioFieldRenderer(RadioFieldRenderer):
def render(self):
"""Outputs a <ul> for this set of radio fields."""
return mark_safe(u'<ul%s>\n%s\n</ul>' % (
flatatt(self.attrs),
u'\n'.join([u'<li>%s</li>' % force_unicode(w) for w in self]))
)
class AdminRadioSelect(forms.RadioSelect):
renderer = AdminRadioFieldRenderer
class AdminFileWidget(forms.ClearableFileInput):
template_with_initial = (u'<p class="file-upload">%s</p>'
% forms.ClearableFileInput.template_with_initial)
template_with_clear = (u'<span class="clearable-file-input">%s</span>'
% forms.ClearableFileInput.template_with_clear)
class ForeignKeyRawIdWidget(forms.TextInput):
"""
A Widget for displaying ForeignKeys in the "raw_id" interface rather than
in a <select> box.
"""
def __init__(self, rel, attrs=None, using=None):
self.rel = rel
self.db = using
super(ForeignKeyRawIdWidget, self).__init__(attrs)
def render(self, name, value, attrs=None):
if attrs is None:
attrs = {}
related_url = '../../../%s/%s/' % (self.rel.to._meta.app_label, self.rel.to._meta.object_name.lower())
params = self.url_parameters()
if params:
url = '?' + '&'.join(['%s=%s' % (k, v) for k, v in params.items()])
else:
url = ''
if "class" not in attrs:
attrs['class'] = 'vForeignKeyRawIdAdminField' # The JavaScript looks for this hook.
output = [super(ForeignKeyRawIdWidget, self).render(name, value, attrs)]
# TODO: "id_" is hard-coded here. This should instead use the correct
# API to determine the ID dynamically.
output.append('<a href="%s%s" class="related-lookup" id="lookup_id_%s" onclick="return showRelatedObjectLookupPopup(this);"> ' % \
(related_url, url, name))
output.append('<img src="%simg/admin/selector-search.gif" width="16" height="16" alt="%s" /></a>' % (settings.ADMIN_MEDIA_PREFIX, _('Lookup')))
if value:
output.append(self.label_for_value(value))
return mark_safe(u''.join(output))
def base_url_parameters(self):
params = {}
if self.rel.limit_choices_to and hasattr(self.rel.limit_choices_to, 'items'):
items = []
for k, v in self.rel.limit_choices_to.items():
if isinstance(v, list):
v = ','.join([str(x) for x in v])
else:
v = str(v)
items.append((k, v))
params.update(dict(items))
return params
def url_parameters(self):
from django.contrib.admin.views.main import TO_FIELD_VAR
params = self.base_url_parameters()
params.update({TO_FIELD_VAR: self.rel.get_related_field().name})
return params
def label_for_value(self, value):
key = self.rel.get_related_field().name
try:
obj = self.rel.to._default_manager.using(self.db).get(**{key: value})
return ' <strong>%s</strong>' % escape(truncate_words(obj, 14))
except (ValueError, self.rel.to.DoesNotExist):
return ''
class ManyToManyRawIdWidget(ForeignKeyRawIdWidget):
"""
A Widget for displaying ManyToMany ids in the "raw_id" interface rather than
in a <select multiple> box.
"""
def render(self, name, value, attrs=None):
if attrs is None:
attrs = {}
attrs['class'] = 'vManyToManyRawIdAdminField'
if value:
value = ','.join([force_unicode(v) for v in value])
else:
value = ''
return super(ManyToManyRawIdWidget, self).render(name, value, attrs)
def url_parameters(self):
return self.base_url_parameters()
def label_for_value(self, value):
return ''
def value_from_datadict(self, data, files, name):
value = data.get(name)
if value:
return value.split(',')
def _has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
for pk1, pk2 in zip(initial, data):
if force_unicode(pk1) != force_unicode(pk2):
return True
return False
class RelatedFieldWidgetWrapper(forms.Widget):
"""
This class is a wrapper to a given widget to add the add icon for the
admin interface.
"""
def __init__(self, widget, rel, admin_site, can_add_related=None):
self.is_hidden = widget.is_hidden
self.needs_multipart_form = widget.needs_multipart_form
self.attrs = widget.attrs
self.choices = widget.choices
self.widget = widget
self.rel = rel
# Backwards compatible check for whether a user can add related
# objects.
if can_add_related is None:
can_add_related = rel.to in admin_site._registry
self.can_add_related = can_add_related
# so we can check if the related object is registered with this AdminSite
self.admin_site = admin_site
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.widget = copy.deepcopy(self.widget, memo)
obj.attrs = self.widget.attrs
memo[id(self)] = obj
return obj
def _media(self):
return self.widget.media
media = property(_media)
def render(self, name, value, *args, **kwargs):
rel_to = self.rel.to
info = (rel_to._meta.app_label, rel_to._meta.object_name.lower())
try:
related_url = reverse('admin:%s_%s_add' % info, current_app=self.admin_site.name)
except NoReverseMatch:
info = (self.admin_site.root_path, rel_to._meta.app_label, rel_to._meta.object_name.lower())
related_url = '%s%s/%s/add/' % info
self.widget.choices = self.choices
output = [self.widget.render(name, value, *args, **kwargs)]
if self.can_add_related:
# TODO: "id_" is hard-coded here. This should instead use the correct
# API to determine the ID dynamically.
output.append(u'<a href="%s" class="add-another" id="add_id_%s" onclick="return showAddAnotherPopup(this);"> ' % \
(related_url, name))
output.append(u'<img src="%simg/admin/icon_addlink.gif" width="10" height="10" alt="%s"/></a>' % (settings.ADMIN_MEDIA_PREFIX, _('Add Another')))
return mark_safe(u''.join(output))
def build_attrs(self, extra_attrs=None, **kwargs):
"Helper function for building an attribute dictionary."
self.attrs = self.widget.build_attrs(extra_attrs=None, **kwargs)
return self.attrs
def value_from_datadict(self, data, files, name):
return self.widget.value_from_datadict(data, files, name)
def _has_changed(self, initial, data):
return self.widget._has_changed(initial, data)
def id_for_label(self, id_):
return self.widget.id_for_label(id_)
class AdminTextareaWidget(forms.Textarea):
def __init__(self, attrs=None):
final_attrs = {'class': 'vLargeTextField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminTextareaWidget, self).__init__(attrs=final_attrs)
class AdminTextInputWidget(forms.TextInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vTextField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminTextInputWidget, self).__init__(attrs=final_attrs)
class AdminURLFieldWidget(forms.TextInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vURLField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminURLFieldWidget, self).__init__(attrs=final_attrs)
class AdminIntegerFieldWidget(forms.TextInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vIntegerField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminIntegerFieldWidget, self).__init__(attrs=final_attrs)
class AdminCommaSeparatedIntegerFieldWidget(forms.TextInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vCommaSeparatedIntegerField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminCommaSeparatedIntegerFieldWidget, self).__init__(attrs=final_attrs)
|
apache-2.0
|
maxisi/gwsumm
|
gwsumm/globalv.py
|
1
|
1220
|
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2013)
#
# This file is part of GWSumm.
#
# GWSumm is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWSumm is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWSumm. If not, see <http://www.gnu.org/licenses/>.
"""Set of global memory variables for GWSumm package
"""
import time
from gwpy.time import tconvert
from gwpy.segments import DataQualityDict
from gwpy.detector import ChannelList
CHANNELS = ChannelList()
STATES = {}
DATA = {}
SPECTROGRAMS = {}
SPECTRUM = {}
SEGMENTS = DataQualityDict()
TRIGGERS = {}
VERBOSE = False
PROFILE = False
START = time.time()
# run time variables
MODE = 4
WRITTEN_PLOTS = []
NOW = tconvert('now').seconds
HTMLONLY = False
# comments
IFO = None
HTML_COMMENTS_NAME = None
|
gpl-3.0
|
shootstar/novatest
|
nova/scheduler/filters/retry_filter.py
|
4
|
1724
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.openstack.common import log as logging
from nova.scheduler import filters
LOG = logging.getLogger(__name__)
class RetryFilter(filters.BaseHostFilter):
"""Filter out nodes that have already been attempted for scheduling
purposes
"""
def host_passes(self, host_state, filter_properties):
"""Skip nodes that have already been attempted."""
retry = filter_properties.get('retry', None)
if not retry:
# Re-scheduling is disabled
LOG.debug("Re-scheduling is disabled")
return True
hosts = retry.get('hosts', [])
host = [host_state.host, host_state.nodename]
passes = host not in hosts
pass_msg = "passes" if passes else "fails"
LOG.debug(_("Host %(host)s %(pass_msg)s. Previously tried hosts: "
"%(hosts)s") % {'host': host,
'pass_msg': pass_msg,
'hosts': hosts})
# Host passes if it's not in the list of previously attempted hosts:
return passes
|
apache-2.0
|
mjpatter88/mjpython
|
test/int/test_integration.py
|
1
|
7967
|
from virtual_machine import VirtualMachine
class TestByteCodeObjectExecution():
def setup_method(self):
self.vm = VirtualMachine()
def test_returning_const(self):
def test_func():
return 10
assert self.vm.run_code(test_func.__code__) == 10
def test_returning_a_large_const(self):
def test_func():
return 100000000
assert self.vm.run_code(test_func.__code__) == 100000000
def test_adding_two_constants(self):
def test_func():
return 10 + 20
assert self.vm.run_code(test_func.__code__) == 30
def test_adding_a_constant_and_a_variable(self):
def test_func():
a = 15
return a + 20
assert self.vm.run_code(test_func.__code__) == 35
def test_adding_two_variables(self):
def test_func():
a = 15
b = 27
return a + b
assert self.vm.run_code(test_func.__code__) == 42
def test_subtracting_two_variables(self):
def test_func():
a = 15
b = 27
return b - a
assert self.vm.run_code(test_func.__code__) == 12
def test_multiplying_two_variables(self):
def test_func():
a = 15
b = 27
return a * b
assert self.vm.run_code(test_func.__code__) == 405
def test_in_place_add(self):
def test_func():
a = 15
a += 5
return a
assert self.vm.run_code(test_func.__code__) == 20
def test_in_place_floor_division(self):
def test_func():
a = 10
a //= 3
return a
assert self.vm.run_code(test_func.__code__) == 3
def test_if_else__takes_if_branch(self):
def test_func():
x = 3
if x < 5:
return 'yes'
else:
return 'no'
assert self.vm.run_code(test_func.__code__) == 'yes'
def test_if_else__takes_else_branch(self):
def test_func():
x = 8
if x < 5:
return 'yes'
else:
return 'no'
assert self.vm.run_code(test_func.__code__) == 'no'
def test_while_loop(self):
def test_func():
x = 10
while x < 20:
x = x + 1
return x
assert self.vm.run_code(test_func.__code__) == 20
def test_while_loop_break(self):
def test_func():
x = 10
while x < 20:
x = x + 1
break
return x
assert self.vm.run_code(test_func.__code__) == 11
def test_while_loop_continue(self):
def test_func():
x = 10
y = 0
while y < 5:
y = y + 1
if True:
continue
x += 10
return x
assert self.vm.run_code(test_func.__code__) == 10
def test_nested_while_loop(self):
def test_func():
a = 0
x = 0
y = 0
while x < 10:
y = 0
while y < 11:
a += 1
y += 1
x += 1
return a
assert self.vm.run_code(test_func.__code__) == 110
def test_built_in_functions(self):
def test_func():
return abs(-5)
assert self.vm.run_code(test_func.__code__) == 5
def test_built_in_sum_function(self):
def test_func():
return sum((1,2,3,4))
assert self.vm.run_code(test_func.__code__) == 10
def test_make_and_call_function(self):
def test_func():
def test_inner_func():
return 7
return test_inner_func()
assert self.vm.run_code(test_func.__code__) == 7
def test_make_and_call_function_pos_args(self):
def test_func():
def test_inner_func(a, b):
return a + b
return test_inner_func(10, 15)
assert self.vm.run_code(test_func.__code__) == 25
def test_make_and_call_function_pos_args_ordering(self):
def test_func():
def test_inner_func(a, b):
return a - b
return test_inner_func(15, 10)
assert self.vm.run_code(test_func.__code__) == 5
def test_make_and_call_function_keyword_args(self):
def test_func():
def test_inner_func(a=0, b=0):
return a + b
return test_inner_func(a=10, b=15)
assert self.vm.run_code(test_func.__code__) == 25
def test_make_and_call_function_keyword_args_reverse_order(self):
def test_func():
def test_inner_func(a=0, b=0):
return a - b
return test_inner_func(b=15, a=10)
assert self.vm.run_code(test_func.__code__) == -5
def test_make_and_call_function_keyword_args_and_pos_args(self):
def test_func():
def test_inner_func(a, b, c=100, d=200):
return a + b - (c + d)
return test_inner_func(14, 13, c=4, d=3)
assert self.vm.run_code(test_func.__code__) == 20
def test_make_and_call_function_with_var_args(self):
def test_func():
def test_inner_func(*args):
a = sum(args)
return a
return test_inner_func(1, 2, 3, 4)
assert self.vm.run_code(test_func.__code__) == 10
def test_make_and_call_function_with_var_args_and_var_kw_args(self):
def test_func():
def test_inner_func(*args, **kwargs):
a = sum(args)
a += kwargs['bonus']
return a
return test_inner_func(1, 2, 3, 4, bonus=10)
assert self.vm.run_code(test_func.__code__) == 20
def test_make_and_call_function_pos_args_defaul_values(self):
def test_func():
def test_inner_func(a=4, b=7, c=1):
return a + b - c
return test_inner_func()
assert self.vm.run_code(test_func.__code__) == 10
def test_make_and_call_function_keyword_args_defaul_values(self):
def test_func():
def test_inner_func(a, *args, b=6, c=1):
return a + b - c
return test_inner_func(14, b=7)
assert self.vm.run_code(test_func.__code__) == 20
# def test_make_and_call_function_closure(self):
# def test_func():
# a = 3
# def test_inner_func():
# return 7 + a
# return test_inner_func()
# assert self.vm.run_code(test_func.__code__) == 10
def test_import_a_std_lib(self):
def test_func():
import math
return math
import math
assert self.vm.run_code(test_func.__code__) == math
def test_import_attr_from_a_std_lib(self):
def test_func():
from random import shuffle
return shuffle
from random import shuffle
assert self.vm.run_code(test_func.__code__) == shuffle
def test_import_multiple_attr_from_a_std_lib(self):
def test_func():
from math import pi, e, tau
return pi + e + tau
from math import pi, e, tau
assert self.vm.run_code(test_func.__code__) == pi + e + tau
def test_import_mod_from_a_std_lib(self):
def test_func():
from test import support
return support
from test import support
assert self.vm.run_code(test_func.__code__) == support
def test_build_class(self):
def test_func():
class Foo:
pass
return Foo
assert type(self.vm.run_code(test_func.__code__)) == type(type)
def test_load_attribute(self):
def test_func():
import math
a = math.pi
return a
import math
assert self.vm.run_code(test_func.__code__) == math.pi
|
mit
|
4eek/edx-platform
|
cms/djangoapps/contentstore/views/certificates.py
|
14
|
21676
|
"""
Certificates Data Model:
course.certificates: {
'certificates': [
{
'version': 1, // data contract version
'id': 12345, // autogenerated identifier
'name': 'Certificate 1',
'description': 'Certificate 1 Description',
'course_title': 'course title',
'signatories': [
{
'id': 24680, // autogenerated identifier
'name': 'Dr. Bob Smith',
'title': 'Dean of the College',
'organization': 'Awesome College'
}
]
}
]
}
"""
import json
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import ensure_csrf_cookie
from django.http import HttpResponse
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_http_methods
from contentstore.utils import reverse_course_url
from edxmako.shortcuts import render_to_response
from opaque_keys.edx.keys import CourseKey, AssetKey
from eventtracking import tracker
from student.auth import has_studio_write_access
from student.roles import GlobalStaff
from util.db import generate_int_id, MYSQL_MAX_INT
from util.json_request import JsonResponse
from xmodule.modulestore import EdxJSONEncoder
from xmodule.modulestore.django import modulestore
from contentstore.views.assets import delete_asset
from contentstore.views.exception import AssetNotFoundException
from django.core.exceptions import PermissionDenied
from course_modes.models import CourseMode
from contentstore.utils import get_lms_link_for_certificate_web_view
CERTIFICATE_SCHEMA_VERSION = 1
CERTIFICATE_MINIMUM_ID = 100
def _get_course_and_check_access(course_key, user, depth=0):
"""
Internal method used to calculate and return the locator and
course module for the view functions in this file.
"""
if not has_studio_write_access(user, course_key):
raise PermissionDenied()
course_module = modulestore().get_course(course_key, depth=depth)
return course_module
def _delete_asset(course_key, asset_key_string):
"""
Internal method used to create asset key from string and
remove asset by calling delete_asset method of assets module.
"""
if asset_key_string:
# remove first slash in asset path
# otherwise it generates InvalidKeyError in case of split modulestore
if '/' == asset_key_string[0]:
asset_key_string = asset_key_string[1:]
asset_key = AssetKey.from_string(asset_key_string)
try:
delete_asset(course_key, asset_key)
# If the asset was not found, it doesn't have to be deleted...
except AssetNotFoundException:
pass
# Certificates Exceptions
class CertificateException(Exception):
"""
Base exception for Certificates workflows
"""
pass
class CertificateValidationError(CertificateException):
"""
An exception raised when certificate information is invalid.
"""
pass
class CertificateManager(object):
"""
The CertificateManager is responsible for storage, retrieval, and manipulation of Certificates
Certificates are not stored in the Django ORM, they are a field/setting on the course descriptor
"""
@staticmethod
def parse(json_string):
"""
Deserialize the provided JSON data into a standard Python object
"""
try:
certificate = json.loads(json_string)
except ValueError:
raise CertificateValidationError(_("invalid JSON"))
# Include the data contract version
certificate["version"] = CERTIFICATE_SCHEMA_VERSION
# Ensure a signatories list is always returned
if certificate.get("signatories") is None:
certificate["signatories"] = []
certificate["editing"] = False
return certificate
@staticmethod
def validate(certificate_data):
"""
Ensure the certificate data contains all of the necessary fields and the values match our rules
"""
# Ensure the schema version meets our expectations
if certificate_data.get("version") != CERTIFICATE_SCHEMA_VERSION:
raise TypeError(
"Unsupported certificate schema version: {0}. Expected version: {1}.".format(
certificate_data.get("version"),
CERTIFICATE_SCHEMA_VERSION
)
)
if not certificate_data.get("name"):
raise CertificateValidationError(_("must have name of the certificate"))
@staticmethod
def get_used_ids(course):
"""
Return a list of certificate identifiers that are already in use for this course
"""
if not course.certificates or not course.certificates.get('certificates'):
return []
return [cert['id'] for cert in course.certificates['certificates']]
@staticmethod
def assign_id(course, certificate_data, certificate_id=None):
"""
Assign an identifier to the provided certificate data.
If the caller did not provide an identifier, we autogenerate a unique one for them
In addition, we check the certificate's signatories and ensure they also have unique ids
"""
used_ids = CertificateManager.get_used_ids(course)
if certificate_id:
certificate_data['id'] = int(certificate_id)
else:
certificate_data['id'] = generate_int_id(
CERTIFICATE_MINIMUM_ID,
MYSQL_MAX_INT,
used_ids
)
for index, signatory in enumerate(certificate_data['signatories']): # pylint: disable=unused-variable
if signatory and not signatory.get('id', False):
signatory['id'] = generate_int_id(used_ids=used_ids)
used_ids.append(signatory['id'])
return certificate_data
@staticmethod
def serialize_certificate(certificate):
"""
Serialize the Certificate object's locally-stored certificate data to a JSON representation
We use direct access here for specific keys in order to enforce their presence
"""
certificate_data = certificate.certificate_data
certificate_response = {
"id": certificate_data['id'],
"name": certificate_data['name'],
"description": certificate_data['description'],
"is_active": certificate_data['is_active'],
"version": CERTIFICATE_SCHEMA_VERSION,
"signatories": certificate_data['signatories']
}
# Some keys are not required, such as the title override...
if certificate_data.get('course_title'):
certificate_response["course_title"] = certificate_data['course_title']
return certificate_response
@staticmethod
def deserialize_certificate(course, value):
"""
Deserialize from a JSON representation into a Certificate object.
'value' should be either a Certificate instance, or a valid JSON string
"""
# Ensure the schema fieldset meets our expectations
for key in ("name", "description", "version"):
if key not in value:
raise CertificateValidationError(_("Certificate dict {0} missing value key '{1}'").format(value, key))
# Load up the Certificate data
certificate_data = CertificateManager.parse(value)
CertificateManager.validate(certificate_data)
certificate_data = CertificateManager.assign_id(course, certificate_data, certificate_data.get('id', None))
certificate = Certificate(course, certificate_data)
# Return a new Certificate object instance
return certificate
@staticmethod
def get_certificates(course, only_active=False):
"""
Retrieve the certificates list from the provided course,
if `only_active` is True it would skip inactive certificates.
"""
# The top-level course field is 'certificates', which contains various properties,
# including the actual 'certificates' list that we're working with in this context
certificates = course.certificates.get('certificates', [])
if only_active:
certificates = [certificate for certificate in certificates if certificate.get('is_active', False)]
return certificates
@staticmethod
def remove_certificate(request, store, course, certificate_id):
"""
Remove certificate from the course
"""
for index, cert in enumerate(course.certificates['certificates']):
if int(cert['id']) == int(certificate_id):
certificate = course.certificates['certificates'][index]
# Remove any signatory assets prior to dropping the entire cert record from the course
for sig_index, signatory in enumerate(certificate.get('signatories')): # pylint: disable=unused-variable
_delete_asset(course.id, signatory['signature_image_path'])
# Now drop the certificate record
course.certificates['certificates'].pop(index)
store.update_item(course, request.user.id)
break
# pylint-disable: unused-variable
@staticmethod
def remove_signatory(request, store, course, certificate_id, signatory_id):
"""
Remove the specified signatory from the provided course certificate
"""
for cert_index, cert in enumerate(course.certificates['certificates']): # pylint: disable=unused-variable
if int(cert['id']) == int(certificate_id):
for sig_index, signatory in enumerate(cert.get('signatories')): # pylint: disable=unused-variable
if int(signatory_id) == int(signatory['id']):
_delete_asset(course.id, signatory['signature_image_path'])
del cert['signatories'][sig_index]
store.update_item(course, request.user.id)
break
@staticmethod
def track_event(event_name, event_data):
"""Track certificate configuration event.
Arguments:
event_name (str): Name of the event to be logged.
event_data (dict): A Dictionary containing event data
Returns:
None
"""
event_name = '.'.join(['edx', 'certificate', 'configuration', event_name])
tracker.emit(event_name, event_data)
class Certificate(object):
"""
The logical representation of an individual course certificate
"""
def __init__(self, course, certificate_data):
"""
Instantiate a Certificate object instance using the provided information.
"""
self.course = course
self._certificate_data = certificate_data
self.id = certificate_data['id'] # pylint: disable=invalid-name
@property
def certificate_data(self):
"""
Retrieve the locally-stored certificate data from the Certificate object via a helper method
"""
return self._certificate_data
@login_required
@require_http_methods(("POST",))
@ensure_csrf_cookie
def certificate_activation_handler(request, course_key_string):
"""
A handler for Certificate Activation/Deactivation
POST
json: is_active. update the activation state of certificate
"""
# Only global staff (PMs) are able to activate/deactivate certificate configuration
if not GlobalStaff().has_user(request.user):
raise PermissionDenied()
course_key = CourseKey.from_string(course_key_string)
store = modulestore()
try:
course = _get_course_and_check_access(course_key, request.user)
except PermissionDenied:
msg = _('PermissionDenied: Failed in authenticating {user}').format(user=request.user)
return JsonResponse({"error": msg}, status=403)
data = json.loads(request.body)
is_active = data.get('is_active', False)
certificates = CertificateManager.get_certificates(course)
# for certificate activation/deactivation, we are assuming one certificate in certificates collection.
for certificate in certificates:
certificate['is_active'] = is_active
break
store.update_item(course, request.user.id)
cert_event_type = 'activated' if is_active else 'deactivated'
CertificateManager.track_event(cert_event_type, {
'course_id': unicode(course.id),
})
return HttpResponse(status=200)
@login_required
@require_http_methods(("GET", "POST"))
@ensure_csrf_cookie
def certificates_list_handler(request, course_key_string):
"""
A RESTful handler for Course Certificates
GET
html: return Certificates list page (Backbone application)
POST
json: create new Certificate
"""
course_key = CourseKey.from_string(course_key_string)
store = modulestore()
with store.bulk_operations(course_key):
try:
course = _get_course_and_check_access(course_key, request.user)
except PermissionDenied:
msg = _('PermissionDenied: Failed in authenticating {user}').format(user=request.user)
return JsonResponse({"error": msg}, status=403)
if 'text/html' in request.META.get('HTTP_ACCEPT', 'text/html'):
certificate_url = reverse_course_url('certificates.certificates_list_handler', course_key)
course_outline_url = reverse_course_url('course_handler', course_key)
upload_asset_url = reverse_course_url('assets_handler', course_key)
activation_handler_url = reverse_course_url(
handler_name='certificates.certificate_activation_handler',
course_key=course_key
)
course_modes = [mode.slug for mode in CourseMode.modes_for_course(
course_id=course.id, include_expired=True
)]
certificate_web_view_url = get_lms_link_for_certificate_web_view(
user_id=request.user.id,
course_key=course_key,
mode=course_modes[0] # CourseMode.modes_for_course returns default mode 'honor' if doesn't find anyone.
)
certificates = None
is_active = False
if settings.FEATURES.get('CERTIFICATES_HTML_VIEW', False):
certificates = CertificateManager.get_certificates(course)
# we are assuming only one certificate in certificates collection.
for certificate in certificates:
is_active = certificate.get('is_active', False)
break
return render_to_response('certificates.html', {
'context_course': course,
'certificate_url': certificate_url,
'course_outline_url': course_outline_url,
'upload_asset_url': upload_asset_url,
'certificates': json.dumps(certificates),
'course_modes': course_modes,
'certificate_web_view_url': certificate_web_view_url,
'is_active': is_active,
'is_global_staff': GlobalStaff().has_user(request.user),
'certificate_activation_handler_url': activation_handler_url
})
elif "application/json" in request.META.get('HTTP_ACCEPT'):
# Retrieve the list of certificates for the specified course
if request.method == 'GET':
certificates = CertificateManager.get_certificates(course)
return JsonResponse(certificates, encoder=EdxJSONEncoder)
elif request.method == 'POST':
# Add a new certificate to the specified course
try:
new_certificate = CertificateManager.deserialize_certificate(course, request.body)
except CertificateValidationError as err:
return JsonResponse({"error": err.message}, status=400)
if course.certificates.get('certificates') is None:
course.certificates['certificates'] = []
course.certificates['certificates'].append(new_certificate.certificate_data)
response = JsonResponse(CertificateManager.serialize_certificate(new_certificate), status=201)
response["Location"] = reverse_course_url(
'certificates.certificates_detail_handler',
course.id,
kwargs={'certificate_id': new_certificate.id}
)
store.update_item(course, request.user.id)
CertificateManager.track_event('created', {
'course_id': unicode(course.id),
'configuration_id': new_certificate.id
})
course = _get_course_and_check_access(course_key, request.user)
return response
else:
return HttpResponse(status=406)
@login_required
@ensure_csrf_cookie
@require_http_methods(("POST", "PUT", "DELETE"))
def certificates_detail_handler(request, course_key_string, certificate_id):
"""
JSON API endpoint for manipulating a course certificate via its internal identifier.
Utilized by the Backbone.js 'certificates' application model
POST or PUT
json: update the specified certificate based on provided information
DELETE
json: remove the specified certificate from the course
"""
course_key = CourseKey.from_string(course_key_string)
course = _get_course_and_check_access(course_key, request.user)
certificates_list = course.certificates.get('certificates', [])
match_index = None
match_cert = None
for index, cert in enumerate(certificates_list):
if certificate_id is not None:
if int(cert['id']) == int(certificate_id):
match_index = index
match_cert = cert
store = modulestore()
if request.method in ('POST', 'PUT'):
if certificate_id:
active_certificates = CertificateManager.get_certificates(course, only_active=True)
if int(certificate_id) in [int(certificate["id"]) for certificate in active_certificates]:
# Only global staff (PMs) are able to edit active certificate configuration
if not GlobalStaff().has_user(request.user):
raise PermissionDenied()
try:
new_certificate = CertificateManager.deserialize_certificate(course, request.body)
except CertificateValidationError as err:
return JsonResponse({"error": err.message}, status=400)
serialized_certificate = CertificateManager.serialize_certificate(new_certificate)
cert_event_type = 'created'
if match_cert:
cert_event_type = 'modified'
certificates_list[match_index] = serialized_certificate
else:
certificates_list.append(serialized_certificate)
store.update_item(course, request.user.id)
CertificateManager.track_event(cert_event_type, {
'course_id': unicode(course.id),
'configuration_id': serialized_certificate["id"]
})
return JsonResponse(serialized_certificate, status=201)
elif request.method == "DELETE":
if not match_cert:
return JsonResponse(status=404)
active_certificates = CertificateManager.get_certificates(course, only_active=True)
if int(certificate_id) in [int(certificate["id"]) for certificate in active_certificates]:
# Only global staff (PMs) are able to delete active certificate configuration
if not GlobalStaff().has_user(request.user):
raise PermissionDenied()
CertificateManager.remove_certificate(
request=request,
store=store,
course=course,
certificate_id=certificate_id
)
CertificateManager.track_event('deleted', {
'course_id': unicode(course.id),
'configuration_id': certificate_id
})
return JsonResponse(status=204)
@login_required
@ensure_csrf_cookie
@require_http_methods(("POST", "PUT", "DELETE"))
def signatory_detail_handler(request, course_key_string, certificate_id, signatory_id):
"""
JSON API endpoint for manipulating a specific course certificate signatory via its internal identifier.
Utilized by the Backbone 'certificates' application.
DELETE
json: Remove the specified signatory from the specified certificate
"""
course_key = CourseKey.from_string(course_key_string)
store = modulestore()
with store.bulk_operations(course_key):
course = _get_course_and_check_access(course_key, request.user)
certificates_list = course.certificates['certificates']
match_cert = None
# pylint: disable=unused-variable
for index, cert in enumerate(certificates_list):
if certificate_id is not None:
if int(cert['id']) == int(certificate_id):
match_cert = cert
if request.method == "DELETE":
if not match_cert:
return JsonResponse(status=404)
CertificateManager.remove_signatory(
request=request,
store=store,
course=course,
certificate_id=certificate_id,
signatory_id=signatory_id
)
return JsonResponse(status=204)
|
agpl-3.0
|
FacundoAcevedo/squidban
|
squidcontrol.py
|
1
|
4643
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import sys
import ConfigParser
import logging
import logging.config
from classes.daemon import Daemon
from classes.Comparador import Comparador
global RUTA_CONFIGURACION
# CONFIGURACION - CONFIGURACION - CONFIGURACION -
RUTA_CONFIGURACION = "/etc/squidban.cfg"
# FIN CONFIGURACION - FIN CONFIGURACION - FIN CONFIGURACION -
class SquidControl(Daemon):
"""Clase de control del demonio de squidban"""
def run(self):
"""Corre squidban en modo demonio"""
self.readConfig()
self.prepareLogging()
self.logger.info("Iniciando aplicacion")
self.prepararConfig()
try:
comparador = Comparador(self.config_comparador)
contador = 0
while True:
contador += 1
comparador.registrar(self.escanearhistoricos)
comparador.persistir(self.dbfile)
if contador >= 10:
contador = 0
self.logger.info("Actualizando reporte")
comparador.reporte()
comparador.reporteGuardar()
time.sleep(self.register_interval)
except:
self.logger.exception("Ha ocurrido una excepcion inesperada")
def runalone(self):
"""Corre squidban bajo demanda, no demonizado."""
self.readConfig()
self.prepareLogging()
self.logger.info("Iniciando aplicacion")
self.prepararConfig()
try:
comparador = Comparador(self.config_comparador)
comparador.registrar(self.escanearhistoricos)
comparador.persistir(self.dbfile)
self.logger.info("Actualizando reporte")
comparador.reporte()
comparador.reporteGuardar()
except:
self.logger.exception("Ha ocurrido una excepcion inesperada")
def prepararConfig(self):
"""Prepara la configuracion"""
self.config_comparador = {
'accesslog': self.accesslog,
'accesslog_historicos': self.accessloghistoricos,
'ipallowed': self.ipallowed,
'dnsallowed': self.dnsallowed,
'dbfile': self.dbfile,
'rta_ip_baneados': self.rta_ip_baneados,
'rta_dns_baneadas': self.rta_dns_baneadas,
}
def readConfig(self, config_file="config.cfg"):
"""Carga la configuracuion"""
try:
config = ConfigParser.ConfigParser()
config.read([RUTA_CONFIGURACION])
# Paths
self.accesslog = config.get("Paths", "accesslog")
self.accessloghistoricos = config.get("Paths",
"accesslog_historicos")
# lista
self.ipallowed = config.get("Paths",
"ipallowed").strip().split(",")
# lista
self.dnsallowed = config.get("Paths",
"dnsallowed").strip().split(",")
self.rta_ip_baneados = config.get("Paths", "ip_baneados").strip()
self.rta_dns_baneadas = config.get("Paths", "dns_baneadas").strip()
self.dbfile = config.get("Paths", "dbfile")
self.logconfig = config.get("Paths", "logconfig", "")
# Settings
self.escanearhistoricos = config.get("Settings",
"escanear_historicos")
# Times
self.register_interval = int(config.get("Times",
"intervalo_de_registro"))
self.max_inactivity = int(config.get("Times",
"tiempo_inactividad_usuario"))
except:
sys.stderr.write("No fue posible leer archivo de configuracion {}"
.format(config_file))
raise
def prepareLogging(self):
"""Prepara el logger"""
try:
logging.config.fileConfig(self.logconfig)
self.logger = logging.getLogger(__name__)
except:
sys.stderr.write("No fue posible leer archivo de configuracion {}"
.format(self.logconfig))
raise
def stop(self):
"""Finaliza el demonio"""
self.readConfig()
self.prepareLogging()
self.logger.warn("Deteniendo aplicacion")
Daemon.stop(self)
def restart(self):
"""Reinicia el demonio"""
self.logger.warn("Reiniciando aplicacion")
self.stop()
self.run()
def start(self):
"""Inicia el Squidban"""
s = SquidControl('/tmp/s.pid')
s.run()
if __name__ == "__main__":
s = SquidControl('/tmp/s.pid')
s.runalone()
|
gpl-3.0
|
AustinWise/mongrel2
|
examples/zcov/zcov/GCovGroup.py
|
96
|
6378
|
#!/usr/bin/python
from __future__ import division
from pprint import pprint
import cPickle
import os
import warnings
from zcov import GCovParser
class GCovGroup:
@staticmethod
def fromfile(path):
f = open(path)
try:
res = cPickle.load(f)
header,version = res[0],res[1]
except:
raise ValueError,'invalid zcov input'
if header != 'zcov-data':
raise ValueError,'input is not in zcov format'
elif version != 1:
raise ValueError,'unrecognized zcov version'
return res[2]
def tofile(self, path):
f = open(path,'wb')
cPickle.dump(('zcov-data',1,self),f,-1)
f.close()
def __init__(self):
self.entryMap = {}
def addEntry(self, path, entry):
record = self.entryMap.get(path)
if record is None:
self.entryMap[path] = entry
else:
self.entryMap[path] = self.mergeData(record,entry)
def addGCDA(self, data):
for path,entry in data.entries:
self.addEntry(path, entry)
def merge(self, b):
for path,entry in b.entryMap.items():
self.addEntry(path, entry)
def mergeData(self, a, b):
keys = self.mergeKeys(a.keys, b.keys)
lines = self.mergeLines(a.lines, b.lines)
calls = self.mergeCalls(a.calls, b.calls)
branches = self.mergeBranches(a.branches, b.branches)
functions = self.mergeFunctions(a.functions, b.functions)
return GCovParser.GCovFileData(keys, lines, calls, branches, functions)
def mergeKeys(self, aKeys, bKeys):
if set(aKeys) != set(bKeys):
raise ValueError,'Keys differ: %s, %s'%(pprint.pformat(a.keys),
pprint.pformat(b.keys))
keys = {}
for key,aValue in aKeys.items():
bValue = bKeys[key]
if key=='Source':
if aValue != bValue:
raise ValueError,'Key ("%s") differs: %s %s'%(key,
aValue,
bValue)
value = aValue
elif key in ('Runs','Programs'):
value = str(int(aValue) + int(bValue))
elif key in ('Data','Graph'):
value = aValue+','+bValue
else:
raise ValueError,'Unrecognized key: "%s"'%(key,)
keys[key] = value
return keys
def mergeLines(self, aLines, bLines):
if len(aLines) != len(bLines):
raise ValueError,'Entry mismatch (number of lines)'
lines = [None]*len(aLines)
for i,(a,b) in enumerate(zip(aLines,bLines)):
if a is None or b is None:
# Executability can change across tests (conditional
# code), take the non-None one if it exists.
lines[i] = (a,b)[a is None]
else:
lines[i] = a + b
return lines
def mergeLineList(self, aList, bList, merge):
if not aList:
for bItem in bList:
yield bItem
elif not bList:
for aItem in aList:
yield aItem
aIter,bIter = iter(aList),iter(bList)
aItem,bItem = aIter.next(),bIter.next()
while 1:
if aItem[0]==bItem[0]:
yield merge(aItem,bItem)
try:
aItem = aIter.next()
except StopIteration:
for bItem in bIter:
yield bItem
break
try:
bItem = bIter.next()
except StopIteration:
for aItem in aIter:
yield aItem
break
elif aItem[0]<bItem[0]:
yield aItem
try:
aItem = aIter.next()
except StopIteration:
yield bItem
for bItem in bIter:
yield bItem
break
else:
yield bItem
try:
bItem = bIter.next()
except StopIteration:
yield aItem
for aItem in bIter:
yield aItem
break
def mergeCalls(self, aCalls, bCalls):
def merge(a,b):
if a[1] != b[1]:
warnings.warn('Call mismatch (numbers differ)')
# raise ValueError,'Call mismatch (numbers differ)'
count = a[3]+b[3]
code = GCovParser.GCovFileData.CallNotExecuted
if GCovParser.GCovFileData.CallReturned in (a[2],b[2]):
code = GCovParser.GCovFileData.CallReturned
return (a[0],a[1],code,count)
return list(self.mergeLineList(aCalls,bCalls,merge))
def mergeBranches(self, aBranches, bBranches):
def merge(a,b):
# XXX This is really wrong
if a[1] != b[1]:
warnings.warn('Branch mismatch (numbers differ)')
# raise ValueError,'Branch mismatch (numbers differ)'
count = a[3]+b[3]
code = GCovParser.GCovFileData.BranchNotTaken
if GCovParser.GCovFileData.BranchTaken in (a[2],b[2]):
code = GCovParser.GCovFileData.BranchTaken
return (a[0],a[1],code,count)
return list(self.mergeLineList(aBranches,bBranches,merge))
def mergeFunctions(self, aFunctions, bFunctions):
def merge(a,b):
if a[0] != b[0]:
warnings.warn('Function mismatch (names differ)')
# raise ValueError,'Function mismatch (names differ)'
return (a[0],a[1]+b[1])
return list(self.mergeLineList(aFunctions,bFunctions,merge))
###
def main():
from optparse import OptionParser
op = OptionParser("usage: %prog [options] files")
opts,args = op.parse_args()
group = GCovGroup()
for f in args:
res = GCovParser.parseGCDA(f)
group.addGCDA(res)
print '%d total files'%(len(group.entryMap),)
if __name__=='__main__':
main()
|
bsd-3-clause
|
Gitlab11/odoo
|
openerp/addons/base/res/res_users.py
|
40
|
46628
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import itertools
import logging
from functools import partial
from itertools import repeat
from lxml import etree
from lxml.builder import E
import openerp
from openerp import SUPERUSER_ID, models
from openerp import tools
import openerp.exceptions
from openerp.osv import fields, osv, expression
from openerp.tools.translate import _
from openerp.http import request
_logger = logging.getLogger(__name__)
# Only users who can modify the user (incl. the user herself) see the real contents of these fields
USER_PRIVATE_FIELDS = ['password']
#----------------------------------------------------------
# Basic res.groups and res.users
#----------------------------------------------------------
class res_groups(osv.osv):
_name = "res.groups"
_description = "Access Groups"
_rec_name = 'full_name'
_order = 'name'
def _get_full_name(self, cr, uid, ids, field, arg, context=None):
res = {}
for g in self.browse(cr, uid, ids, context):
if g.category_id:
res[g.id] = '%s / %s' % (g.category_id.name, g.name)
else:
res[g.id] = g.name
return res
def _search_group(self, cr, uid, obj, name, args, context=None):
operand = args[0][2]
operator = args[0][1]
lst = True
if isinstance(operand, bool):
domains = [[('name', operator, operand)], [('category_id.name', operator, operand)]]
if operator in expression.NEGATIVE_TERM_OPERATORS == (not operand):
return expression.AND(domains)
else:
return expression.OR(domains)
if isinstance(operand, basestring):
lst = False
operand = [operand]
where = []
for group in operand:
values = filter(bool, group.split('/'))
group_name = values.pop().strip()
category_name = values and '/'.join(values).strip() or group_name
group_domain = [('name', operator, lst and [group_name] or group_name)]
category_domain = [('category_id.name', operator, lst and [category_name] or category_name)]
if operator in expression.NEGATIVE_TERM_OPERATORS and not values:
category_domain = expression.OR([category_domain, [('category_id', '=', False)]])
if (operator in expression.NEGATIVE_TERM_OPERATORS) == (not values):
sub_where = expression.AND([group_domain, category_domain])
else:
sub_where = expression.OR([group_domain, category_domain])
if operator in expression.NEGATIVE_TERM_OPERATORS:
where = expression.AND([where, sub_where])
else:
where = expression.OR([where, sub_where])
return where
_columns = {
'name': fields.char('Name', required=True, translate=True),
'users': fields.many2many('res.users', 'res_groups_users_rel', 'gid', 'uid', 'Users'),
'model_access': fields.one2many('ir.model.access', 'group_id', 'Access Controls', copy=True),
'rule_groups': fields.many2many('ir.rule', 'rule_group_rel',
'group_id', 'rule_group_id', 'Rules', domain=[('global', '=', False)]),
'menu_access': fields.many2many('ir.ui.menu', 'ir_ui_menu_group_rel', 'gid', 'menu_id', 'Access Menu'),
'view_access': fields.many2many('ir.ui.view', 'ir_ui_view_group_rel', 'group_id', 'view_id', 'Views'),
'comment' : fields.text('Comment', size=250, translate=True),
'category_id': fields.many2one('ir.module.category', 'Application', select=True),
'full_name': fields.function(_get_full_name, type='char', string='Group Name', fnct_search=_search_group),
}
_sql_constraints = [
('name_uniq', 'unique (category_id, name)', 'The name of the group must be unique within an application!')
]
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
# add explicit ordering if search is sorted on full_name
if order and order.startswith('full_name'):
ids = super(res_groups, self).search(cr, uid, args, context=context)
gs = self.browse(cr, uid, ids, context)
gs.sort(key=lambda g: g.full_name, reverse=order.endswith('DESC'))
gs = gs[offset:offset+limit] if limit else gs[offset:]
return map(int, gs)
return super(res_groups, self).search(cr, uid, args, offset, limit, order, context, count)
def copy(self, cr, uid, id, default=None, context=None):
group_name = self.read(cr, uid, [id], ['name'])[0]['name']
default.update({'name': _('%s (copy)')%group_name})
return super(res_groups, self).copy(cr, uid, id, default, context)
def write(self, cr, uid, ids, vals, context=None):
if 'name' in vals:
if vals['name'].startswith('-'):
raise osv.except_osv(_('Error'),
_('The name of the group can not start with "-"'))
res = super(res_groups, self).write(cr, uid, ids, vals, context=context)
self.pool['ir.model.access'].call_cache_clearing_methods(cr)
self.pool['res.users'].has_group.clear_cache(self.pool['res.users'])
return res
class res_users(osv.osv):
""" User class. A res.users record models an OpenERP user and is different
from an employee.
res.users class now inherits from res.partner. The partner model is
used to store the data related to the partner: lang, name, address,
avatar, ... The user model is now dedicated to technical data.
"""
__admin_ids = {}
__uid_cache = {}
_inherits = {
'res.partner': 'partner_id',
}
_name = "res.users"
_description = 'Users'
_order = 'name, login'
def _set_new_password(self, cr, uid, id, name, value, args, context=None):
if value is False:
# Do not update the password if no value is provided, ignore silently.
# For example web client submits False values for all empty fields.
return
if uid == id:
# To change their own password users must use the client-specific change password wizard,
# so that the new password is immediately used for further RPC requests, otherwise the user
# will face unexpected 'Access Denied' exceptions.
raise osv.except_osv(_('Operation Canceled'), _('Please use the change password wizard (in User Preferences or User menu) to change your own password.'))
self.write(cr, uid, id, {'password': value})
def _get_password(self, cr, uid, ids, arg, karg, context=None):
return dict.fromkeys(ids, '')
_columns = {
'id': fields.integer('ID'),
'login_date': fields.date('Latest connection', select=1, copy=False),
'partner_id': fields.many2one('res.partner', required=True,
string='Related Partner', ondelete='restrict',
help='Partner-related data of the user', auto_join=True),
'login': fields.char('Login', size=64, required=True,
help="Used to log into the system"),
'password': fields.char('Password', size=64, invisible=True, copy=False,
help="Keep empty if you don't want the user to be able to connect on the system."),
'new_password': fields.function(_get_password, type='char', size=64,
fnct_inv=_set_new_password, string='Set Password',
help="Specify a value only when creating a user or if you're "\
"changing the user's password, otherwise leave empty. After "\
"a change of password, the user has to login again."),
'signature': fields.html('Signature'),
'active': fields.boolean('Active'),
'action_id': fields.many2one('ir.actions.actions', 'Home Action', help="If specified, this action will be opened at log on for this user, in addition to the standard menu."),
'groups_id': fields.many2many('res.groups', 'res_groups_users_rel', 'uid', 'gid', 'Groups'),
# Special behavior for this field: res.company.search() will only return the companies
# available to the current user (should be the user's companies?), when the user_preference
# context is set.
'company_id': fields.many2one('res.company', 'Company', required=True,
help='The company this user is currently working for.', context={'user_preference': True}),
'company_ids':fields.many2many('res.company','res_company_users_rel','user_id','cid','Companies'),
}
# overridden inherited fields to bypass access rights, in case you have
# access to the user but not its corresponding partner
name = openerp.fields.Char(related='partner_id.name', inherited=True)
email = openerp.fields.Char(related='partner_id.email', inherited=True)
def on_change_login(self, cr, uid, ids, login, context=None):
if login and tools.single_email_re.match(login):
return {'value': {'email': login}}
return {}
def onchange_state(self, cr, uid, ids, state_id, context=None):
partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context=context)]
return self.pool.get('res.partner').onchange_state(cr, uid, partner_ids, state_id, context=context)
def onchange_type(self, cr, uid, ids, is_company, context=None):
""" Wrapper on the user.partner onchange_type, because some calls to the
partner form view applied to the user may trigger the
partner.onchange_type method, but applied to the user object.
"""
partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context=context)]
return self.pool['res.partner'].onchange_type(cr, uid, partner_ids, is_company, context=context)
def onchange_address(self, cr, uid, ids, use_parent_address, parent_id, context=None):
""" Wrapper on the user.partner onchange_address, because some calls to the
partner form view applied to the user may trigger the
partner.onchange_type method, but applied to the user object.
"""
partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context=context)]
return self.pool['res.partner'].onchange_address(cr, uid, partner_ids, use_parent_address, parent_id, context=context)
def _check_company(self, cr, uid, ids, context=None):
return all(((this.company_id in this.company_ids) or not this.company_ids) for this in self.browse(cr, uid, ids, context))
_constraints = [
(_check_company, 'The chosen company is not in the allowed companies for this user', ['company_id', 'company_ids']),
]
_sql_constraints = [
('login_key', 'UNIQUE (login)', 'You can not have two users with the same login !')
]
def _get_company(self,cr, uid, context=None, uid2=False):
if not uid2:
uid2 = uid
# Use read() to compute default company, and pass load=_classic_write to
# avoid useless name_get() calls. This will avoid prefetching fields
# while computing default values for new db columns, as the
# db backend may not be fully initialized yet.
user_data = self.pool['res.users'].read(cr, uid, uid2, ['company_id'],
context=context, load='_classic_write')
comp_id = user_data['company_id']
return comp_id or False
def _get_companies(self, cr, uid, context=None):
c = self._get_company(cr, uid, context)
if c:
return [c]
return False
def _get_group(self,cr, uid, context=None):
dataobj = self.pool.get('ir.model.data')
result = []
try:
dummy,group_id = dataobj.get_object_reference(cr, SUPERUSER_ID, 'base', 'group_user')
result.append(group_id)
dummy,group_id = dataobj.get_object_reference(cr, SUPERUSER_ID, 'base', 'group_partner_manager')
result.append(group_id)
except ValueError:
# If these groups does not exists anymore
pass
return result
def _get_default_image(self, cr, uid, context=None):
return self.pool['res.partner']._get_default_image(cr, uid, False, colorize=True, context=context)
_defaults = {
'password': '',
'active': True,
'customer': False,
'company_id': _get_company,
'company_ids': _get_companies,
'groups_id': _get_group,
'image': _get_default_image,
}
# User can write on a few of his own fields (but not his groups for example)
SELF_WRITEABLE_FIELDS = ['password', 'signature', 'action_id', 'company_id', 'email', 'name', 'image', 'image_medium', 'image_small', 'lang', 'tz']
# User can read a few of his own fields
SELF_READABLE_FIELDS = ['signature', 'company_id', 'login', 'email', 'name', 'image', 'image_medium', 'image_small', 'lang', 'tz', 'tz_offset', 'groups_id', 'partner_id', '__last_update', 'action_id']
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
def override_password(o):
if ('id' not in o or o['id'] != uid):
for f in USER_PRIVATE_FIELDS:
if f in o:
o[f] = '********'
return o
if fields and (ids == [uid] or ids == uid):
for key in fields:
if not (key in self.SELF_READABLE_FIELDS or key.startswith('context_')):
break
else:
# safe fields only, so we read as super-user to bypass access rights
uid = SUPERUSER_ID
result = super(res_users, self).read(cr, uid, ids, fields=fields, context=context, load=load)
canwrite = self.pool['ir.model.access'].check(cr, uid, 'res.users', 'write', False)
if not canwrite:
if isinstance(ids, (int, long)):
result = override_password(result)
else:
result = map(override_password, result)
return result
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
if uid != SUPERUSER_ID:
groupby_fields = set([groupby] if isinstance(groupby, basestring) else groupby)
if groupby_fields.intersection(USER_PRIVATE_FIELDS):
raise openerp.exceptions.AccessError('Invalid groupby')
return super(res_users, self).read_group(
cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby, lazy=lazy)
def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
if user != SUPERUSER_ID and args:
domain_terms = [term for term in args if isinstance(term, (tuple, list))]
domain_fields = set(left for (left, op, right) in domain_terms)
if domain_fields.intersection(USER_PRIVATE_FIELDS):
raise openerp.exceptions.AccessError('Invalid search criterion')
return super(res_users, self)._search(
cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count,
access_rights_uid=access_rights_uid)
def create(self, cr, uid, vals, context=None):
user_id = super(res_users, self).create(cr, uid, vals, context=context)
user = self.browse(cr, uid, user_id, context=context)
if user.partner_id.company_id:
user.partner_id.write({'company_id': user.company_id.id})
return user_id
def write(self, cr, uid, ids, values, context=None):
if not hasattr(ids, '__iter__'):
ids = [ids]
if ids == [uid]:
for key in values.keys():
if not (key in self.SELF_WRITEABLE_FIELDS or key.startswith('context_')):
break
else:
if 'company_id' in values:
user = self.browse(cr, SUPERUSER_ID, uid, context=context)
if not (values['company_id'] in user.company_ids.ids):
del values['company_id']
uid = 1 # safe fields only, so we write as super-user to bypass access rights
res = super(res_users, self).write(cr, uid, ids, values, context=context)
if 'company_id' in values:
for user in self.browse(cr, uid, ids, context=context):
# if partner is global we keep it that way
if user.partner_id.company_id and user.partner_id.company_id.id != values['company_id']:
user.partner_id.write({'company_id': user.company_id.id})
# clear default ir values when company changes
self.pool['ir.values'].get_defaults_dict.clear_cache(self.pool['ir.values'])
# clear caches linked to the users
self.pool['ir.model.access'].call_cache_clearing_methods(cr)
clear = partial(self.pool['ir.rule'].clear_cache, cr)
map(clear, ids)
db = cr.dbname
if db in self.__uid_cache:
for id in ids:
if id in self.__uid_cache[db]:
del self.__uid_cache[db][id]
self._context_get.clear_cache(self)
self.has_group.clear_cache(self)
return res
def unlink(self, cr, uid, ids, context=None):
if 1 in ids:
raise osv.except_osv(_('Can not remove root user!'), _('You can not remove the admin user as it is used internally for resources created by Odoo (updates, module installation, ...)'))
db = cr.dbname
if db in self.__uid_cache:
for id in ids:
if id in self.__uid_cache[db]:
del self.__uid_cache[db][id]
return super(res_users, self).unlink(cr, uid, ids, context=context)
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
if not args:
args=[]
if not context:
context={}
ids = []
if name and operator in ['=', 'ilike']:
ids = self.search(cr, user, [('login','=',name)]+ args, limit=limit, context=context)
if not ids:
ids = self.search(cr, user, [('name',operator,name)]+ args, limit=limit, context=context)
return self.name_get(cr, user, ids, context=context)
def copy(self, cr, uid, id, default=None, context=None):
user2copy = self.read(cr, uid, [id], ['login','name'])[0]
default = dict(default or {})
if ('name' not in default) and ('partner_id' not in default):
default['name'] = _("%s (copy)") % user2copy['name']
if 'login' not in default:
default['login'] = _("%s (copy)") % user2copy['login']
return super(res_users, self).copy(cr, uid, id, default, context)
@tools.ormcache(skiparg=2)
def _context_get(self, cr, uid):
user = self.browse(cr, SUPERUSER_ID, uid)
result = {}
for k in self._fields:
if k.startswith('context_'):
context_key = k[8:]
elif k in ['lang', 'tz']:
context_key = k
else:
context_key = False
if context_key:
res = getattr(user, k) or False
if isinstance(res, models.BaseModel):
res = res.id
result[context_key] = res or False
return result
def context_get(self, cr, uid, context=None):
return self._context_get(cr, uid)
def action_get(self, cr, uid, context=None):
dataobj = self.pool['ir.model.data']
data_id = dataobj._get_id(cr, SUPERUSER_ID, 'base', 'action_res_users_my')
return dataobj.browse(cr, uid, data_id, context=context).res_id
def check_super(self, passwd):
if passwd == tools.config['admin_passwd']:
return True
else:
raise openerp.exceptions.AccessDenied()
def check_credentials(self, cr, uid, password):
""" Override this method to plug additional authentication methods"""
res = self.search(cr, SUPERUSER_ID, [('id','=',uid),('password','=',password)])
if not res:
raise openerp.exceptions.AccessDenied()
def _login(self, db, login, password):
if not password:
return False
user_id = False
cr = self.pool.cursor()
try:
# autocommit: our single update request will be performed atomically.
# (In this way, there is no opportunity to have two transactions
# interleaving their cr.execute()..cr.commit() calls and have one
# of them rolled back due to a concurrent access.)
cr.autocommit(True)
# check if user exists
res = self.search(cr, SUPERUSER_ID, [('login','=',login)])
if res:
user_id = res[0]
# check credentials
self.check_credentials(cr, user_id, password)
# We effectively unconditionally write the res_users line.
# Even w/ autocommit there's a chance the user row will be locked,
# in which case we can't delay the login just for the purpose of
# update the last login date - hence we use FOR UPDATE NOWAIT to
# try to get the lock - fail-fast
# Failing to acquire the lock on the res_users row probably means
# another request is holding it. No big deal, we don't want to
# prevent/delay login in that case. It will also have been logged
# as a SQL error, if anyone cares.
try:
# NO KEY introduced in PostgreSQL 9.3 http://www.postgresql.org/docs/9.3/static/release-9-3.html#AEN115299
update_clause = 'NO KEY UPDATE' if cr._cnx.server_version >= 90300 else 'UPDATE'
cr.execute("SELECT id FROM res_users WHERE id=%%s FOR %s NOWAIT" % update_clause, (user_id,), log_exceptions=False)
cr.execute("UPDATE res_users SET login_date = now() AT TIME ZONE 'UTC' WHERE id=%s", (user_id,))
self.invalidate_cache(cr, user_id, ['login_date'], [user_id])
except Exception:
_logger.debug("Failed to update last_login for db:%s login:%s", db, login, exc_info=True)
except openerp.exceptions.AccessDenied:
_logger.info("Login failed for db:%s login:%s", db, login)
user_id = False
finally:
cr.close()
return user_id
def authenticate(self, db, login, password, user_agent_env):
"""Verifies and returns the user ID corresponding to the given
``login`` and ``password`` combination, or False if there was
no matching user.
:param str db: the database on which user is trying to authenticate
:param str login: username
:param str password: user password
:param dict user_agent_env: environment dictionary describing any
relevant environment attributes
"""
uid = self._login(db, login, password)
if uid == openerp.SUPERUSER_ID:
# Successfully logged in as admin!
# Attempt to guess the web base url...
if user_agent_env and user_agent_env.get('base_location'):
cr = self.pool.cursor()
try:
base = user_agent_env['base_location']
ICP = self.pool['ir.config_parameter']
if not ICP.get_param(cr, uid, 'web.base.url.freeze'):
ICP.set_param(cr, uid, 'web.base.url', base)
cr.commit()
except Exception:
_logger.exception("Failed to update web.base.url configuration parameter")
finally:
cr.close()
return uid
def check(self, db, uid, passwd):
"""Verifies that the given (uid, password) is authorized for the database ``db`` and
raise an exception if it is not."""
if not passwd:
# empty passwords disallowed for obvious security reasons
raise openerp.exceptions.AccessDenied()
if self.__uid_cache.setdefault(db, {}).get(uid) == passwd:
return
cr = self.pool.cursor()
try:
self.check_credentials(cr, uid, passwd)
self.__uid_cache[db][uid] = passwd
finally:
cr.close()
def change_password(self, cr, uid, old_passwd, new_passwd, context=None):
"""Change current user password. Old password must be provided explicitly
to prevent hijacking an existing user session, or for cases where the cleartext
password is not used to authenticate requests.
:return: True
:raise: openerp.exceptions.AccessDenied when old password is wrong
:raise: except_osv when new password is not set or empty
"""
self.check(cr.dbname, uid, old_passwd)
if new_passwd:
return self.write(cr, uid, uid, {'password': new_passwd})
raise osv.except_osv(_('Warning!'), _("Setting empty passwords is not allowed for security reasons!"))
def preference_save(self, cr, uid, ids, context=None):
return {
'type': 'ir.actions.client',
'tag': 'reload_context',
}
def preference_change_password(self, cr, uid, ids, context=None):
return {
'type': 'ir.actions.client',
'tag': 'change_password',
'target': 'new',
}
@tools.ormcache(skiparg=2)
def has_group(self, cr, uid, group_ext_id):
"""Checks whether user belongs to given group.
:param str group_ext_id: external ID (XML ID) of the group.
Must be provided in fully-qualified form (``module.ext_id``), as there
is no implicit module to use..
:return: True if the current user is a member of the group with the
given external ID (XML ID), else False.
"""
assert group_ext_id and '.' in group_ext_id, "External ID must be fully qualified"
module, ext_id = group_ext_id.split('.')
cr.execute("""SELECT 1 FROM res_groups_users_rel WHERE uid=%s AND gid IN
(SELECT res_id FROM ir_model_data WHERE module=%s AND name=%s)""",
(uid, module, ext_id))
return bool(cr.fetchone())
#----------------------------------------------------------
# Implied groups
#
# Extension of res.groups and res.users with a relation for "implied"
# or "inherited" groups. Once a user belongs to a group, it
# automatically belongs to the implied groups (transitively).
#----------------------------------------------------------
class cset(object):
""" A cset (constrained set) is a set of elements that may be constrained to
be a subset of other csets. Elements added to a cset are automatically
added to its supersets. Cycles in the subset constraints are supported.
"""
def __init__(self, xs):
self.supersets = set()
self.elements = set(xs)
def subsetof(self, other):
if other is not self:
self.supersets.add(other)
other.update(self.elements)
def update(self, xs):
xs = set(xs) - self.elements
if xs: # xs will eventually be empty in case of a cycle
self.elements.update(xs)
for s in self.supersets:
s.update(xs)
def __iter__(self):
return iter(self.elements)
concat = itertools.chain.from_iterable
class groups_implied(osv.osv):
_inherit = 'res.groups'
def _get_trans_implied(self, cr, uid, ids, field, arg, context=None):
"computes the transitive closure of relation implied_ids"
memo = {} # use a memo for performance and cycle avoidance
def computed_set(g):
if g not in memo:
memo[g] = cset(g.implied_ids)
for h in g.implied_ids:
computed_set(h).subsetof(memo[g])
return memo[g]
res = {}
for g in self.browse(cr, SUPERUSER_ID, ids, context):
res[g.id] = map(int, computed_set(g))
return res
_columns = {
'implied_ids': fields.many2many('res.groups', 'res_groups_implied_rel', 'gid', 'hid',
string='Inherits', help='Users of this group automatically inherit those groups'),
'trans_implied_ids': fields.function(_get_trans_implied,
type='many2many', relation='res.groups', string='Transitively inherits'),
}
def create(self, cr, uid, values, context=None):
users = values.pop('users', None)
gid = super(groups_implied, self).create(cr, uid, values, context)
if users:
# delegate addition of users to add implied groups
self.write(cr, uid, [gid], {'users': users}, context)
return gid
def write(self, cr, uid, ids, values, context=None):
res = super(groups_implied, self).write(cr, uid, ids, values, context)
if values.get('users') or values.get('implied_ids'):
# add all implied groups (to all users of each group)
for g in self.browse(cr, uid, ids, context=context):
gids = map(int, g.trans_implied_ids)
vals = {'users': [(4, u.id) for u in g.users]}
super(groups_implied, self).write(cr, uid, gids, vals, context)
return res
class users_implied(osv.osv):
_inherit = 'res.users'
def create(self, cr, uid, values, context=None):
groups = values.pop('groups_id', None)
user_id = super(users_implied, self).create(cr, uid, values, context)
if groups:
# delegate addition of groups to add implied groups
self.write(cr, uid, [user_id], {'groups_id': groups}, context)
self.pool['ir.ui.view'].clear_cache()
return user_id
def write(self, cr, uid, ids, values, context=None):
if not isinstance(ids,list):
ids = [ids]
res = super(users_implied, self).write(cr, uid, ids, values, context)
if values.get('groups_id'):
# add implied groups for all users
for user in self.browse(cr, uid, ids):
gs = set(concat(g.trans_implied_ids for g in user.groups_id))
vals = {'groups_id': [(4, g.id) for g in gs]}
super(users_implied, self).write(cr, uid, [user.id], vals, context)
self.pool['ir.ui.view'].clear_cache()
return res
#----------------------------------------------------------
# Vitrual checkbox and selection for res.user form view
#
# Extension of res.groups and res.users for the special groups view in the users
# form. This extension presents groups with selection and boolean widgets:
# - Groups are shown by application, with boolean and/or selection fields.
# Selection fields typically defines a role "Name" for the given application.
# - Uncategorized groups are presented as boolean fields and grouped in a
# section "Others".
#
# The user form view is modified by an inherited view (base.user_groups_view);
# the inherited view replaces the field 'groups_id' by a set of reified group
# fields (boolean or selection fields). The arch of that view is regenerated
# each time groups are changed.
#
# Naming conventions for reified groups fields:
# - boolean field 'in_group_ID' is True iff
# ID is in 'groups_id'
# - selection field 'sel_groups_ID1_..._IDk' is ID iff
# ID is in 'groups_id' and ID is maximal in the set {ID1, ..., IDk}
#----------------------------------------------------------
def name_boolean_group(id):
return 'in_group_' + str(id)
def name_selection_groups(ids):
return 'sel_groups_' + '_'.join(map(str, ids))
def is_boolean_group(name):
return name.startswith('in_group_')
def is_selection_groups(name):
return name.startswith('sel_groups_')
def is_reified_group(name):
return is_boolean_group(name) or is_selection_groups(name)
def get_boolean_group(name):
return int(name[9:])
def get_selection_groups(name):
return map(int, name[11:].split('_'))
def partition(f, xs):
"return a pair equivalent to (filter(f, xs), filter(lambda x: not f(x), xs))"
yes, nos = [], []
for x in xs:
(yes if f(x) else nos).append(x)
return yes, nos
def parse_m2m(commands):
"return a list of ids corresponding to a many2many value"
ids = []
for command in commands:
if isinstance(command, (tuple, list)):
if command[0] in (1, 4):
ids.append(command[1])
elif command[0] == 5:
ids = []
elif command[0] == 6:
ids = list(command[2])
else:
ids.append(command)
return ids
class groups_view(osv.osv):
_inherit = 'res.groups'
def create(self, cr, uid, values, context=None):
res = super(groups_view, self).create(cr, uid, values, context)
self.update_user_groups_view(cr, uid, context)
return res
def write(self, cr, uid, ids, values, context=None):
res = super(groups_view, self).write(cr, uid, ids, values, context)
self.update_user_groups_view(cr, uid, context)
return res
def unlink(self, cr, uid, ids, context=None):
res = super(groups_view, self).unlink(cr, uid, ids, context)
self.update_user_groups_view(cr, uid, context)
return res
def update_user_groups_view(self, cr, uid, context=None):
# the view with id 'base.user_groups_view' inherits the user form view,
# and introduces the reified group fields
# we have to try-catch this, because at first init the view does not exist
# but we are already creating some basic groups
if not context or context.get('install_mode'):
# use installation/admin language for translatable names in the view
context = dict(context or {})
context.update(self.pool['res.users'].context_get(cr, uid))
view = self.pool['ir.model.data'].xmlid_to_object(cr, SUPERUSER_ID, 'base.user_groups_view', context=context)
if view and view.exists() and view._name == 'ir.ui.view':
xml1, xml2 = [], []
xml1.append(E.separator(string=_('Application'), colspan="4"))
for app, kind, gs in self.get_groups_by_application(cr, uid, context):
# hide groups in category 'Hidden' (except to group_no_one)
attrs = {'groups': 'base.group_no_one'} if app and app.xml_id == 'base.module_category_hidden' else {}
if kind == 'selection':
# application name with a selection field
field_name = name_selection_groups(map(int, gs))
xml1.append(E.field(name=field_name, **attrs))
xml1.append(E.newline())
else:
# application separator with boolean fields
app_name = app and app.name or _('Other')
xml2.append(E.separator(string=app_name, colspan="4", **attrs))
for g in gs:
field_name = name_boolean_group(g.id)
xml2.append(E.field(name=field_name, **attrs))
xml = E.field(*(xml1 + xml2), name="groups_id", position="replace")
xml.addprevious(etree.Comment("GENERATED AUTOMATICALLY BY GROUPS"))
xml_content = etree.tostring(xml, pretty_print=True, xml_declaration=True, encoding="utf-8")
view.write({'arch': xml_content})
return True
def get_application_groups(self, cr, uid, domain=None, context=None):
return self.search(cr, uid, domain or [])
def get_groups_by_application(self, cr, uid, context=None):
""" return all groups classified by application (module category), as a list of pairs:
[(app, kind, [group, ...]), ...],
where app and group are browse records, and kind is either 'boolean' or 'selection'.
Applications are given in sequence order. If kind is 'selection', the groups are
given in reverse implication order.
"""
def linearized(gs):
gs = set(gs)
# determine sequence order: a group should appear after its implied groups
order = dict.fromkeys(gs, 0)
for g in gs:
for h in gs.intersection(g.trans_implied_ids):
order[h] -= 1
# check whether order is total, i.e., sequence orders are distinct
if len(set(order.itervalues())) == len(gs):
return sorted(gs, key=lambda g: order[g])
return None
# classify all groups by application
gids = self.get_application_groups(cr, uid, context=context)
by_app, others = {}, []
for g in self.browse(cr, uid, gids, context):
if g.category_id:
by_app.setdefault(g.category_id, []).append(g)
else:
others.append(g)
# build the result
res = []
apps = sorted(by_app.iterkeys(), key=lambda a: a.sequence or 0)
for app in apps:
gs = linearized(by_app[app])
if gs:
res.append((app, 'selection', gs))
else:
res.append((app, 'boolean', by_app[app]))
if others:
res.append((False, 'boolean', others))
return res
class users_view(osv.osv):
_inherit = 'res.users'
def create(self, cr, uid, values, context=None):
values = self._remove_reified_groups(values)
return super(users_view, self).create(cr, uid, values, context)
def write(self, cr, uid, ids, values, context=None):
values = self._remove_reified_groups(values)
return super(users_view, self).write(cr, uid, ids, values, context)
def _remove_reified_groups(self, values):
""" return `values` without reified group fields """
add, rem = [], []
values1 = {}
for key, val in values.iteritems():
if is_boolean_group(key):
(add if val else rem).append(get_boolean_group(key))
elif is_selection_groups(key):
rem += get_selection_groups(key)
if val:
add.append(val)
else:
values1[key] = val
if 'groups_id' not in values and (add or rem):
# remove group ids in `rem` and add group ids in `add`
values1['groups_id'] = zip(repeat(3), rem) + zip(repeat(4), add)
return values1
def default_get(self, cr, uid, fields, context=None):
group_fields, fields = partition(is_reified_group, fields)
fields1 = (fields + ['groups_id']) if group_fields else fields
values = super(users_view, self).default_get(cr, uid, fields1, context)
self._add_reified_groups(group_fields, values)
# add "default_groups_ref" inside the context to set default value for group_id with xml values
if 'groups_id' in fields and isinstance(context.get("default_groups_ref"), list):
groups = []
ir_model_data = self.pool.get('ir.model.data')
for group_xml_id in context["default_groups_ref"]:
group_split = group_xml_id.split('.')
if len(group_split) != 2:
raise osv.except_osv(_('Invalid context value'), _('Invalid context default_groups_ref value (model.name_id) : "%s"') % group_xml_id)
try:
temp, group_id = ir_model_data.get_object_reference(cr, uid, group_split[0], group_split[1])
except ValueError:
group_id = False
groups += [group_id]
values['groups_id'] = groups
return values
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
# determine whether reified groups fields are required, and which ones
fields1 = fields or self.fields_get(cr, uid, context=context).keys()
group_fields, other_fields = partition(is_reified_group, fields1)
# read regular fields (other_fields); add 'groups_id' if necessary
drop_groups_id = False
if group_fields and fields:
if 'groups_id' not in other_fields:
other_fields.append('groups_id')
drop_groups_id = True
else:
other_fields = fields
res = super(users_view, self).read(cr, uid, ids, other_fields, context=context, load=load)
# post-process result to add reified group fields
if group_fields:
for values in (res if isinstance(res, list) else [res]):
self._add_reified_groups(group_fields, values)
if drop_groups_id:
values.pop('groups_id', None)
return res
def _add_reified_groups(self, fields, values):
""" add the given reified group fields into `values` """
gids = set(parse_m2m(values.get('groups_id') or []))
for f in fields:
if is_boolean_group(f):
values[f] = get_boolean_group(f) in gids
elif is_selection_groups(f):
selected = [gid for gid in get_selection_groups(f) if gid in gids]
values[f] = selected and selected[-1] or False
def fields_get(self, cr, uid, allfields=None, context=None, write_access=True, attributes=None):
res = super(users_view, self).fields_get(cr, uid, allfields, context, write_access, attributes)
# add reified groups fields
if uid != SUPERUSER_ID and not self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager'):
return res
for app, kind, gs in self.pool['res.groups'].get_groups_by_application(cr, uid, context):
if kind == 'selection':
# selection group field
tips = ['%s: %s' % (g.name, g.comment) for g in gs if g.comment]
res[name_selection_groups(map(int, gs))] = {
'type': 'selection',
'string': app and app.name or _('Other'),
'selection': [(False, '')] + [(g.id, g.name) for g in gs],
'help': '\n'.join(tips),
'exportable': False,
'selectable': False,
}
else:
# boolean group fields
for g in gs:
res[name_boolean_group(g.id)] = {
'type': 'boolean',
'string': g.name,
'help': g.comment,
'exportable': False,
'selectable': False,
}
return res
#----------------------------------------------------------
# change password wizard
#----------------------------------------------------------
class change_password_wizard(osv.TransientModel):
"""
A wizard to manage the change of users' passwords
"""
_name = "change.password.wizard"
_description = "Change Password Wizard"
_columns = {
'user_ids': fields.one2many('change.password.user', 'wizard_id', string='Users'),
}
def _default_user_ids(self, cr, uid, context=None):
if context is None:
context = {}
user_model = self.pool['res.users']
user_ids = context.get('active_model') == 'res.users' and context.get('active_ids') or []
return [
(0, 0, {'user_id': user.id, 'user_login': user.login})
for user in user_model.browse(cr, uid, user_ids, context=context)
]
_defaults = {
'user_ids': _default_user_ids,
}
def change_password_button(self, cr, uid, ids, context=None):
wizard = self.browse(cr, uid, ids, context=context)[0]
need_reload = any(uid == user.user_id.id for user in wizard.user_ids)
line_ids = [user.id for user in wizard.user_ids]
self.pool.get('change.password.user').change_password_button(cr, uid, line_ids, context=context)
if need_reload:
return {
'type': 'ir.actions.client',
'tag': 'reload'
}
return {'type': 'ir.actions.act_window_close'}
class change_password_user(osv.TransientModel):
"""
A model to configure users in the change password wizard
"""
_name = 'change.password.user'
_description = 'Change Password Wizard User'
_columns = {
'wizard_id': fields.many2one('change.password.wizard', string='Wizard', required=True),
'user_id': fields.many2one('res.users', string='User', required=True),
'user_login': fields.char('User Login', readonly=True),
'new_passwd': fields.char('New Password'),
}
_defaults = {
'new_passwd': '',
}
def change_password_button(self, cr, uid, ids, context=None):
for line in self.browse(cr, uid, ids, context=context):
line.user_id.write({'password': line.new_passwd})
# don't keep temporary passwords in the database longer than necessary
self.write(cr, uid, ids, {'new_passwd': False}, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
AutorestCI/azure-sdk-for-python
|
unreleased/azure-mgmt-machinelearning/azure/mgmt/machinelearning/models/module_asset_parameter.py
|
5
|
1309
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ModuleAssetParameter(Model):
"""Parameter definition for a module asset.
:param name: Parameter name.
:type name: str
:param parameter_type: Parameter type.
:type parameter_type: str
:param mode_values_info: Definitions for nested interface parameters if
this is a complex module parameter.
:type mode_values_info: dict
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'parameter_type': {'key': 'parameterType', 'type': 'str'},
'mode_values_info': {'key': 'modeValuesInfo', 'type': '{ModeValueInfo}'},
}
def __init__(self, name=None, parameter_type=None, mode_values_info=None):
self.name = name
self.parameter_type = parameter_type
self.mode_values_info = mode_values_info
|
mit
|
glatard/nipype
|
nipype/interfaces/camino/tests/test_auto_QBallMX.py
|
9
|
1370
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.camino.odf import QBallMX
def test_QBallMX_inputs():
input_map = dict(args=dict(argstr='%s',
),
basistype=dict(argstr='-basistype %s',
usedefault=True,
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
order=dict(argstr='-order %d',
units='NA',
),
out_file=dict(argstr='> %s',
genfile=True,
position=-1,
),
rbfpointset=dict(argstr='-rbfpointset %d',
units='NA',
),
rbfsigma=dict(argstr='-rbfsigma %f',
units='NA',
),
scheme_file=dict(argstr='-schemefile %s',
mandatory=True,
),
smoothingsigma=dict(argstr='-smoothingsigma %f',
units='NA',
),
terminal_output=dict(nohash=True,
),
)
inputs = QBallMX.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_QBallMX_outputs():
output_map = dict(qmat=dict(),
)
outputs = QBallMX.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
bsd-3-clause
|
JohnS-01/coala
|
tests/parsing/GlobbingTest.py
|
29
|
12501
|
"""
Tests Globbing and related functions
Test Files are local and permanent and organized as follows:
GlobTestDir
├── SubDir1
│ ├── File11.py
│ └── File12.py
│ SubDir2
│ ├── File(with)parentheses.txt
│ └── File[with]brackets.txt
├── File1.x
├── File2.y
└── File3.z
"""
import os
import re
import unittest
from coalib.parsing.Globbing import (
_iter_alternatives, _iter_choices, _position_is_bracketed, fnmatch, glob,
glob_escape)
class TestFiles:
"""
Testfiles to check glob patterns on
"""
glob_test_root = os.path.split(__file__)[0]
glob_test_dir = os.path.join(glob_test_root, 'GlobTestDir')
dir1 = os.path.join(glob_test_dir, 'SubDir1')
file11 = os.path.join(dir1, 'File11.py')
file12 = os.path.join(dir1, 'File12.py')
dir2 = os.path.join(glob_test_dir, 'SubDir2')
file_paren = os.path.join(dir2, 'File(with)parentheses.txt')
file_brack = os.path.join(dir2, 'File[with]brackets.txt')
file1 = os.path.join(glob_test_dir, 'File1.x')
file2 = os.path.join(glob_test_dir, 'File2.y')
file3 = os.path.join(glob_test_dir, 'File3.z')
class GlobbingHelperFunctionsTest(unittest.TestCase):
def test_positions(self):
# pattern: [bracketed values]
pattern_positions_dict = {
'[]': [],
'[a]': [1],
'[][]': [1, 2],
'[]]]': [1],
'[[[]': [1, 2],
'[[[][]]]': [1, 2, 5],
'][': [],
'][][': [],
'[!]': [],
'[!c]': [1, 2],
'[!': []
}
for pattern, bracketed_positions in pattern_positions_dict.items():
for pos in range(len(pattern)):
if pos in bracketed_positions:
self.assertTrue(_position_is_bracketed(pattern, pos))
else:
self.assertFalse(_position_is_bracketed(pattern, pos))
def test_choices(self):
# pattern: [choices]
pattern_choices_dict = {
'': [''],
'a': ['a'],
'a|b': ['a', 'b'],
'a|b|c': ['a', 'b', 'c'],
'a|b[|]c': ['a', 'b[|]c'],
'a|[b|c]': ['a', '[b|c]'],
'a[|b|c]': ['a[|b|c]'],
'[a|b|c]': ['[a|b|c]'],
'[a]|[b]|[c]': ['[a]', '[b]', '[c]'],
'[[a]|[b]|[c]': ['[[a]', '[b]', '[c]']
}
for pattern, choices in pattern_choices_dict.items():
self.assertEqual(list(_iter_choices(pattern)), choices)
def test_alternatives(self):
# pattern: [alternatives]
pattern_alternatives_dict = {
'': [''],
'(ab)': ['ab'],
'a|b': ['a|b'],
'()': [''],
'(|)': [''],
'(a|b)': ['a', 'b'],
'(a|b|c)': ['a', 'b', 'c'],
'a(b|c)': ['ab', 'ac'],
'(a|b)(c|d)': ['ac', 'ad', 'bc', 'bd'],
'(a|b(c|d)': ['(a|bc', '(a|bd'],
'(a[|]b)': ['a[|]b'],
'[(]a|b)': ['[(]a|b)'],
}
for pattern, alternatives in pattern_alternatives_dict.items():
self.assertEqual(sorted(list(_iter_alternatives(pattern))),
sorted(alternatives))
class GlobEscapeTest(unittest.TestCase):
def test_glob_escape(self):
input_strings = [
'test',
'test[',
'test []',
'test [[]',
'test ]] str [',
'test[][]',
'test(',
'test)',
'test()',
'test (1)']
output_strings = [
'test',
'test[[]',
'test [[][]]',
'test [[][[][]]',
'test []][]] str [[]',
'test[[][]][[][]]',
'test[(]',
'test[)]',
'test[(][)]',
'test [(]1[)]']
for unescaped_str, escaped_str in zip(input_strings, output_strings):
self.assertEqual(glob_escape(unescaped_str), escaped_str)
class FnmatchTest(unittest.TestCase):
def _test_fnmatch(self, pattern, matches, non_matches):
for match in matches:
self.assertTrue(fnmatch(match, pattern))
for non_match in non_matches:
self.assertFalse(fnmatch(non_match, pattern))
def test_circumflex_in_set(self):
pattern = '[^abc]'
matches = ['^', 'a', 'b', 'c']
non_matches = ['d', 'e', 'f', 'g']
self._test_fnmatch(pattern, matches, non_matches)
def test_negative_set(self):
pattern = '[!ab]'
matches = ['c', 'd']
non_matches = ['a', 'b']
self._test_fnmatch(pattern, matches, non_matches)
def test_escaped_bracket(self):
pattern = '[]ab]'
matches = [']', 'a', 'b']
non_matches = ['[]ab]', 'ab]']
self._test_fnmatch(pattern, matches, non_matches)
def test_empty_set(self):
pattern = 'a[]b'
matches = ['a[]b']
non_matches = ['a', 'b', '[', ']', 'ab']
self._test_fnmatch(pattern, matches, non_matches)
def test_home_dir(self):
pattern = os.path.join('~', 'a', 'b')
matches = [os.path.expanduser(os.path.join('~', 'a', 'b'))]
non_matches = [os.path.join('~', 'a', 'b')]
self._test_fnmatch(pattern, matches, non_matches)
def test_alternatives(self):
pattern = '(a|b)'
matches = ['a', 'b']
non_matches = ['(a|b)', 'a|b']
self._test_fnmatch(pattern, matches, non_matches)
def test_set_precedence(self):
pattern = '(a|[b)]'
matches = ['(a|b', '(a|)']
non_matches = ['a]', '[b]']
self._test_fnmatch(pattern, matches, non_matches)
def test_single_sequence(self):
pattern = '([ab])'
matches = ['a', 'b']
non_matches = ['[ab]', 'ab']
self._test_fnmatch(pattern, matches, non_matches)
def test_questionmark(self):
pattern = 'a?b'
matches = ['axb', 'ayb']
non_matches = ['ab', 'aXXb']
self._test_fnmatch(pattern, matches, non_matches)
def test_asterisk(self):
pattern = 'a*b'
matches = ['axb', 'ayb']
non_matches = ['aXbX', os.path.join('a', 'b')]
self._test_fnmatch(pattern, matches, non_matches)
def test_double_asterisk(self):
pattern = 'a**b'
matches = ['axb', 'ayb', os.path.join('a', 'b')]
non_matches = ['aXbX']
self._test_fnmatch(pattern, matches, non_matches)
def test_multiple_patterns(self):
pattern = ['a**b', 'a**c']
matches = ['axb', 'axc']
non_matches = ['aXbX', 'aXcX']
self._test_fnmatch(pattern, matches, non_matches)
pattern = []
matches = ['anything', 'anything_else']
non_matches = []
self._test_fnmatch(pattern, matches, non_matches)
class GlobTest(unittest.TestCase):
def setUp(self):
self.maxDiff = None
def _test_glob(self, pattern, file_list):
results = sorted([os.path.normcase(g) for g in glob(pattern)])
file_list = sorted([os.path.normcase(f) for f in file_list])
self.assertEqual([i for i in results
if re.search(r'(__pycache__|\.pyc)', i) is None],
file_list)
def test_collect_files(self):
pattern = os.path.join(TestFiles.glob_test_dir, 'Sub*', 'File1?.py')
file_list = [TestFiles.file11, TestFiles.file12]
self._test_glob(pattern, file_list)
def test_collect_dirs(self):
pattern = os.path.join(TestFiles.glob_test_dir, 'Sub*' + os.sep)
file_list = [TestFiles.dir1+os.sep, TestFiles.dir2+os.sep]
self._test_glob(pattern, file_list)
def test_collect_specific_dir(self):
pattern = os.path.join(TestFiles.dir1 + os.sep)
file_list = [TestFiles.dir1+os.sep]
self._test_glob(pattern, file_list)
def test_collect_flat(self):
pattern = os.path.join(TestFiles.glob_test_dir, '*')
file_list = [TestFiles.dir1,
TestFiles.dir2,
TestFiles.file1,
TestFiles.file2,
TestFiles.file3]
self._test_glob(pattern, file_list)
def test_collect_all(self):
pattern = os.path.join(TestFiles.glob_test_dir, '**', '*')
file_list = [TestFiles.dir1,
TestFiles.dir2,
TestFiles.file1,
TestFiles.file2,
TestFiles.file3,
TestFiles.file11,
TestFiles.file12,
TestFiles.file_paren,
TestFiles.file_brack]
self._test_glob(pattern, file_list)
def test_collect_basename(self):
pattern = TestFiles.glob_test_dir
file_list = [TestFiles.glob_test_dir]
self._test_glob(pattern, file_list)
def test_collect_none(self):
pattern = ''
file_list = []
self._test_glob(pattern, file_list)
def test_collect_specific(self):
pattern = os.path.join(TestFiles.file12)
file_list = [TestFiles.file12]
self._test_glob(pattern, file_list)
def test_collect_parentheses(self):
pattern = os.path.join(TestFiles.glob_test_dir,
'SubDir[12]',
'File[(]with)parentheses.txt')
file_list = [TestFiles.file_paren]
self._test_glob(pattern, file_list)
def test_collect_brackets(self):
pattern = os.path.join(TestFiles.glob_test_dir,
'SubDir[12]',
'File[[]with[]]brackets.txt')
file_list = [TestFiles.file_brack]
self._test_glob(pattern, file_list)
def test_collect_or(self):
pattern = os.path.join(TestFiles.glob_test_dir, 'File?.(x|y|z)')
file_list = [TestFiles.file1, TestFiles.file2, TestFiles.file3]
self._test_glob(pattern, file_list)
def test_wildcard_dir(self):
pattern = os.path.join(TestFiles.glob_test_dir, 'SubDir?', 'File11.py')
file_list = [TestFiles.file11]
self._test_glob(pattern, file_list)
def test_collect_recursive(self):
pattern = os.path.join(TestFiles.glob_test_dir, '**', '*')
file_list = [TestFiles.file1,
TestFiles.file2,
TestFiles.file3,
TestFiles.file11,
TestFiles.file12,
TestFiles.file_paren,
TestFiles.file_brack,
TestFiles.dir1,
TestFiles.dir2]
self._test_glob(pattern, file_list)
def test_collect_recursive_part_of_basename(self):
pattern = os.path.join(TestFiles.glob_test_dir, '**.(py|[xy])')
file_list = [TestFiles.file11,
TestFiles.file12,
TestFiles.file1,
TestFiles.file2]
self._test_glob(pattern, file_list)
def test_collect_invalid(self):
pattern = 'NOPE'
file_list = []
self._test_glob(pattern, file_list)
def test_no_dirname_recursive(self):
old_curdir = os.curdir
os.curdir = TestFiles.glob_test_dir
pattern = '**'
file_list = [TestFiles.file1,
TestFiles.file2,
TestFiles.file3,
TestFiles.file11,
TestFiles.file12,
TestFiles.file_paren,
TestFiles.file_brack,
TestFiles.dir1,
TestFiles.dir2]
results = sorted([os.path.normcase(os.path.join(os.curdir, g))
for g in glob(pattern)])
file_list = sorted([os.path.normcase(f) for f in file_list])
self.assertEqual([i for i in results
if re.search(r'(__pycache__|\.pyc)', i) is None],
file_list)
os.curdir = old_curdir
def test_no_dirname(self):
old_curdir = os.curdir
os.curdir = TestFiles.glob_test_dir
pattern = '*Dir?'
file_list = [TestFiles.dir1,
TestFiles.dir2]
results = sorted([os.path.normcase(os.path.join(os.curdir, g))
for g in glob(pattern)])
file_list = sorted([os.path.normcase(f) for f in file_list])
self.assertEqual(results, file_list)
os.curdir = old_curdir
|
agpl-3.0
|
jstammers/EDMSuite
|
NavPython/IronPython/Lib/imghdr.py
|
259
|
3544
|
"""Recognize image file formats based on their first few bytes."""
__all__ = ["what"]
#-------------------------#
# Recognize image headers #
#-------------------------#
def what(file, h=None):
if h is None:
if isinstance(file, basestring):
f = open(file, 'rb')
h = f.read(32)
else:
location = file.tell()
h = file.read(32)
file.seek(location)
f = None
else:
f = None
try:
for tf in tests:
res = tf(h, f)
if res:
return res
finally:
if f: f.close()
return None
#---------------------------------#
# Subroutines per image file type #
#---------------------------------#
tests = []
def test_jpeg(h, f):
"""JPEG data in JFIF format"""
if h[6:10] == 'JFIF':
return 'jpeg'
tests.append(test_jpeg)
def test_exif(h, f):
"""JPEG data in Exif format"""
if h[6:10] == 'Exif':
return 'jpeg'
tests.append(test_exif)
def test_png(h, f):
if h[:8] == "\211PNG\r\n\032\n":
return 'png'
tests.append(test_png)
def test_gif(h, f):
"""GIF ('87 and '89 variants)"""
if h[:6] in ('GIF87a', 'GIF89a'):
return 'gif'
tests.append(test_gif)
def test_tiff(h, f):
"""TIFF (can be in Motorola or Intel byte order)"""
if h[:2] in ('MM', 'II'):
return 'tiff'
tests.append(test_tiff)
def test_rgb(h, f):
"""SGI image library"""
if h[:2] == '\001\332':
return 'rgb'
tests.append(test_rgb)
def test_pbm(h, f):
"""PBM (portable bitmap)"""
if len(h) >= 3 and \
h[0] == 'P' and h[1] in '14' and h[2] in ' \t\n\r':
return 'pbm'
tests.append(test_pbm)
def test_pgm(h, f):
"""PGM (portable graymap)"""
if len(h) >= 3 and \
h[0] == 'P' and h[1] in '25' and h[2] in ' \t\n\r':
return 'pgm'
tests.append(test_pgm)
def test_ppm(h, f):
"""PPM (portable pixmap)"""
if len(h) >= 3 and \
h[0] == 'P' and h[1] in '36' and h[2] in ' \t\n\r':
return 'ppm'
tests.append(test_ppm)
def test_rast(h, f):
"""Sun raster file"""
if h[:4] == '\x59\xA6\x6A\x95':
return 'rast'
tests.append(test_rast)
def test_xbm(h, f):
"""X bitmap (X10 or X11)"""
s = '#define '
if h[:len(s)] == s:
return 'xbm'
tests.append(test_xbm)
def test_bmp(h, f):
if h[:2] == 'BM':
return 'bmp'
tests.append(test_bmp)
#--------------------#
# Small test program #
#--------------------#
def test():
import sys
recursive = 0
if sys.argv[1:] and sys.argv[1] == '-r':
del sys.argv[1:2]
recursive = 1
try:
if sys.argv[1:]:
testall(sys.argv[1:], recursive, 1)
else:
testall(['.'], recursive, 1)
except KeyboardInterrupt:
sys.stderr.write('\n[Interrupted]\n')
sys.exit(1)
def testall(list, recursive, toplevel):
import sys
import os
for filename in list:
if os.path.isdir(filename):
print filename + '/:',
if recursive or toplevel:
print 'recursing down:'
import glob
names = glob.glob(os.path.join(filename, '*'))
testall(names, recursive, 0)
else:
print '*** directory (use -r) ***'
else:
print filename + ':',
sys.stdout.flush()
try:
print what(filename)
except IOError:
print '*** not found ***'
|
mit
|
CapeDrew/DCMTK-ITK
|
Wrapping/WrapITK/Languages/Ruby/Tests/notYetUsable/itkCurvatureFlowTestPython2.py
|
13
|
2703
|
from InsightToolkit import *
import itktesting
import sys
import os
import shutil
basename = os.path.basename( sys.argv[0] )
name = os.path.splitext( basename )[0]
dir = "Algorithms"
testInput = itktesting.ITK_TEST_INPUT
testOutput = itktesting.ITK_TEST_OUTPUT
baseLine = itktesting.ITK_TEST_BASELINE
reader = itkImageFileReaderF2_New()
reader.SetFileName( testInput+"/cthead1.png")
cf = itkCurvatureFlowImageFilterF2F2_New()
cf.SetInput( reader.GetOutput() )
cf.SetTimeStep( 0.25 )
cf.SetNumberOfIterations( 10 )
cfss = itkShiftScaleImageFilterF2US2_New()
cfss.SetInput( cf.GetOutput() )
cfss.SetShift( 0.7 )
cfss.SetScale( 0.9 )
valid = itkImageFileReaderUS2_New()
valid.SetFileName( baseLine+"/"+dir+"/"+name+".png")
diff = itkDifferenceImageFilterUS2_New()
diff.SetValidInput( valid.GetOutput() )
diff.SetTestInput( cfss.GetOutput() )
diff.SetToleranceRadius( 1 )
diff.SetDifferenceThreshold( 0 )
diff.Update()
meanDiff = diff.GetMeanDifference()
totalDiff = diff.GetTotalDifference()
print "MeanDifference = ", meanDiff
print "TotalDifference = ", totalDiff
print "<DartMeasurement name=\"MeanDifference\" type=\"numeric/double\">",meanDiff,"</DartMeasurement>"
print "<DartMeasurement name=\"TotalDifference\" type=\"numeric/double\">",totalDiff,"</DartMeasurement>"
if ( meanDiff > 0.1 ) :
convert = itkCastImageFilterUS2UC2_New()
rescale = itkRescaleIntensityImageFilterUS2UC2_New()
rescale.SetInput( diff.GetOutput() )
rescale.SetOutputMinimum( 0 )
rescale.SetOutputMaximum( 255 )
io = itkPNGImageIO_New()
io.SetUseCompression( 1 )
io.SetCompressionLevel( 9 )
writer = itkImageFileWriterUC2_New()
writer.SetImageIO( io.GetPointer() )
writer.SetInput( convert.GetOutput() )
writer.SetFileName( testOutput+"/"+name+".test.png" )
convert.SetInput( cfss.GetOutput() )
writer.Write()
writer.SetFileName( testOutput+"/"+name+".diff.png" )
writer.SetInput( rescale.GetOutput() )
writer.Write()
shutil.copyfile( baseLine+"/"+dir+"/"+name+".png", testOutput+"/"+name+".valid.png" )
print "<DartMeasurementFile name=\"TestImage\" type=\"image/png\">"+testOutput+"/"+name+".test.png</DartMeasurementFile>"
print "<DartMeasurementFile name=\"DifferenceImage\" type=\"image/png\">"+testOutput+"/"+name+".diff.png</DartMeasurementFile>"
print "<DartMeasurementFile name=\"ValidImage\" type=\"image/png\">"+testOutput+"/"+name+".valid.png</DartMeasurementFile>"
print "<DartMeasurement name=\"DifferenceShift\" type=\"numeric/double\">",rescale.GetShift(),"</DartMeasurement>"
print "<DartMeasurement name=\"DifferenceScale\" type=\"numeric/double\">",rescale.GetScale(),"</DartMeasurement>"
# return 1
#return 0
|
apache-2.0
|
rikai/podpublish
|
youtube_upload/auth/webkit_qt.py
|
1
|
1947
|
CHECK_AUTH_JS = """
var code = document.getElementById("code");
var access_denied = document.getElementById("access_denied");
var result;
if (code) {
result = {authorized: true, code: code.value};
} else if (access_denied) {
result = {authorized: false, message: access_denied.innerText};
} else {
result = {};
}
result;
"""
def _on_qt_page_load_finished(dialog, webview):
to_s = lambda x: (str(x.toUtf8()) if hasattr(x,'toUtf8') else x)
frame = webview.page().currentFrame()
try: #PySide does not QStrings
from QtCore import QString
jscode = QString(CHECK_AUTH_JS)
except ImportError:
jscode = CHECK_AUTH_JS
res = frame.evaluateJavaScript(jscode)
try:
authorization = dict((to_s(k), to_s(v)) for (k, v) in res.toPyObject().items())
except AttributeError: #PySide returns the result in pure Python
authorization = dict((to_s(k), to_s(v)) for (k, v) in res.items())
if "authorized" in authorization:
dialog.authorization_code = authorization.get("code")
dialog.close()
def get_code(url, size=(640, 480), title="Google authentication"):
"""Open a QT webkit window and return the access code."""
try:
from PyQt4 import QtCore, QtGui, QtWebKit
except ImportError:
from PySide import QtCore, QtGui, QtWebKit
app = QtGui.QApplication([])
dialog = QtGui.QDialog()
dialog.setWindowTitle(title)
dialog.resize(*size)
webview = QtWebKit.QWebView()
webpage = QtWebKit.QWebPage()
webview.setPage(webpage)
webpage.loadFinished.connect(lambda: _on_qt_page_load_finished(dialog, webview))
webview.setUrl(QtCore.QUrl.fromEncoded(url))
layout = QtGui.QGridLayout()
layout.addWidget(webview)
dialog.setLayout(layout)
dialog.authorization_code = None
dialog.show()
app.exec_()
return dialog.authorization_code
|
lgpl-2.1
|
HybridF5/jacket
|
jacket/tests/compute/unit/compute/test_compute_api.py
|
1
|
167887
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for compute API."""
import copy
import datetime
import iso8601
import mock
from mox3 import mox
from oslo_policy import policy as oslo_policy
from oslo_serialization import jsonutils
from oslo_utils import fixture as utils_fixture
from oslo_utils import timeutils
from oslo_utils import uuidutils
from jacket import context
from jacket.compute import conductor
from jacket.compute import exception
from jacket.compute import policy
from jacket.compute import quota
from jacket.compute import test
from jacket.compute import utils
from jacket.compute.cloud import api as compute_api
from jacket.compute.cloud import arch
from jacket.compute.cloud import cells_api as compute_cells_api
from jacket.compute.cloud import flavors
from jacket.compute.cloud import instance_actions
from jacket.compute.cloud import rpcapi as compute_rpcapi
from jacket.compute.cloud import task_states
from jacket.compute.cloud import utils as compute_utils
from jacket.compute.cloud import vm_mode
from jacket.compute.cloud import vm_states
from jacket.compute.volume import cinder
from jacket.db import compute
from jacket.objects import compute
from jacket.objects.compute import base as obj_base
from jacket.objects.compute import fields as fields_obj
from jacket.objects.compute import quotas as quotas_obj
from jacket.tests.compute import uuidsentinel as uuids
from jacket.tests.compute.unit import fake_block_device
from jacket.tests.compute.unit import fake_instance
from jacket.tests.compute.unit import fake_volume
from jacket.tests.compute.unit import matchers
from jacket.tests.compute.unit.image import fake as fake_image
from jacket.tests.compute.unit.objects import test_flavor
from jacket.tests.compute.unit.objects import test_migration
from jacket.tests.compute.unit.objects import test_service
from oslo_messaging import exceptions as oslo_exceptions
FAKE_IMAGE_REF = 'fake-image-ref'
NODENAME = 'fakenode1'
SHELVED_IMAGE = 'fake-shelved-image'
SHELVED_IMAGE_NOT_FOUND = 'fake-shelved-image-notfound'
SHELVED_IMAGE_NOT_AUTHORIZED = 'fake-shelved-image-not-authorized'
SHELVED_IMAGE_EXCEPTION = 'fake-shelved-image-exception'
class _ComputeAPIUnitTestMixIn(object):
def setUp(self):
super(_ComputeAPIUnitTestMixIn, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.compute_api = compute_api.API()
self.context = context.RequestContext(self.user_id,
self.project_id)
def _get_vm_states(self, exclude_states=None):
vm_state = set([vm_states.ACTIVE, vm_states.BUILDING, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.RESCUED, vm_states.STOPPED,
vm_states.RESIZED, vm_states.SOFT_DELETED,
vm_states.DELETED, vm_states.ERROR, vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED])
if not exclude_states:
exclude_states = set()
return vm_state - exclude_states
def _create_flavor(self, **updates):
flavor = {'id': 1,
'flavorid': 1,
'name': 'm1.tiny',
'memory_mb': 512,
'vcpus': 1,
'vcpu_weight': None,
'root_gb': 1,
'ephemeral_gb': 0,
'rxtx_factor': 1,
'swap': 0,
'deleted': 0,
'disabled': False,
'is_public': True,
'deleted_at': None,
'created_at': datetime.datetime(2012, 1, 19, 18,
49, 30, 877329),
'updated_at': None,
}
if updates:
flavor.update(updates)
return compute.Flavor._from_db_object(self.context, compute.Flavor(),
flavor)
def _create_instance_obj(self, params=None, flavor=None):
"""Create a test instance."""
if not params:
params = {}
if flavor is None:
flavor = self._create_flavor()
now = timeutils.utcnow()
instance = compute.Instance()
instance.metadata = {}
instance.metadata.update(params.pop('metadata', {}))
instance.system_metadata = params.pop('system_metadata', {})
instance._context = self.context
instance.id = 1
instance.uuid = uuidutils.generate_uuid()
instance.cell_name = 'api!child'
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.image_ref = FAKE_IMAGE_REF
instance.reservation_id = 'r-fakeres'
instance.user_id = self.user_id
instance.project_id = self.project_id
instance.host = 'fake_host'
instance.node = NODENAME
instance.instance_type_id = flavor.id
instance.ami_launch_index = 0
instance.memory_mb = 0
instance.vcpus = 0
instance.root_gb = 0
instance.ephemeral_gb = 0
instance.architecture = arch.X86_64
instance.os_type = 'Linux'
instance.locked = False
instance.created_at = now
instance.updated_at = now
instance.launched_at = now
instance.disable_terminate = False
instance.info_cache = compute.InstanceInfoCache()
instance.flavor = flavor
instance.old_flavor = instance.new_flavor = None
if params:
instance.update(params)
instance.obj_reset_changes()
return instance
def _obj_to_list_obj(self, list_obj, obj):
list_obj.objects = []
list_obj.objects.append(obj)
list_obj._context = self.context
list_obj.obj_reset_changes()
return list_obj
def test_create_quota_exceeded_messages(self):
image_href = "image_href"
image_id = 0
instance_type = self._create_flavor()
self.mox.StubOutWithMock(self.compute_api, "_get_image")
self.mox.StubOutWithMock(quota.QUOTAS, "limit_check")
self.mox.StubOutWithMock(quota.QUOTAS, "reserve")
quotas = {'instances': 1, 'cores': 1, 'ram': 1}
usages = {r: {'in_use': 1, 'reserved': 1} for r in
['instances', 'cores', 'ram']}
quota_exception = exception.OverQuota(quotas=quotas,
usages=usages, overs=['instances'])
for _unused in range(2):
self.compute_api._get_image(self.context, image_href).AndReturn(
(image_id, {}))
quota.QUOTAS.limit_check(self.context, metadata_items=mox.IsA(int),
project_id=mox.IgnoreArg(),
user_id=mox.IgnoreArg())
quota.QUOTAS.reserve(self.context, instances=40,
cores=mox.IsA(int),
expire=mox.IgnoreArg(),
project_id=mox.IgnoreArg(),
user_id=mox.IgnoreArg(),
ram=mox.IsA(int)).AndRaise(quota_exception)
self.mox.ReplayAll()
for min_count, message in [(20, '20-40'), (40, '40')]:
try:
self.compute_api.create(self.context, instance_type,
"image_href", min_count=min_count,
max_count=40)
except exception.TooManyInstances as e:
self.assertEqual(message, e.kwargs['req'])
else:
self.fail("Exception not raised")
def _test_create_max_net_count(self, max_net_count, min_count, max_count):
with test.nested(
mock.patch.object(self.compute_api, '_get_image',
return_value=(None, {})),
mock.patch.object(self.compute_api, '_check_auto_disk_config'),
mock.patch.object(self.compute_api,
'_validate_and_build_base_options',
return_value=({}, max_net_count))
) as (
get_image,
check_auto_disk_config,
validate_and_build_base_options
):
self.assertRaises(exception.PortLimitExceeded,
self.compute_api.create, self.context, 'fake_flavor',
'image_id', min_count=min_count, max_count=max_count)
def test_max_net_count_zero(self):
# Test when max_net_count is zero.
max_net_count = 0
min_count = 2
max_count = 3
self._test_create_max_net_count(max_net_count, min_count, max_count)
def test_max_net_count_less_than_min_count(self):
# Test when max_net_count is nonzero but less than min_count.
max_net_count = 1
min_count = 2
max_count = 3
self._test_create_max_net_count(max_net_count, min_count, max_count)
def test_specified_port_and_multiple_instances_neutronv2(self):
# Tests that if port is specified there is only one instance booting
# (i.e max_count == 1) as we can't share the same port across multiple
# instances.
self.flags(use_neutron=True)
port = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
address = '10.0.0.1'
min_count = 1
max_count = 2
requested_networks = compute.NetworkRequestList(
compute=[compute.NetworkRequest(address=address,
port_id=port)])
self.assertRaises(exception.MultiplePortsNotApplicable,
self.compute_api.create, self.context, 'fake_flavor', 'image_id',
min_count=min_count, max_count=max_count,
requested_networks=requested_networks)
def _test_specified_ip_and_multiple_instances_helper(self,
requested_networks):
# Tests that if ip is specified there is only one instance booting
# (i.e max_count == 1)
min_count = 1
max_count = 2
self.assertRaises(exception.InvalidFixedIpAndMaxCountRequest,
self.compute_api.create, self.context, "fake_flavor", 'image_id',
min_count=min_count, max_count=max_count,
requested_networks=requested_networks)
def test_specified_ip_and_multiple_instances(self):
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
address = '10.0.0.1'
requested_networks = compute.NetworkRequestList(
compute=[compute.NetworkRequest(network_id=network,
address=address)])
self._test_specified_ip_and_multiple_instances_helper(
requested_networks)
def test_specified_ip_and_multiple_instances_neutronv2(self):
self.flags(use_neutron=True)
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
address = '10.0.0.1'
requested_networks = compute.NetworkRequestList(
compute=[compute.NetworkRequest(network_id=network,
address=address)])
self._test_specified_ip_and_multiple_instances_helper(
requested_networks)
@mock.patch.object(compute_rpcapi.JacketAPI, 'reserve_block_device_name')
def test_create_volume_bdm_call_reserve_dev_name(self, mock_reserve):
bdm = compute.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict(
{
'id': 1,
'volume_id': 1,
'source_type': 'volume',
'destination_type': 'volume',
'device_name': 'vda',
'boot_index': 1,
}))
mock_reserve.return_value = bdm
instance = self._create_instance_obj()
result = self.compute_api._create_volume_bdm(self.context,
instance,
'vda',
'1',
None,
None)
self.assertTrue(mock_reserve.called)
self.assertEqual(result, bdm)
@mock.patch.object(compute.BlockDeviceMapping, 'create')
def test_create_volume_bdm_local_creation(self, bdm_create):
instance = self._create_instance_obj()
volume_id = 'fake-vol-id'
bdm = compute.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict(
{
'instance_uuid': instance.uuid,
'volume_id': volume_id,
'source_type': 'volume',
'destination_type': 'volume',
'device_name': 'vda',
'boot_index': None,
'disk_bus': None,
'device_type': None
}))
result = self.compute_api._create_volume_bdm(self.context,
instance,
'/dev/vda',
volume_id,
None,
None,
is_local_creation=True)
self.assertEqual(result.instance_uuid, bdm.instance_uuid)
self.assertIsNone(result.device_name)
self.assertEqual(result.volume_id, bdm.volume_id)
self.assertTrue(bdm_create.called)
@mock.patch.object(compute_rpcapi.JacketAPI, 'reserve_block_device_name')
@mock.patch.object(compute_rpcapi.JacketAPI, 'attach_volume')
def test_attach_volume(self, mock_attach, mock_reserve):
instance = self._create_instance_obj()
volume = fake_volume.fake_volume(1, 'test-vol', 'test-vol',
None, None, None, None, None)
fake_bdm = mock.MagicMock(spec=compute.BlockDeviceMapping)
mock_reserve.return_value = fake_bdm
mock_volume_api = mock.patch.object(self.compute_api, 'volume_api',
mock.MagicMock(spec=cinder.API))
with mock_volume_api as mock_v_api:
mock_v_api.get.return_value = volume
self.compute_api.attach_volume(
self.context, instance, volume['id'])
mock_v_api.check_attach.assert_called_once_with(self.context,
volume,
instance=instance)
mock_v_api.reserve_volume.assert_called_once_with(self.context,
volume['id'])
mock_attach.assert_called_once_with(self.context,
instance, fake_bdm)
@mock.patch.object(compute_rpcapi.JacketAPI, 'reserve_block_device_name')
@mock.patch.object(compute_rpcapi.JacketAPI, 'attach_volume')
def test_attach_volume_reserve_fails(self, mock_attach, mock_reserve):
instance = self._create_instance_obj()
volume = fake_volume.fake_volume(1, 'test-vol', 'test-vol',
None, None, None, None, None)
fake_bdm = mock.MagicMock(spec=compute.BlockDeviceMapping)
mock_reserve.return_value = fake_bdm
mock_volume_api = mock.patch.object(self.compute_api, 'volume_api',
mock.MagicMock(spec=cinder.API))
with mock_volume_api as mock_v_api:
mock_v_api.get.return_value = volume
mock_v_api.reserve_volume.side_effect = test.TestingException()
self.assertRaises(test.TestingException,
self.compute_api.attach_volume,
self.context, instance, volume['id'])
mock_v_api.check_attach.assert_called_once_with(self.context,
volume,
instance=instance)
mock_v_api.reserve_volume.assert_called_once_with(self.context,
volume['id'])
self.assertEqual(0, mock_attach.call_count)
fake_bdm.destroy.assert_called_once_with()
def test_suspend(self):
# Ensure instance can be suspended.
instance = self._create_instance_obj()
self.assertEqual(instance.vm_state, vm_states.ACTIVE)
self.assertIsNone(instance.task_state)
self.mox.StubOutWithMock(instance, 'save')
self.mox.StubOutWithMock(self.compute_api,
'_record_action_start')
if self.cell_type == 'api':
rpcapi = self.compute_api.cells_rpcapi
else:
rpcapi = self.compute_api.jacket_rpcapi
self.mox.StubOutWithMock(rpcapi, 'suspend_instance')
instance.save(expected_task_state=[None])
self.compute_api._record_action_start(self.context,
instance, instance_actions.SUSPEND)
rpcapi.suspend_instance(self.context, instance)
self.mox.ReplayAll()
self.compute_api.suspend(self.context, instance)
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
self.assertEqual(task_states.SUSPENDING,
instance.task_state)
def _test_suspend_fails(self, vm_state):
params = dict(vm_state=vm_state)
instance = self._create_instance_obj(params=params)
self.assertIsNone(instance.task_state)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.suspend,
self.context, instance)
def test_suspend_fails_invalid_states(self):
invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE]))
for state in invalid_vm_states:
self._test_suspend_fails(state)
def test_resume(self):
# Ensure instance can be resumed (if suspended).
instance = self._create_instance_obj(
params=dict(vm_state=vm_states.SUSPENDED))
self.assertEqual(instance.vm_state, vm_states.SUSPENDED)
self.assertIsNone(instance.task_state)
self.mox.StubOutWithMock(instance, 'save')
self.mox.StubOutWithMock(self.compute_api,
'_record_action_start')
if self.cell_type == 'api':
rpcapi = self.compute_api.cells_rpcapi
else:
rpcapi = self.compute_api.jacket_rpcapi
self.mox.StubOutWithMock(rpcapi, 'resume_instance')
instance.save(expected_task_state=[None])
self.compute_api._record_action_start(self.context,
instance, instance_actions.RESUME)
rpcapi.resume_instance(self.context, instance)
self.mox.ReplayAll()
self.compute_api.resume(self.context, instance)
self.assertEqual(vm_states.SUSPENDED, instance.vm_state)
self.assertEqual(task_states.RESUMING,
instance.task_state)
def test_start(self):
params = dict(vm_state=vm_states.STOPPED)
instance = self._create_instance_obj(params=params)
self.mox.StubOutWithMock(instance, 'save')
self.mox.StubOutWithMock(self.compute_api,
'_record_action_start')
instance.save(expected_task_state=[None])
self.compute_api._record_action_start(self.context,
instance, instance_actions.START)
if self.cell_type == 'api':
rpcapi = self.compute_api.cells_rpcapi
else:
rpcapi = self.compute_api.jacket_rpcapi
self.mox.StubOutWithMock(rpcapi, 'start_instance')
rpcapi.start_instance(self.context, instance)
self.mox.ReplayAll()
self.compute_api.start(self.context, instance)
self.assertEqual(task_states.POWERING_ON,
instance.task_state)
def test_start_invalid_state(self):
instance = self._create_instance_obj()
self.assertEqual(instance.vm_state, vm_states.ACTIVE)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.start,
self.context, instance)
def test_start_no_host(self):
params = dict(vm_state=vm_states.STOPPED, host='')
instance = self._create_instance_obj(params=params)
self.assertRaises(exception.InstanceNotReady,
self.compute_api.start,
self.context, instance)
def _test_stop(self, vm_state, force=False, clean_shutdown=True):
# Make sure 'progress' gets reset
params = dict(task_state=None, progress=99, vm_state=vm_state)
instance = self._create_instance_obj(params=params)
self.mox.StubOutWithMock(instance, 'save')
self.mox.StubOutWithMock(self.compute_api,
'_record_action_start')
instance.save(expected_task_state=[None])
self.compute_api._record_action_start(self.context,
instance, instance_actions.STOP)
if self.cell_type == 'api':
rpcapi = self.compute_api.cells_rpcapi
else:
rpcapi = self.compute_api.jacket_rpcapi
self.mox.StubOutWithMock(rpcapi, 'stop_instance')
rpcapi.stop_instance(self.context, instance, do_cast=True,
clean_shutdown=clean_shutdown)
self.mox.ReplayAll()
if force:
self.compute_api.force_stop(self.context, instance,
clean_shutdown=clean_shutdown)
else:
self.compute_api.stop(self.context, instance,
clean_shutdown=clean_shutdown)
self.assertEqual(task_states.POWERING_OFF,
instance.task_state)
self.assertEqual(0, instance.progress)
def test_stop(self):
self._test_stop(vm_states.ACTIVE)
def test_stop_stopped_instance_with_bypass(self):
self._test_stop(vm_states.STOPPED, force=True)
def test_stop_forced_shutdown(self):
self._test_stop(vm_states.ACTIVE, force=True)
def test_stop_without_clean_shutdown(self):
self._test_stop(vm_states.ACTIVE,
clean_shutdown=False)
def test_stop_forced_without_clean_shutdown(self):
self._test_stop(vm_states.ACTIVE, force=True,
clean_shutdown=False)
def _test_stop_invalid_state(self, vm_state):
params = dict(vm_state=vm_state)
instance = self._create_instance_obj(params=params)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.stop,
self.context, instance)
def test_stop_fails_invalid_states(self):
invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE,
vm_states.ERROR]))
for state in invalid_vm_states:
self._test_stop_invalid_state(state)
def test_stop_a_stopped_inst(self):
params = {'vm_state': vm_states.STOPPED}
instance = self._create_instance_obj(params=params)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.stop,
self.context, instance)
def test_stop_no_host(self):
params = {'host': ''}
instance = self._create_instance_obj(params=params)
self.assertRaises(exception.InstanceNotReady,
self.compute_api.stop,
self.context, instance)
@mock.patch('compute.compute.api.API._record_action_start')
@mock.patch('compute.compute.rpcapi.ComputeAPI.trigger_crash_dump')
def test_trigger_crash_dump(self,
trigger_crash_dump,
_record_action_start):
instance = self._create_instance_obj()
self.compute_api.trigger_crash_dump(self.context, instance)
_record_action_start.assert_called_once_with(self.context, instance,
instance_actions.TRIGGER_CRASH_DUMP)
if self.cell_type == 'api':
# cell api has not been implemented.
pass
else:
trigger_crash_dump.assert_called_once_with(self.context, instance)
self.assertIsNone(instance.task_state)
def test_trigger_crash_dump_invalid_state(self):
params = dict(vm_state=vm_states.STOPPED)
instance = self._create_instance_obj(params)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.trigger_crash_dump,
self.context, instance)
def test_trigger_crash_dump_no_host(self):
params = dict(host='')
instance = self._create_instance_obj(params=params)
self.assertRaises(exception.InstanceNotReady,
self.compute_api.trigger_crash_dump,
self.context, instance)
def test_trigger_crash_dump_locked(self):
params = dict(locked=True)
instance = self._create_instance_obj(params=params)
self.assertRaises(exception.InstanceIsLocked,
self.compute_api.trigger_crash_dump,
self.context, instance)
def _test_shelve(self, vm_state=vm_states.ACTIVE,
boot_from_volume=False, clean_shutdown=True):
params = dict(task_state=None, vm_state=vm_state,
display_name='fake-name')
instance = self._create_instance_obj(params=params)
with test.nested(
mock.patch.object(self.compute_api, 'is_volume_backed_instance',
return_value=boot_from_volume),
mock.patch.object(self.compute_api, '_create_image',
return_value=dict(id='fake-image-id')),
mock.patch.object(instance, 'save'),
mock.patch.object(self.compute_api, '_record_action_start'),
mock.patch.object(self.compute_api.jacket_rpcapi,
'shelve_instance'),
mock.patch.object(self.compute_api.jacket_rpcapi,
'shelve_offload_instance')
) as (
volume_backed_inst, create_image, instance_save,
record_action_start, rpcapi_shelve_instance,
rpcapi_shelve_offload_instance
):
self.compute_api.shelve(self.context, instance,
clean_shutdown=clean_shutdown)
# assert field values set on the instance object
self.assertEqual(task_states.SHELVING, instance.task_state)
# assert our mock calls
volume_backed_inst.assert_called_once_with(
self.context, instance)
instance_save.assert_called_once_with(expected_task_state=[None])
record_action_start.assert_called_once_with(
self.context, instance, instance_actions.SHELVE)
if boot_from_volume:
rpcapi_shelve_offload_instance.assert_called_once_with(
self.context, instance=instance,
clean_shutdown=clean_shutdown)
else:
rpcapi_shelve_instance.assert_called_once_with(
self.context, instance=instance, image_id='fake-image-id',
clean_shutdown=clean_shutdown)
def test_shelve(self):
self._test_shelve()
def test_shelve_stopped(self):
self._test_shelve(vm_state=vm_states.STOPPED)
def test_shelve_paused(self):
self._test_shelve(vm_state=vm_states.PAUSED)
def test_shelve_suspended(self):
self._test_shelve(vm_state=vm_states.SUSPENDED)
def test_shelve_boot_from_volume(self):
self._test_shelve(boot_from_volume=True)
def test_shelve_forced_shutdown(self):
self._test_shelve(clean_shutdown=False)
def test_shelve_boot_from_volume_forced_shutdown(self):
self._test_shelve(boot_from_volume=True,
clean_shutdown=False)
def _test_shelve_invalid_state(self, vm_state):
params = dict(vm_state=vm_state)
instance = self._create_instance_obj(params=params)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.shelve,
self.context, instance)
def test_shelve_fails_invalid_states(self):
invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE,
vm_states.STOPPED,
vm_states.PAUSED,
vm_states.SUSPENDED]))
for state in invalid_vm_states:
self._test_shelve_invalid_state(state)
def _test_shelve_offload(self, clean_shutdown=True):
params = dict(task_state=None, vm_state=vm_states.SHELVED)
instance = self._create_instance_obj(params=params)
with test.nested(
mock.patch.object(instance, 'save'),
mock.patch.object(self.compute_api.jacket_rpcapi,
'shelve_offload_instance')
) as (
instance_save, rpcapi_shelve_offload_instance
):
self.compute_api.shelve_offload(self.context, instance,
clean_shutdown=clean_shutdown)
# assert field values set on the instance object
self.assertEqual(task_states.SHELVING_OFFLOADING,
instance.task_state)
instance_save.assert_called_once_with(expected_task_state=[None])
rpcapi_shelve_offload_instance.assert_called_once_with(
self.context, instance=instance,
clean_shutdown=clean_shutdown)
def test_shelve_offload(self):
self._test_shelve_offload()
def test_shelve_offload_forced_shutdown(self):
self._test_shelve_offload(clean_shutdown=False)
def _test_shelve_offload_invalid_state(self, vm_state):
params = dict(vm_state=vm_state)
instance = self._create_instance_obj(params=params)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.shelve_offload,
self.context, instance)
def test_shelve_offload_fails_invalid_states(self):
invalid_vm_states = self._get_vm_states(set([vm_states.SHELVED]))
for state in invalid_vm_states:
self._test_shelve_offload_invalid_state(state)
def _test_reboot_type(self, vm_state, reboot_type, task_state=None):
# Ensure instance can be soft rebooted.
inst = self._create_instance_obj()
inst.vm_state = vm_state
inst.task_state = task_state
self.mox.StubOutWithMock(self.context, 'elevated')
self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
self.mox.StubOutWithMock(inst, 'save')
expected_task_state = [None]
if reboot_type == 'HARD':
expected_task_state.extend([task_states.REBOOTING,
task_states.REBOOT_PENDING,
task_states.REBOOT_STARTED,
task_states.REBOOTING_HARD,
task_states.RESUMING,
task_states.UNPAUSING,
task_states.SUSPENDING])
inst.save(expected_task_state=expected_task_state)
self.compute_api._record_action_start(self.context, inst,
instance_actions.REBOOT)
if self.cell_type == 'api':
rpcapi = self.compute_api.cells_rpcapi
else:
rpcapi = self.compute_api.jacket_rpcapi
self.mox.StubOutWithMock(rpcapi, 'reboot_instance')
rpcapi.reboot_instance(self.context, instance=inst,
block_device_info=None,
reboot_type=reboot_type)
self.mox.ReplayAll()
self.compute_api.reboot(self.context, inst, reboot_type)
def _test_reboot_type_fails(self, reboot_type, **updates):
inst = self._create_instance_obj()
inst.update(updates)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.reboot,
self.context, inst, reboot_type)
def test_reboot_hard_active(self):
self._test_reboot_type(vm_states.ACTIVE, 'HARD')
def test_reboot_hard_error(self):
self._test_reboot_type(vm_states.ERROR, 'HARD')
def test_reboot_hard_rebooting(self):
self._test_reboot_type(vm_states.ACTIVE, 'HARD',
task_state=task_states.REBOOTING)
def test_reboot_hard_reboot_started(self):
self._test_reboot_type(vm_states.ACTIVE, 'HARD',
task_state=task_states.REBOOT_STARTED)
def test_reboot_hard_reboot_pending(self):
self._test_reboot_type(vm_states.ACTIVE, 'HARD',
task_state=task_states.REBOOT_PENDING)
def test_reboot_hard_rescued(self):
self._test_reboot_type_fails('HARD', vm_state=vm_states.RESCUED)
def test_reboot_hard_resuming(self):
self._test_reboot_type(vm_states.ACTIVE,
'HARD', task_state=task_states.RESUMING)
def test_reboot_hard_pausing(self):
self._test_reboot_type(vm_states.ACTIVE,
'HARD', task_state=task_states.PAUSING)
def test_reboot_hard_unpausing(self):
self._test_reboot_type(vm_states.ACTIVE,
'HARD', task_state=task_states.UNPAUSING)
def test_reboot_hard_suspending(self):
self._test_reboot_type(vm_states.ACTIVE,
'HARD', task_state=task_states.SUSPENDING)
def test_reboot_hard_error_not_launched(self):
self._test_reboot_type_fails('HARD', vm_state=vm_states.ERROR,
launched_at=None)
def test_reboot_soft(self):
self._test_reboot_type(vm_states.ACTIVE, 'SOFT')
def test_reboot_soft_error(self):
self._test_reboot_type_fails('SOFT', vm_state=vm_states.ERROR)
def test_reboot_soft_paused(self):
self._test_reboot_type_fails('SOFT', vm_state=vm_states.PAUSED)
def test_reboot_soft_stopped(self):
self._test_reboot_type_fails('SOFT', vm_state=vm_states.STOPPED)
def test_reboot_soft_suspended(self):
self._test_reboot_type_fails('SOFT', vm_state=vm_states.SUSPENDED)
def test_reboot_soft_rebooting(self):
self._test_reboot_type_fails('SOFT', task_state=task_states.REBOOTING)
def test_reboot_soft_rebooting_hard(self):
self._test_reboot_type_fails('SOFT',
task_state=task_states.REBOOTING_HARD)
def test_reboot_soft_reboot_started(self):
self._test_reboot_type_fails('SOFT',
task_state=task_states.REBOOT_STARTED)
def test_reboot_soft_reboot_pending(self):
self._test_reboot_type_fails('SOFT',
task_state=task_states.REBOOT_PENDING)
def test_reboot_soft_rescued(self):
self._test_reboot_type_fails('SOFT', vm_state=vm_states.RESCUED)
def test_reboot_soft_error_not_launched(self):
self._test_reboot_type_fails('SOFT', vm_state=vm_states.ERROR,
launched_at=None)
def test_reboot_soft_resuming(self):
self._test_reboot_type_fails('SOFT', task_state=task_states.RESUMING)
def test_reboot_soft_pausing(self):
self._test_reboot_type_fails('SOFT', task_state=task_states.PAUSING)
def test_reboot_soft_unpausing(self):
self._test_reboot_type_fails('SOFT', task_state=task_states.UNPAUSING)
def test_reboot_soft_suspending(self):
self._test_reboot_type_fails('SOFT', task_state=task_states.SUSPENDING)
def _test_delete_resizing_part(self, inst, deltas):
old_flavor = inst.old_flavor
deltas['cores'] = -old_flavor.vcpus
deltas['ram'] = -old_flavor.memory_mb
def _test_delete_resized_part(self, inst):
migration = compute.Migration._from_db_object(
self.context, compute.Migration(),
test_migration.fake_db_migration())
self.mox.StubOutWithMock(compute.Migration,
'get_by_instance_and_status')
self.context.elevated().AndReturn(self.context)
compute.Migration.get_by_instance_and_status(
self.context, inst.uuid, 'finished').AndReturn(migration)
compute_utils.downsize_quota_delta(self.context,
inst).AndReturn('deltas')
fake_quotas = compute.Quotas.from_reservations(self.context,
['rsvs'])
compute_utils.reserve_quota_delta(self.context, 'deltas',
inst).AndReturn(fake_quotas)
self.compute_api._record_action_start(
self.context, inst, instance_actions.CONFIRM_RESIZE)
self.compute_api.jacket_rpcapi.confirm_resize(
self.context, inst, migration,
migration['source_compute'], fake_quotas.reservations, cast=False)
def _test_delete_shelved_part(self, inst):
image_api = self.compute_api.image_api
self.mox.StubOutWithMock(image_api, 'delete')
snapshot_id = inst.system_metadata.get('shelved_image_id')
if snapshot_id == SHELVED_IMAGE:
image_api.delete(self.context, snapshot_id).AndReturn(True)
elif snapshot_id == SHELVED_IMAGE_NOT_FOUND:
image_api.delete(self.context, snapshot_id).AndRaise(
exception.ImageNotFound(image_id=snapshot_id))
elif snapshot_id == SHELVED_IMAGE_NOT_AUTHORIZED:
image_api.delete(self.context, snapshot_id).AndRaise(
exception.ImageNotAuthorized(image_id=snapshot_id))
elif snapshot_id == SHELVED_IMAGE_EXCEPTION:
image_api.delete(self.context, snapshot_id).AndRaise(
test.TestingException("Unexpected error"))
def _test_downed_host_part(self, inst, updates, delete_time, delete_type):
compute_utils.notify_about_instance_usage(
self.compute_api.notifier, self.context, inst,
'%s.start' % delete_type)
self.context.elevated().AndReturn(self.context)
self.compute_api.network_api.deallocate_for_instance(
self.context, inst)
state = ('soft' in delete_type and vm_states.SOFT_DELETED or
vm_states.DELETED)
updates.update({'vm_state': state,
'task_state': None,
'terminated_at': delete_time})
inst.save()
updates.update({'deleted_at': delete_time,
'deleted': True})
fake_inst = fake_instance.fake_db_instance(**updates)
self.compute_api._local_cleanup_bdm_volumes([], inst, self.context)
compute.instance_destroy(self.context, inst.uuid,
constraint=None).AndReturn(fake_inst)
compute_utils.notify_about_instance_usage(
self.compute_api.notifier,
self.context, inst, '%s.end' % delete_type,
system_metadata=inst.system_metadata)
def _test_delete(self, delete_type, **attrs):
reservations = ['fake-resv']
inst = self._create_instance_obj()
inst.update(attrs)
inst._context = self.context
deltas = {'instances': -1,
'cores': -inst.vcpus,
'ram': -inst.memory_mb}
delete_time = datetime.datetime(1955, 11, 5, 9, 30,
tzinfo=iso8601.iso8601.Utc())
self.useFixture(utils_fixture.TimeFixture(delete_time))
task_state = (delete_type == 'soft_delete' and
task_states.SOFT_DELETING or task_states.DELETING)
updates = {'progress': 0, 'task_state': task_state}
if delete_type == 'soft_delete':
updates['deleted_at'] = delete_time
self.mox.StubOutWithMock(inst, 'save')
self.mox.StubOutWithMock(compute.BlockDeviceMappingList,
'get_by_instance_uuid')
self.mox.StubOutWithMock(quota.QUOTAS, 'reserve')
self.mox.StubOutWithMock(self.context, 'elevated')
self.mox.StubOutWithMock(compute, 'service_get_by_compute_host')
self.mox.StubOutWithMock(self.compute_api.servicegroup_api,
'service_is_up')
self.mox.StubOutWithMock(compute_utils, 'downsize_quota_delta')
self.mox.StubOutWithMock(compute_utils, 'reserve_quota_delta')
self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
self.mox.StubOutWithMock(compute, 'instance_update_and_get_original')
self.mox.StubOutWithMock(self.compute_api.network_api,
'deallocate_for_instance')
self.mox.StubOutWithMock(compute, 'instance_system_metadata_get')
self.mox.StubOutWithMock(compute, 'instance_destroy')
self.mox.StubOutWithMock(compute_utils,
'notify_about_instance_usage')
self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
rpcapi = self.compute_api.jacket_rpcapi
self.mox.StubOutWithMock(rpcapi, 'confirm_resize')
if (inst.vm_state in
(vm_states.SHELVED, vm_states.SHELVED_OFFLOADED)):
self._test_delete_shelved_part(inst)
if self.cell_type == 'api':
rpcapi = self.compute_api.cells_rpcapi
self.mox.StubOutWithMock(rpcapi, 'terminate_instance')
self.mox.StubOutWithMock(rpcapi, 'soft_delete_instance')
compute.BlockDeviceMappingList.get_by_instance_uuid(
self.context, inst.uuid).AndReturn([])
inst.save()
if inst.task_state == task_states.RESIZE_FINISH:
self._test_delete_resizing_part(inst, deltas)
quota.QUOTAS.reserve(self.context, project_id=inst.project_id,
user_id=inst.user_id,
expire=mox.IgnoreArg(),
**deltas).AndReturn(reservations)
# NOTE(comstud): This is getting messy. But what we are wanting
# to test is:
# If cells is enabled and we're the API cell:
# * Cast to cells_rpcapi.<method> with reservations=None
# * Commit reservations
# Otherwise:
# * Check for downed host
# * If downed host:
# * Clean up instance, destroying it, sending notifications.
# (Tested in _test_downed_host_part())
# * Commit reservations
# * If not downed host:
# * Record the action start.
# * Cast to compute_rpcapi.<method> with the reservations
cast = True
commit_quotas = True
soft_delete = False
if self.cell_type != 'api':
if inst.vm_state == vm_states.RESIZED:
self._test_delete_resized_part(inst)
if inst.vm_state == vm_states.SOFT_DELETED:
soft_delete = True
if inst.vm_state != vm_states.SHELVED_OFFLOADED:
self.context.elevated().AndReturn(self.context)
compute.service_get_by_compute_host(
self.context, inst.host).AndReturn(
test_service.fake_service)
self.compute_api.servicegroup_api.service_is_up(
mox.IsA(compute.Service)).AndReturn(
inst.host != 'down-host')
if (inst.host == 'down-host' or
inst.vm_state == vm_states.SHELVED_OFFLOADED):
self._test_downed_host_part(inst, updates, delete_time,
delete_type)
cast = False
else:
# Happens on the manager side
commit_quotas = False
if cast:
if self.cell_type != 'api':
self.compute_api._record_action_start(self.context, inst,
instance_actions.DELETE)
if commit_quotas or soft_delete:
cast_reservations = None
else:
cast_reservations = reservations
if delete_type == 'soft_delete':
rpcapi.soft_delete_instance(self.context, inst,
reservations=cast_reservations)
elif delete_type in ['delete', 'force_delete']:
rpcapi.terminate_instance(self.context, inst, [],
reservations=cast_reservations,
delete_type=delete_type)
if commit_quotas:
# Local delete or when we're testing API cell.
quota.QUOTAS.commit(self.context, reservations,
project_id=inst.project_id,
user_id=inst.user_id)
self.mox.ReplayAll()
getattr(self.compute_api, delete_type)(self.context, inst)
for k, v in updates.items():
self.assertEqual(inst[k], v)
self.mox.UnsetStubs()
def test_delete(self):
self._test_delete('delete')
def test_delete_if_not_launched(self):
self._test_delete('delete', launched_at=None)
def test_delete_in_resizing(self):
old_flavor = compute.Flavor(vcpus=1, memory_mb=512, extra_specs={})
self._test_delete('delete',
task_state=task_states.RESIZE_FINISH,
old_flavor=old_flavor)
def test_delete_in_resized(self):
self._test_delete('delete', vm_state=vm_states.RESIZED)
def test_delete_shelved(self):
fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE}
self._test_delete('delete',
vm_state=vm_states.SHELVED,
system_metadata=fake_sys_meta)
def test_delete_shelved_offloaded(self):
fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE}
self._test_delete('delete',
vm_state=vm_states.SHELVED_OFFLOADED,
system_metadata=fake_sys_meta)
def test_delete_shelved_image_not_found(self):
fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE_NOT_FOUND}
self._test_delete('delete',
vm_state=vm_states.SHELVED_OFFLOADED,
system_metadata=fake_sys_meta)
def test_delete_shelved_image_not_authorized(self):
fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE_NOT_AUTHORIZED}
self._test_delete('delete',
vm_state=vm_states.SHELVED_OFFLOADED,
system_metadata=fake_sys_meta)
def test_delete_shelved_exception(self):
fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE_EXCEPTION}
self._test_delete('delete',
vm_state=vm_states.SHELVED,
system_metadata=fake_sys_meta)
def test_delete_with_down_host(self):
self._test_delete('delete', host='down-host')
def test_delete_soft_with_down_host(self):
self._test_delete('soft_delete', host='down-host')
def test_delete_soft(self):
self._test_delete('soft_delete')
def test_delete_forced(self):
fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE}
for vm_state in self._get_vm_states():
if vm_state in (vm_states.SHELVED, vm_states.SHELVED_OFFLOADED):
self._test_delete('force_delete',
vm_state=vm_state,
system_metadata=fake_sys_meta)
self._test_delete('force_delete', vm_state=vm_state)
def test_delete_fast_if_host_not_set(self):
inst = self._create_instance_obj()
inst.host = ''
quotas = quotas_obj.Quotas(self.context)
updates = {'progress': 0, 'task_state': task_states.DELETING}
self.mox.StubOutWithMock(inst, 'save')
self.mox.StubOutWithMock(compute,
'block_device_mapping_get_all_by_instance')
self.mox.StubOutWithMock(compute, 'constraint')
self.mox.StubOutWithMock(compute, 'instance_destroy')
self.mox.StubOutWithMock(self.compute_api, '_create_reservations')
self.mox.StubOutWithMock(compute_utils,
'notify_about_instance_usage')
if self.cell_type == 'api':
rpcapi = self.compute_api.cells_rpcapi
else:
rpcapi = self.compute_api.jacket_rpcapi
self.mox.StubOutWithMock(rpcapi, 'terminate_instance')
compute.block_device_mapping_get_all_by_instance(self.context,
inst.uuid).AndReturn([])
inst.save()
self.compute_api._create_reservations(self.context,
inst, inst.task_state,
inst.project_id, inst.user_id
).AndReturn(quotas)
if self.cell_type == 'api':
rpcapi.terminate_instance(
self.context, inst,
mox.IsA(compute.BlockDeviceMappingList),
reservations=None, delete_type='delete')
else:
compute_utils.notify_about_instance_usage(
self.compute_api.notifier, self.context,
inst, 'delete.start')
compute.constraint(host=mox.IgnoreArg()).AndReturn('constraint')
delete_time = datetime.datetime(1955, 11, 5, 9, 30,
tzinfo=iso8601.iso8601.Utc())
updates['deleted_at'] = delete_time
updates['deleted'] = True
fake_inst = fake_instance.fake_db_instance(**updates)
compute.instance_destroy(self.context, inst.uuid,
constraint='constraint').AndReturn(fake_inst)
compute_utils.notify_about_instance_usage(
self.compute_api.notifier, self.context,
inst, 'delete.end',
system_metadata=inst.system_metadata)
self.mox.ReplayAll()
self.compute_api.delete(self.context, inst)
for k, v in updates.items():
self.assertEqual(inst[k], v)
def _fake_do_delete(context, instance, bdms,
rservations=None, local=False):
pass
def test_local_delete_with_deleted_volume(self):
bdms = [compute.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict(
{'id': 42, 'volume_id': 'volume_id',
'source_type': 'volume', 'destination_type': 'volume',
'delete_on_termination': False}))]
inst = self._create_instance_obj()
inst._context = self.context
self.mox.StubOutWithMock(inst, 'destroy')
self.mox.StubOutWithMock(self.context, 'elevated')
self.mox.StubOutWithMock(self.compute_api.network_api,
'deallocate_for_instance')
self.mox.StubOutWithMock(compute, 'instance_system_metadata_get')
self.mox.StubOutWithMock(compute_utils,
'notify_about_instance_usage')
self.mox.StubOutWithMock(self.compute_api.volume_api,
'terminate_connection')
self.mox.StubOutWithMock(compute.BlockDeviceMapping, 'destroy')
compute_utils.notify_about_instance_usage(
self.compute_api.notifier, self.context,
inst, 'delete.start')
self.context.elevated().MultipleTimes().AndReturn(self.context)
if self.cell_type != 'api':
self.compute_api.network_api.deallocate_for_instance(
self.context, inst)
self.compute_api.volume_api.terminate_connection(
mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).\
AndRaise(exception. VolumeNotFound('volume_id'))
bdms[0].destroy()
inst.destroy()
compute_utils.notify_about_instance_usage(
self.compute_api.notifier, self.context,
inst, 'delete.end',
system_metadata=inst.system_metadata)
self.mox.ReplayAll()
self.compute_api._local_delete(self.context, inst, bdms,
'delete',
self._fake_do_delete)
def test_local_delete_without_info_cache(self):
inst = self._create_instance_obj()
with test.nested(
mock.patch.object(inst, 'destroy'),
mock.patch.object(self.context, 'elevated'),
mock.patch.object(self.compute_api.network_api,
'deallocate_for_instance'),
mock.patch.object(compute, 'instance_system_metadata_get'),
mock.patch.object(compute_utils,
'notify_about_instance_usage')
) as (
inst_destroy, context_elevated, net_api_deallocate_for_instance,
db_instance_system_metadata_get, notify_about_instance_usage
):
compute_utils.notify_about_instance_usage(
self.compute_api.notifier, self.context,
inst, 'delete.start')
self.context.elevated().MultipleTimes().AndReturn(self.context)
if self.cell_type != 'api':
self.compute_api.network_api.deallocate_for_instance(
self.context, inst)
inst.destroy()
compute_utils.notify_about_instance_usage(
self.compute_api.notifier, self.context,
inst, 'delete.end',
system_metadata=inst.system_metadata)
inst.info_cache = None
self.compute_api._local_delete(self.context, inst, [],
'delete',
self._fake_do_delete)
def test_delete_disabled(self):
inst = self._create_instance_obj()
inst.disable_terminate = True
self.mox.StubOutWithMock(compute, 'instance_update_and_get_original')
self.mox.ReplayAll()
self.compute_api.delete(self.context, inst)
def test_delete_soft_rollback(self):
inst = self._create_instance_obj()
self.mox.StubOutWithMock(compute,
'block_device_mapping_get_all_by_instance')
self.mox.StubOutWithMock(inst, 'save')
delete_time = datetime.datetime(1955, 11, 5)
self.useFixture(utils_fixture.TimeFixture(delete_time))
compute.block_device_mapping_get_all_by_instance(
self.context, inst.uuid).AndReturn([])
inst.save().AndRaise(test.TestingException)
self.mox.ReplayAll()
self.assertRaises(test.TestingException,
self.compute_api.soft_delete, self.context, inst)
def _test_confirm_resize(self, mig_ref_passed=False):
params = dict(vm_state=vm_states.RESIZED)
fake_inst = self._create_instance_obj(params=params)
fake_mig = compute.Migration._from_db_object(
self.context, compute.Migration(),
test_migration.fake_db_migration())
self.mox.StubOutWithMock(self.context, 'elevated')
self.mox.StubOutWithMock(compute.Migration,
'get_by_instance_and_status')
self.mox.StubOutWithMock(compute_utils, 'downsize_quota_delta')
self.mox.StubOutWithMock(compute_utils, 'reserve_quota_delta')
self.mox.StubOutWithMock(fake_mig, 'save')
self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
self.mox.StubOutWithMock(self.compute_api.jacket_rpcapi,
'confirm_resize')
self.context.elevated().AndReturn(self.context)
if not mig_ref_passed:
compute.Migration.get_by_instance_and_status(
self.context, fake_inst['uuid'], 'finished').AndReturn(
fake_mig)
compute_utils.downsize_quota_delta(self.context,
fake_inst).AndReturn('deltas')
resvs = ['resvs']
fake_quotas = compute.Quotas.from_reservations(self.context, resvs)
compute_utils.reserve_quota_delta(self.context, 'deltas',
fake_inst).AndReturn(fake_quotas)
def _check_mig(expected_task_state=None):
self.assertEqual('confirming', fake_mig.status)
fake_mig.save().WithSideEffects(_check_mig)
if self.cell_type:
fake_quotas.commit()
self.compute_api._record_action_start(self.context, fake_inst,
'confirmResize')
self.compute_api.jacket_rpcapi.confirm_resize(
self.context, fake_inst, fake_mig, 'compute-source',
[] if self.cell_type else fake_quotas.reservations)
self.mox.ReplayAll()
if mig_ref_passed:
self.compute_api.confirm_resize(self.context, fake_inst,
migration=fake_mig)
else:
self.compute_api.confirm_resize(self.context, fake_inst)
def test_confirm_resize(self):
self._test_confirm_resize()
def test_confirm_resize_with_migration_ref(self):
self._test_confirm_resize(mig_ref_passed=True)
def _test_revert_resize(self):
params = dict(vm_state=vm_states.RESIZED)
fake_inst = self._create_instance_obj(params=params)
fake_mig = compute.Migration._from_db_object(
self.context, compute.Migration(),
test_migration.fake_db_migration())
self.mox.StubOutWithMock(self.context, 'elevated')
self.mox.StubOutWithMock(compute.Migration,
'get_by_instance_and_status')
self.mox.StubOutWithMock(compute_utils,
'reverse_upsize_quota_delta')
self.mox.StubOutWithMock(compute_utils, 'reserve_quota_delta')
self.mox.StubOutWithMock(fake_inst, 'save')
self.mox.StubOutWithMock(fake_mig, 'save')
self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
self.mox.StubOutWithMock(self.compute_api.jacket_rpcapi,
'revert_resize')
self.context.elevated().AndReturn(self.context)
compute.Migration.get_by_instance_and_status(
self.context, fake_inst['uuid'], 'finished').AndReturn(
fake_mig)
compute_utils.reverse_upsize_quota_delta(
self.context, fake_inst).AndReturn('deltas')
resvs = ['resvs']
fake_quotas = compute.Quotas.from_reservations(self.context, resvs)
compute_utils.reserve_quota_delta(self.context, 'deltas',
fake_inst).AndReturn(fake_quotas)
def _check_state(expected_task_state=None):
self.assertEqual(task_states.RESIZE_REVERTING,
fake_inst.task_state)
fake_inst.save(expected_task_state=[None]).WithSideEffects(
_check_state)
def _check_mig(expected_task_state=None):
self.assertEqual('reverting', fake_mig.status)
fake_mig.save().WithSideEffects(_check_mig)
if self.cell_type:
fake_quotas.commit()
self.compute_api._record_action_start(self.context, fake_inst,
'revertResize')
self.compute_api.jacket_rpcapi.revert_resize(
self.context, fake_inst, fake_mig, 'compute-dest',
[] if self.cell_type else fake_quotas.reservations)
self.mox.ReplayAll()
self.compute_api.revert_resize(self.context, fake_inst)
def test_revert_resize(self):
self._test_revert_resize()
def test_revert_resize_concurrent_fail(self):
params = dict(vm_state=vm_states.RESIZED)
fake_inst = self._create_instance_obj(params=params)
fake_mig = compute.Migration._from_db_object(
self.context, compute.Migration(),
test_migration.fake_db_migration())
self.mox.StubOutWithMock(self.context, 'elevated')
self.mox.StubOutWithMock(compute.Migration,
'get_by_instance_and_status')
self.mox.StubOutWithMock(compute_utils,
'reverse_upsize_quota_delta')
self.mox.StubOutWithMock(compute_utils, 'reserve_quota_delta')
self.mox.StubOutWithMock(fake_inst, 'save')
self.context.elevated().AndReturn(self.context)
compute.Migration.get_by_instance_and_status(
self.context, fake_inst['uuid'], 'finished').AndReturn(fake_mig)
delta = ['delta']
compute_utils.reverse_upsize_quota_delta(
self.context, fake_inst).AndReturn(delta)
resvs = ['resvs']
fake_quotas = compute.Quotas.from_reservations(self.context, resvs)
compute_utils.reserve_quota_delta(
self.context, delta, fake_inst).AndReturn(fake_quotas)
exc = exception.UnexpectedTaskStateError(
instance_uuid=fake_inst['uuid'],
actual={'task_state': task_states.RESIZE_REVERTING},
expected={'task_state': [None]})
fake_inst.save(expected_task_state=[None]).AndRaise(exc)
fake_quotas.rollback()
self.mox.ReplayAll()
self.assertRaises(exception.UnexpectedTaskStateError,
self.compute_api.revert_resize,
self.context,
fake_inst)
def _test_resize(self, flavor_id_passed=True,
same_host=False, allow_same_host=False,
project_id=None,
extra_kwargs=None,
same_flavor=False,
clean_shutdown=True):
if extra_kwargs is None:
extra_kwargs = {}
self.flags(allow_resize_to_same_host=allow_same_host)
params = {}
if project_id is not None:
# To test instance w/ different project id than context (admin)
params['project_id'] = project_id
fake_inst = self._create_instance_obj(params=params)
self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
self.mox.StubOutWithMock(compute_utils, 'upsize_quota_delta')
self.mox.StubOutWithMock(compute_utils, 'reserve_quota_delta')
self.mox.StubOutWithMock(fake_inst, 'save')
self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
self.mox.StubOutWithMock(self.compute_api.compute_task_api,
'resize_instance')
current_flavor = fake_inst.get_flavor()
if flavor_id_passed:
new_flavor = self._create_flavor(id=200, flavorid='new-flavor-id',
name='new_flavor', disabled=False)
if same_flavor:
new_flavor.id = current_flavor.id
flavors.get_flavor_by_flavor_id(
'new-flavor-id',
read_deleted='no').AndReturn(new_flavor)
else:
new_flavor = current_flavor
if (self.cell_type == 'compute' or
not (flavor_id_passed and same_flavor)):
resvs = ['resvs']
project_id, user_id = quotas_obj.ids_from_instance(self.context,
fake_inst)
fake_quotas = compute.Quotas.from_reservations(self.context,
resvs)
if flavor_id_passed:
compute_utils.upsize_quota_delta(
self.context, mox.IsA(compute.Flavor),
mox.IsA(compute.Flavor)).AndReturn('deltas')
compute_utils.reserve_quota_delta(
self.context, 'deltas', fake_inst).AndReturn(fake_quotas)
def _check_state(expected_task_state=None):
self.assertEqual(task_states.RESIZE_PREP,
fake_inst.task_state)
self.assertEqual(fake_inst.progress, 0)
for key, value in extra_kwargs.items():
self.assertEqual(value, getattr(fake_inst, key))
fake_inst.save(expected_task_state=[None]).WithSideEffects(
_check_state)
if allow_same_host:
filter_properties = {'ignore_hosts': []}
else:
filter_properties = {'ignore_hosts': [fake_inst['host']]}
if flavor_id_passed:
expected_reservations = fake_quotas.reservations
else:
expected_reservations = []
if self.cell_type == 'api':
fake_quotas.commit()
expected_reservations = []
mig = compute.Migration()
def _get_migration(context=None):
return mig
def _check_mig():
self.assertEqual(fake_inst.uuid, mig.instance_uuid)
self.assertEqual(current_flavor.id,
mig.old_instance_type_id)
self.assertEqual(new_flavor.id,
mig.new_instance_type_id)
self.assertEqual('finished', mig.status)
if new_flavor.id != current_flavor.id:
self.assertEqual('resize', mig.migration_type)
else:
self.assertEqual('migration', mig.migration_type)
self.stubs.Set(compute, 'Migration', _get_migration)
self.mox.StubOutWithMock(self.context, 'elevated')
self.mox.StubOutWithMock(mig, 'create')
self.context.elevated().AndReturn(self.context)
mig.create().WithSideEffects(_check_mig)
if flavor_id_passed:
self.compute_api._record_action_start(self.context, fake_inst,
'resize')
else:
self.compute_api._record_action_start(self.context, fake_inst,
'migrate')
scheduler_hint = {'filter_properties': filter_properties}
self.compute_api.compute_task_api.resize_instance(
self.context, fake_inst, extra_kwargs,
scheduler_hint=scheduler_hint,
flavor=mox.IsA(compute.Flavor),
reservations=expected_reservations,
clean_shutdown=clean_shutdown)
self.mox.ReplayAll()
if flavor_id_passed:
self.compute_api.resize(self.context, fake_inst,
flavor_id='new-flavor-id',
clean_shutdown=clean_shutdown,
**extra_kwargs)
else:
self.compute_api.resize(self.context, fake_inst,
clean_shutdown=clean_shutdown,
**extra_kwargs)
def _test_migrate(self, *args, **kwargs):
self._test_resize(*args, flavor_id_passed=False, **kwargs)
def test_resize(self):
self._test_resize()
def test_resize_with_kwargs(self):
self._test_resize(extra_kwargs=dict(cow='moo'))
def test_resize_same_host_and_allowed(self):
self._test_resize(same_host=True, allow_same_host=True)
def test_resize_same_host_and_not_allowed(self):
self._test_resize(same_host=True, allow_same_host=False)
def test_resize_different_project_id(self):
self._test_resize(project_id='different')
def test_resize_forced_shutdown(self):
self._test_resize(clean_shutdown=False)
def test_migrate(self):
self._test_migrate()
def test_migrate_with_kwargs(self):
self._test_migrate(extra_kwargs=dict(cow='moo'))
def test_migrate_same_host_and_allowed(self):
self._test_migrate(same_host=True, allow_same_host=True)
def test_migrate_same_host_and_not_allowed(self):
self._test_migrate(same_host=True, allow_same_host=False)
def test_migrate_different_project_id(self):
self._test_migrate(project_id='different')
def test_resize_invalid_flavor_fails(self):
self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
# Should never reach these.
self.mox.StubOutWithMock(compute_utils, 'reserve_quota_delta')
self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
self.mox.StubOutWithMock(self.compute_api.compute_task_api,
'resize_instance')
fake_inst = self._create_instance_obj()
exc = exception.FlavorNotFound(flavor_id='flavor-id')
flavors.get_flavor_by_flavor_id('flavor-id',
read_deleted='no').AndRaise(exc)
self.mox.ReplayAll()
with mock.patch.object(fake_inst, 'save') as mock_save:
self.assertRaises(exception.FlavorNotFound,
self.compute_api.resize, self.context,
fake_inst, flavor_id='flavor-id')
self.assertFalse(mock_save.called)
def test_resize_disabled_flavor_fails(self):
self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
# Should never reach these.
self.mox.StubOutWithMock(compute_utils, 'reserve_quota_delta')
self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
self.mox.StubOutWithMock(self.compute_api.compute_task_api,
'resize_instance')
fake_inst = self._create_instance_obj()
fake_flavor = self._create_flavor(id=200, flavorid='flavor-id',
name='foo', disabled=True)
flavors.get_flavor_by_flavor_id(
'flavor-id', read_deleted='no').AndReturn(fake_flavor)
self.mox.ReplayAll()
with mock.patch.object(fake_inst, 'save') as mock_save:
self.assertRaises(exception.FlavorNotFound,
self.compute_api.resize, self.context,
fake_inst, flavor_id='flavor-id')
self.assertFalse(mock_save.called)
@mock.patch.object(flavors, 'get_flavor_by_flavor_id')
def test_resize_to_zero_disk_flavor_fails(self, get_flavor_by_flavor_id):
fake_inst = self._create_instance_obj()
fake_flavor = self._create_flavor(id=200, flavorid='flavor-id',
name='foo', root_gb=0)
get_flavor_by_flavor_id.return_value = fake_flavor
with mock.patch.object(self.compute_api,
'is_volume_backed_instance',
return_value=False):
self.assertRaises(exception.CannotResizeDisk,
self.compute_api.resize, self.context,
fake_inst, flavor_id='flavor-id')
@mock.patch('compute.compute.api.API._record_action_start')
@mock.patch('compute.compute.api.API._resize_cells_support')
@mock.patch('compute.conductor.conductor_api.ComputeTaskAPI.resize_instance')
@mock.patch.object(flavors, 'get_flavor_by_flavor_id')
def test_resize_to_zero_disk_flavor_volume_backed(self,
get_flavor_by_flavor_id,
resize_instance_mock,
cells_support_mock,
record_mock):
params = dict(image_ref='')
fake_inst = self._create_instance_obj(params=params)
fake_flavor = self._create_flavor(id=200, flavorid='flavor-id',
name='foo', root_gb=0)
get_flavor_by_flavor_id.return_value = fake_flavor
@mock.patch.object(self.compute_api, 'is_volume_backed_instance',
return_value=True)
@mock.patch.object(fake_inst, 'save')
def do_test(mock_save, mock_volume):
self.compute_api.resize(self.context, fake_inst,
flavor_id='flavor-id')
mock_volume.assert_called_once_with(self.context, fake_inst)
do_test()
def test_resize_quota_exceeds_fails(self):
self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
self.mox.StubOutWithMock(compute_utils, 'upsize_quota_delta')
self.mox.StubOutWithMock(compute_utils, 'reserve_quota_delta')
# Should never reach these.
self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
self.mox.StubOutWithMock(self.compute_api.compute_task_api,
'resize_instance')
fake_inst = self._create_instance_obj()
fake_flavor = self._create_flavor(id=200, flavorid='flavor-id',
name='foo', disabled=False)
flavors.get_flavor_by_flavor_id(
'flavor-id', read_deleted='no').AndReturn(fake_flavor)
deltas = dict(resource=0)
compute_utils.upsize_quota_delta(
self.context, mox.IsA(compute.Flavor),
mox.IsA(compute.Flavor)).AndReturn(deltas)
usage = dict(in_use=0, reserved=0)
quotas = {'resource': 0}
usages = {'resource': usage}
overs = ['resource']
over_quota_args = dict(quotas=quotas,
usages=usages,
overs=overs)
compute_utils.reserve_quota_delta(self.context, deltas,
fake_inst).AndRaise(
exception.OverQuota(**over_quota_args))
self.mox.ReplayAll()
with mock.patch.object(fake_inst, 'save') as mock_save:
self.assertRaises(exception.TooManyInstances,
self.compute_api.resize, self.context,
fake_inst, flavor_id='flavor-id')
self.assertFalse(mock_save.called)
def test_check_instance_quota_exceeds_with_multiple_resources(self):
quotas = {'cores': 1, 'instances': 1, 'ram': 512}
usages = {'cores': dict(in_use=1, reserved=0),
'instances': dict(in_use=1, reserved=0),
'ram': dict(in_use=512, reserved=0)}
overs = ['cores', 'instances', 'ram']
over_quota_args = dict(quotas=quotas,
usages=usages,
overs=overs)
e = exception.OverQuota(**over_quota_args)
fake_flavor = self._create_flavor()
instance_num = 1
with mock.patch.object(compute.Quotas, 'reserve', side_effect=e):
try:
self.compute_api._check_num_instances_quota(self.context,
fake_flavor,
instance_num,
instance_num)
except exception.TooManyInstances as e:
self.assertEqual('cores, instances, ram', e.kwargs['overs'])
self.assertEqual('1, 1, 512', e.kwargs['req'])
self.assertEqual('1, 1, 512', e.kwargs['used'])
self.assertEqual('1, 1, 512', e.kwargs['allowed'])
else:
self.fail("Exception not raised")
@mock.patch.object(flavors, 'get_flavor_by_flavor_id')
@mock.patch.object(compute.Quotas, 'reserve')
def test_resize_instance_quota_exceeds_with_multiple_resources(
self, mock_reserve, mock_get_flavor):
quotas = {'cores': 1, 'ram': 512}
usages = {'cores': dict(in_use=1, reserved=0),
'ram': dict(in_use=512, reserved=0)}
overs = ['cores', 'ram']
over_quota_args = dict(quotas=quotas,
usages=usages,
overs=overs)
mock_reserve.side_effect = exception.OverQuota(**over_quota_args)
mock_get_flavor.return_value = self._create_flavor(id=333,
vcpus=3,
memory_mb=1536)
try:
self.compute_api.resize(self.context, self._create_instance_obj(),
'fake_flavor_id')
except exception.TooManyInstances as e:
self.assertEqual('cores, ram', e.kwargs['overs'])
self.assertEqual('2, 1024', e.kwargs['req'])
self.assertEqual('1, 512', e.kwargs['used'])
self.assertEqual('1, 512', e.kwargs['allowed'])
mock_get_flavor.assert_called_once_with('fake_flavor_id',
read_deleted="no")
else:
self.fail("Exception not raised")
def test_pause(self):
# Ensure instance can be paused.
instance = self._create_instance_obj()
self.assertEqual(instance.vm_state, vm_states.ACTIVE)
self.assertIsNone(instance.task_state)
self.mox.StubOutWithMock(instance, 'save')
self.mox.StubOutWithMock(self.compute_api,
'_record_action_start')
if self.cell_type == 'api':
rpcapi = self.compute_api.cells_rpcapi
else:
rpcapi = self.compute_api.jacket_rpcapi
self.mox.StubOutWithMock(rpcapi, 'pause_instance')
instance.save(expected_task_state=[None])
self.compute_api._record_action_start(self.context,
instance, instance_actions.PAUSE)
rpcapi.pause_instance(self.context, instance)
self.mox.ReplayAll()
self.compute_api.pause(self.context, instance)
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
self.assertEqual(task_states.PAUSING,
instance.task_state)
def _test_pause_fails(self, vm_state):
params = dict(vm_state=vm_state)
instance = self._create_instance_obj(params=params)
self.assertIsNone(instance.task_state)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.pause,
self.context, instance)
def test_pause_fails_invalid_states(self):
invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE]))
for state in invalid_vm_states:
self._test_pause_fails(state)
def test_unpause(self):
# Ensure instance can be unpaused.
params = dict(vm_state=vm_states.PAUSED)
instance = self._create_instance_obj(params=params)
self.assertEqual(instance.vm_state, vm_states.PAUSED)
self.assertIsNone(instance.task_state)
self.mox.StubOutWithMock(instance, 'save')
self.mox.StubOutWithMock(self.compute_api,
'_record_action_start')
if self.cell_type == 'api':
rpcapi = self.compute_api.cells_rpcapi
else:
rpcapi = self.compute_api.jacket_rpcapi
self.mox.StubOutWithMock(rpcapi, 'unpause_instance')
instance.save(expected_task_state=[None])
self.compute_api._record_action_start(self.context,
instance, instance_actions.UNPAUSE)
rpcapi.unpause_instance(self.context, instance)
self.mox.ReplayAll()
self.compute_api.unpause(self.context, instance)
self.assertEqual(vm_states.PAUSED, instance.vm_state)
self.assertEqual(task_states.UNPAUSING, instance.task_state)
def test_live_migrate_active_vm_state(self):
instance = self._create_instance_obj()
self._live_migrate_instance(instance)
def test_live_migrate_paused_vm_state(self):
paused_state = dict(vm_state=vm_states.PAUSED)
instance = self._create_instance_obj(params=paused_state)
self._live_migrate_instance(instance)
@mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
@mock.patch.object(compute.RequestSpec, 'get_by_instance_uuid')
@mock.patch.object(compute.InstanceAction, 'action_start')
@mock.patch.object(compute.Instance, 'save')
def test_live_migrate_messaging_timeout(self, _save, _action, get_spec,
add_instance_fault_from_exc):
instance = self._create_instance_obj()
if self.cell_type == 'api':
api = self.compute_api.cells_rpcapi
else:
api = conductor.api.ComputeTaskAPI
with mock.patch.object(api, 'live_migrate_instance',
side_effect=oslo_exceptions.MessagingTimeout):
self.assertRaises(oslo_exceptions.MessagingTimeout,
self.compute_api.live_migrate,
self.context, instance,
host_name='fake_dest_host',
block_migration=True, disk_over_commit=True)
add_instance_fault_from_exc.assert_called_once_with(
self.context,
instance,
mock.ANY)
@mock.patch.object(compute.RequestSpec, 'get_by_instance_uuid')
@mock.patch.object(compute.Instance, 'save')
@mock.patch.object(compute.InstanceAction, 'action_start')
def _live_migrate_instance(self, instance, _save, _action, get_spec):
# TODO(gilliard): This logic is upside-down (different
# behaviour depending on which class this method is mixed-into. Once
# we have cellsv2 we can remove this kind of logic from this test
if self.cell_type == 'api':
api = self.compute_api.cells_rpcapi
else:
api = conductor.api.ComputeTaskAPI
fake_spec = compute.RequestSpec()
get_spec.return_value = fake_spec
with mock.patch.object(api, 'live_migrate_instance') as task:
self.compute_api.live_migrate(self.context, instance,
block_migration=True,
disk_over_commit=True,
host_name='fake_dest_host')
self.assertEqual(task_states.MIGRATING, instance.task_state)
task.assert_called_once_with(self.context, instance,
'fake_dest_host',
block_migration=True,
disk_over_commit=True,
request_spec=fake_spec)
def test_swap_volume_volume_api_usage(self):
# This test ensures that volume_id arguments are passed to volume_api
# and that volumes return to previous states in case of error.
def fake_vol_api_begin_detaching(context, volume_id):
self.assertTrue(uuidutils.is_uuid_like(volume_id))
volumes[volume_id]['status'] = 'detaching'
def fake_vol_api_roll_detaching(context, volume_id):
self.assertTrue(uuidutils.is_uuid_like(volume_id))
if volumes[volume_id]['status'] == 'detaching':
volumes[volume_id]['status'] = 'in-use'
def fake_vol_api_reserve(context, volume_id):
self.assertTrue(uuidutils.is_uuid_like(volume_id))
self.assertEqual(volumes[volume_id]['status'], 'available')
volumes[volume_id]['status'] = 'attaching'
def fake_vol_api_unreserve(context, volume_id):
self.assertTrue(uuidutils.is_uuid_like(volume_id))
if volumes[volume_id]['status'] == 'attaching':
volumes[volume_id]['status'] = 'available'
def fake_swap_volume_exc(context, instance, old_volume_id,
new_volume_id):
raise AttributeError # Random exception
# Should fail if VM state is not valid
instance = fake_instance.fake_instance_obj(None, **{
'vm_state': vm_states.BUILDING,
'launched_at': timeutils.utcnow(),
'locked': False,
'availability_zone': 'fake_az',
'uuid': uuids.vol_instance})
volumes = {}
old_volume_id = uuidutils.generate_uuid()
volumes[old_volume_id] = {'id': old_volume_id,
'display_name': 'old_volume',
'attach_status': 'attached',
'size': 5,
'status': 'in-use',
'multiattach': False,
'attachments': {uuids.vol_instance: {
'attachment_id': 'fakeid'
}
}
}
new_volume_id = uuidutils.generate_uuid()
volumes[new_volume_id] = {'id': new_volume_id,
'display_name': 'new_volume',
'attach_status': 'detached',
'size': 5,
'status': 'available',
'multiattach': False}
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.swap_volume, self.context, instance,
volumes[old_volume_id], volumes[new_volume_id])
instance['vm_state'] = vm_states.ACTIVE
instance['task_state'] = None
# Should fail if old volume is not attached
volumes[old_volume_id]['attach_status'] = 'detached'
self.assertRaises(exception.VolumeUnattached,
self.compute_api.swap_volume, self.context, instance,
volumes[old_volume_id], volumes[new_volume_id])
self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
self.assertEqual(volumes[new_volume_id]['status'], 'available')
volumes[old_volume_id]['attach_status'] = 'attached'
# Should fail if old volume's instance_uuid is not that of the instance
volumes[old_volume_id]['attachments'] = {uuids.vol_instance_2:
{'attachment_id': 'fakeid'}}
self.assertRaises(exception.InvalidVolume,
self.compute_api.swap_volume, self.context, instance,
volumes[old_volume_id], volumes[new_volume_id])
self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
self.assertEqual(volumes[new_volume_id]['status'], 'available')
volumes[old_volume_id]['attachments'] = {uuids.vol_instance:
{'attachment_id': 'fakeid'}}
# Should fail if new volume is attached
volumes[new_volume_id]['attach_status'] = 'attached'
self.assertRaises(exception.InvalidVolume,
self.compute_api.swap_volume, self.context, instance,
volumes[old_volume_id], volumes[new_volume_id])
self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
self.assertEqual(volumes[new_volume_id]['status'], 'available')
volumes[new_volume_id]['attach_status'] = 'detached'
# Should fail if new volume is smaller than the old volume
volumes[new_volume_id]['size'] = 4
self.assertRaises(exception.InvalidVolume,
self.compute_api.swap_volume, self.context, instance,
volumes[old_volume_id], volumes[new_volume_id])
self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
self.assertEqual(volumes[new_volume_id]['status'], 'available')
volumes[new_volume_id]['size'] = 5
# Fail call to swap_volume
self.stubs.Set(self.compute_api.volume_api, 'begin_detaching',
fake_vol_api_begin_detaching)
self.stubs.Set(self.compute_api.volume_api, 'roll_detaching',
fake_vol_api_roll_detaching)
self.stubs.Set(self.compute_api.volume_api, 'reserve_volume',
fake_vol_api_reserve)
self.stubs.Set(self.compute_api.volume_api, 'unreserve_volume',
fake_vol_api_unreserve)
self.stubs.Set(self.compute_api.jacket_rpcapi, 'swap_volume',
fake_swap_volume_exc)
self.assertRaises(AttributeError,
self.compute_api.swap_volume, self.context, instance,
volumes[old_volume_id], volumes[new_volume_id])
self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
self.assertEqual(volumes[new_volume_id]['status'], 'available')
# Should succeed
self.stubs.Set(self.compute_api.jacket_rpcapi, 'swap_volume',
lambda c, instance, old_volume_id, new_volume_id: True)
self.compute_api.swap_volume(self.context, instance,
volumes[old_volume_id],
volumes[new_volume_id])
def _test_snapshot_and_backup(self, is_snapshot=True,
with_base_ref=False, min_ram=None,
min_disk=None,
create_fails=False,
instance_vm_state=vm_states.ACTIVE):
params = dict(locked=True)
instance = self._create_instance_obj(params=params)
instance.vm_state = instance_vm_state
# 'cache_in_nova' is for testing non-inheritable properties
# 'user_id' should also not be carried from sys_meta into
# image property...since it should be set explicitly by
# _create_image() in compute api.
fake_image_meta = {
'is_public': True,
'name': 'base-name',
'disk_format': 'fake',
'container_format': 'fake',
'properties': {
'user_id': 'meow',
'foo': 'bar',
'blah': 'bug?',
'cache_in_nova': 'dropped',
},
}
image_type = is_snapshot and 'snapshot' or 'backup'
sent_meta = {
'is_public': False,
'name': 'fake-name',
'disk_format': 'fake',
'container_format': 'fake',
'properties': {
'user_id': self.context.user_id,
'instance_uuid': instance.uuid,
'image_type': image_type,
'foo': 'bar',
'blah': 'bug?',
'cow': 'moo',
'cat': 'meow',
},
}
if is_snapshot:
if min_ram is not None:
fake_image_meta['min_ram'] = min_ram
sent_meta['min_ram'] = min_ram
if min_disk is not None:
fake_image_meta['min_disk'] = min_disk
sent_meta['min_disk'] = min_disk
sent_meta.pop('disk_format', None)
sent_meta.pop('container_format', None)
else:
sent_meta['properties']['backup_type'] = 'fake-backup-type'
extra_props = dict(cow='moo', cat='meow')
self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
self.mox.StubOutWithMock(self.compute_api.image_api,
'create')
self.mox.StubOutWithMock(instance, 'save')
self.mox.StubOutWithMock(self.compute_api.jacket_rpcapi,
'snapshot_instance')
self.mox.StubOutWithMock(self.compute_api.jacket_rpcapi,
'backup_instance')
if not is_snapshot:
self.mox.StubOutWithMock(self.compute_api,
'is_volume_backed_instance')
self.compute_api.is_volume_backed_instance(self.context,
instance).AndReturn(False)
utils.get_image_from_system_metadata(
instance.system_metadata).AndReturn(fake_image_meta)
fake_image = dict(id='fake-image-id')
mock_method = self.compute_api.image_api.create(
self.context, sent_meta)
if create_fails:
mock_method.AndRaise(test.TestingException())
else:
mock_method.AndReturn(fake_image)
def check_state(expected_task_state=None):
expected_state = (is_snapshot and
task_states.IMAGE_SNAPSHOT_PENDING or
task_states.IMAGE_BACKUP)
self.assertEqual(expected_state, instance.task_state)
if not create_fails:
instance.save(expected_task_state=[None]).WithSideEffects(
check_state)
if is_snapshot:
self.compute_api.jacket_rpcapi.snapshot_instance(
self.context, instance, fake_image['id'])
else:
self.compute_api.jacket_rpcapi.backup_instance(
self.context, instance, fake_image['id'],
'fake-backup-type', 'fake-rotation')
self.mox.ReplayAll()
got_exc = False
try:
if is_snapshot:
res = self.compute_api.snapshot(self.context, instance,
'fake-name',
extra_properties=extra_props)
else:
res = self.compute_api.backup(self.context, instance,
'fake-name',
'fake-backup-type',
'fake-rotation',
extra_properties=extra_props)
self.assertEqual(fake_image, res)
except test.TestingException:
got_exc = True
self.assertEqual(create_fails, got_exc)
self.mox.UnsetStubs()
def test_snapshot(self):
self._test_snapshot_and_backup()
def test_snapshot_fails(self):
self._test_snapshot_and_backup(create_fails=True)
def test_snapshot_invalid_state(self):
instance = self._create_instance_obj()
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.IMAGE_SNAPSHOT
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.snapshot,
self.context, instance, 'fake-name')
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.IMAGE_BACKUP
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.snapshot,
self.context, instance, 'fake-name')
instance.vm_state = vm_states.BUILDING
instance.task_state = None
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.snapshot,
self.context, instance, 'fake-name')
def test_snapshot_with_base_image_ref(self):
self._test_snapshot_and_backup(with_base_ref=True)
def test_snapshot_min_ram(self):
self._test_snapshot_and_backup(min_ram=42)
def test_snapshot_min_disk(self):
self._test_snapshot_and_backup(min_disk=42)
def test_backup(self):
for state in [vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED]:
self._test_snapshot_and_backup(is_snapshot=False,
instance_vm_state=state)
def test_backup_fails(self):
self._test_snapshot_and_backup(is_snapshot=False, create_fails=True)
def test_backup_invalid_state(self):
instance = self._create_instance_obj()
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.IMAGE_SNAPSHOT
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.backup,
self.context, instance, 'fake-name',
'fake', 'fake')
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.IMAGE_BACKUP
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.backup,
self.context, instance, 'fake-name',
'fake', 'fake')
instance.vm_state = vm_states.BUILDING
instance.task_state = None
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.backup,
self.context, instance, 'fake-name',
'fake', 'fake')
def test_backup_with_base_image_ref(self):
self._test_snapshot_and_backup(is_snapshot=False,
with_base_ref=True)
def test_backup_volume_backed_instance(self):
instance = self._create_instance_obj()
with mock.patch.object(self.compute_api,
'is_volume_backed_instance',
return_value=True) as mock_is_volume_backed:
self.assertRaises(exception.InvalidRequest,
self.compute_api.backup, self.context,
instance, 'fake-name', 'weekly',
3, extra_properties={})
mock_is_volume_backed.assert_called_once_with(self.context,
instance)
def _test_snapshot_volume_backed(self, quiesce_required, quiesce_fails,
vm_state=vm_states.ACTIVE):
fake_sys_meta = {'image_min_ram': '11',
'image_min_disk': '22',
'image_container_format': 'ami',
'image_disk_format': 'ami',
'image_ram_disk': 'fake_ram_disk_id',
'image_bdm_v2': 'True',
'image_block_device_mapping': '[]',
'image_mappings': '[]',
'image_cache_in_nova': 'True'}
if quiesce_required:
fake_sys_meta['image_os_require_quiesce'] = 'yes'
params = dict(locked=True, vm_state=vm_state,
system_metadata=fake_sys_meta)
instance = self._create_instance_obj(params=params)
instance['root_device_name'] = 'vda'
instance_bdms = []
expect_meta = {
'name': 'test-snapshot',
'properties': {'root_device_name': 'vda',
'ram_disk': 'fake_ram_disk_id'},
'size': 0,
'min_disk': '22',
'is_public': False,
'min_ram': '11',
}
if quiesce_required:
expect_meta['properties']['os_require_quiesce'] = 'yes'
quiesced = [False, False]
quiesce_expected = not quiesce_fails and vm_state == vm_states.ACTIVE
def fake_get_all_by_instance(context, instance, use_slave=False):
return copy.deepcopy(instance_bdms)
def fake_image_create(context, image_meta, data=None):
self.assertThat(image_meta, matchers.DictMatches(expect_meta))
def fake_volume_get(context, volume_id):
return {'id': volume_id, 'display_description': ''}
def fake_volume_create_snapshot(context, volume_id, name, description):
return {'id': '%s-snapshot' % volume_id}
def fake_quiesce_instance(context, instance):
if quiesce_fails:
raise exception.InstanceQuiesceNotSupported(
instance_id=instance['uuid'], reason='test')
quiesced[0] = True
def fake_unquiesce_instance(context, instance, mapping=None):
quiesced[1] = True
self.stub_out('compute.compute.block_device_mapping_get_all_by_instance',
fake_get_all_by_instance)
self.stubs.Set(self.compute_api.image_api, 'create',
fake_image_create)
self.stubs.Set(self.compute_api.volume_api, 'get',
fake_volume_get)
self.stubs.Set(self.compute_api.volume_api, 'create_snapshot_force',
fake_volume_create_snapshot)
self.stubs.Set(self.compute_api.jacket_rpcapi, 'quiesce_instance',
fake_quiesce_instance)
self.stubs.Set(self.compute_api.jacket_rpcapi, 'unquiesce_instance',
fake_unquiesce_instance)
fake_image.stub_out_image_service(self)
# No block devices defined
self.compute_api.snapshot_volume_backed(
self.context, instance, 'test-snapshot')
bdm = fake_block_device.FakeDbBlockDeviceDict(
{'no_device': False, 'volume_id': '1', 'boot_index': 0,
'connection_info': 'inf', 'device_name': '/dev/vda',
'source_type': 'volume', 'destination_type': 'volume'})
instance_bdms.append(bdm)
expect_meta['properties']['bdm_v2'] = True
expect_meta['properties']['block_device_mapping'] = []
expect_meta['properties']['block_device_mapping'].append(
{'guest_format': None, 'boot_index': 0, 'no_device': None,
'image_id': None, 'volume_id': None, 'disk_bus': None,
'volume_size': None, 'source_type': 'snapshot',
'device_type': None, 'snapshot_id': '1-snapshot',
'device_name': '/dev/vda',
'destination_type': 'volume', 'delete_on_termination': False})
# All the db_only fields and the volume ones are removed
self.compute_api.snapshot_volume_backed(
self.context, instance, 'test-snapshot')
self.assertEqual(quiesce_expected, quiesced[0])
self.assertEqual(quiesce_expected, quiesced[1])
instance.system_metadata['image_mappings'] = jsonutils.dumps(
[{'virtual': 'ami', 'device': 'vda'},
{'device': 'vda', 'virtual': 'ephemeral0'},
{'device': 'vdb', 'virtual': 'swap'},
{'device': 'vdc', 'virtual': 'ephemeral1'}])[:255]
instance.system_metadata['image_block_device_mapping'] = (
jsonutils.dumps(
[{'source_type': 'snapshot', 'destination_type': 'volume',
'guest_format': None, 'device_type': 'disk', 'boot_index': 1,
'disk_bus': 'ide', 'device_name': '/dev/vdf',
'delete_on_termination': True, 'snapshot_id': 'snapshot-2',
'volume_id': None, 'volume_size': 100, 'image_id': None,
'no_device': None}])[:255])
bdm = fake_block_device.FakeDbBlockDeviceDict(
{'no_device': False, 'volume_id': None, 'boot_index': -1,
'connection_info': 'inf', 'device_name': '/dev/vdh',
'source_type': 'blank', 'destination_type': 'local',
'guest_format': 'swap', 'delete_on_termination': True})
instance_bdms.append(bdm)
expect_meta['properties']['block_device_mapping'].append(
{'guest_format': 'swap', 'boot_index': -1, 'no_device': False,
'image_id': None, 'volume_id': None, 'disk_bus': None,
'volume_size': None, 'source_type': 'blank',
'device_type': None, 'snapshot_id': None,
'device_name': '/dev/vdh',
'destination_type': 'local', 'delete_on_termination': True})
quiesced = [False, False]
# Check that the mappgins from the image properties are not included
self.compute_api.snapshot_volume_backed(
self.context, instance, 'test-snapshot')
self.assertEqual(quiesce_expected, quiesced[0])
self.assertEqual(quiesce_expected, quiesced[1])
def test_snapshot_volume_backed(self):
self._test_snapshot_volume_backed(False, False)
def test_snapshot_volume_backed_with_quiesce(self):
self._test_snapshot_volume_backed(True, False)
def test_snapshot_volume_backed_with_quiesce_skipped(self):
self._test_snapshot_volume_backed(False, True)
def test_snapshot_volume_backed_with_quiesce_exception(self):
self.assertRaises(exception.NovaException,
self._test_snapshot_volume_backed, True, True)
def test_snapshot_volume_backed_with_quiesce_stopped(self):
self._test_snapshot_volume_backed(True, True,
vm_state=vm_states.STOPPED)
def test_volume_snapshot_create(self):
volume_id = '1'
create_info = {'id': 'eyedee'}
fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
'id': 123,
'device_name': '/dev/sda2',
'source_type': 'volume',
'destination_type': 'volume',
'connection_info': "{'fake': 'connection_info'}",
'volume_id': 1,
'boot_index': -1})
fake_bdm['instance'] = fake_instance.fake_db_instance()
fake_bdm['instance_uuid'] = fake_bdm['instance']['uuid']
fake_bdm = compute.BlockDeviceMapping._from_db_object(
self.context, compute.BlockDeviceMapping(),
fake_bdm, expected_attrs=['instance'])
self.mox.StubOutWithMock(compute.BlockDeviceMapping,
'get_by_volume')
self.mox.StubOutWithMock(self.compute_api.jacket_rpcapi,
'volume_snapshot_create')
compute.BlockDeviceMapping.get_by_volume(
self.context, volume_id,
expected_attrs=['instance']).AndReturn(fake_bdm)
self.compute_api.jacket_rpcapi.volume_snapshot_create(self.context,
fake_bdm['instance'], volume_id, create_info)
self.mox.ReplayAll()
snapshot = self.compute_api.volume_snapshot_create(self.context,
volume_id, create_info)
expected_snapshot = {
'snapshot': {
'id': create_info['id'],
'volumeId': volume_id,
},
}
self.assertEqual(snapshot, expected_snapshot)
def test_volume_snapshot_delete(self):
volume_id = '1'
snapshot_id = '2'
fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
'id': 123,
'device_name': '/dev/sda2',
'source_type': 'volume',
'destination_type': 'volume',
'connection_info': "{'fake': 'connection_info'}",
'volume_id': 1,
'boot_index': -1})
fake_bdm['instance'] = fake_instance.fake_db_instance()
fake_bdm['instance_uuid'] = fake_bdm['instance']['uuid']
fake_bdm = compute.BlockDeviceMapping._from_db_object(
self.context, compute.BlockDeviceMapping(),
fake_bdm, expected_attrs=['instance'])
self.mox.StubOutWithMock(compute.BlockDeviceMapping,
'get_by_volume')
self.mox.StubOutWithMock(self.compute_api.jacket_rpcapi,
'volume_snapshot_delete')
compute.BlockDeviceMapping.get_by_volume(
self.context, volume_id,
expected_attrs=['instance']).AndReturn(fake_bdm)
self.compute_api.jacket_rpcapi.volume_snapshot_delete(self.context,
fake_bdm['instance'], volume_id, snapshot_id, {})
self.mox.ReplayAll()
self.compute_api.volume_snapshot_delete(self.context, volume_id,
snapshot_id, {})
def _test_boot_volume_bootable(self, is_bootable=False):
def get_vol_data(*args, **kwargs):
return {'bootable': is_bootable}
block_device_mapping = [{
'id': 1,
'device_name': 'vda',
'no_device': None,
'virtual_name': None,
'snapshot_id': None,
'volume_id': '1',
'delete_on_termination': False,
}]
expected_meta = {'min_disk': 0, 'min_ram': 0, 'properties': {},
'size': 0, 'status': 'active'}
with mock.patch.object(self.compute_api.volume_api, 'get',
side_effect=get_vol_data):
if not is_bootable:
self.assertRaises(exception.InvalidBDMVolumeNotBootable,
self.compute_api._get_bdm_image_metadata,
self.context, block_device_mapping)
else:
meta = self.compute_api._get_bdm_image_metadata(self.context,
block_device_mapping)
self.assertEqual(expected_meta, meta)
def test_boot_volume_non_bootable(self):
self._test_boot_volume_bootable(False)
def test_boot_volume_bootable(self):
self._test_boot_volume_bootable(True)
def test_boot_volume_basic_property(self):
block_device_mapping = [{
'id': 1,
'device_name': 'vda',
'no_device': None,
'virtual_name': None,
'snapshot_id': None,
'volume_id': '1',
'delete_on_termination': False,
}]
fake_volume = {"volume_image_metadata":
{"min_ram": 256, "min_disk": 128, "foo": "bar"}}
with mock.patch.object(self.compute_api.volume_api, 'get',
return_value=fake_volume):
meta = self.compute_api._get_bdm_image_metadata(
self.context, block_device_mapping)
self.assertEqual(256, meta['min_ram'])
self.assertEqual(128, meta['min_disk'])
self.assertEqual('active', meta['status'])
self.assertEqual('bar', meta['properties']['foo'])
def test_boot_volume_snapshot_basic_property(self):
block_device_mapping = [{
'id': 1,
'device_name': 'vda',
'no_device': None,
'virtual_name': None,
'snapshot_id': '2',
'volume_id': None,
'delete_on_termination': False,
}]
fake_volume = {"volume_image_metadata":
{"min_ram": 256, "min_disk": 128, "foo": "bar"}}
fake_snapshot = {"volume_id": "1"}
with test.nested(
mock.patch.object(self.compute_api.volume_api, 'get',
return_value=fake_volume),
mock.patch.object(self.compute_api.volume_api, 'get_snapshot',
return_value=fake_snapshot)) as (
volume_get, volume_get_snapshot):
meta = self.compute_api._get_bdm_image_metadata(
self.context, block_device_mapping)
self.assertEqual(256, meta['min_ram'])
self.assertEqual(128, meta['min_disk'])
self.assertEqual('active', meta['status'])
self.assertEqual('bar', meta['properties']['foo'])
volume_get_snapshot.assert_called_once_with(self.context,
block_device_mapping[0]['snapshot_id'])
volume_get.assert_called_once_with(self.context,
fake_snapshot['volume_id'])
def _create_instance_with_disabled_disk_config(self, object=False):
sys_meta = {"image_auto_disk_config": "Disabled"}
params = {"system_metadata": sys_meta}
instance = self._create_instance_obj(params=params)
if object:
return instance
return obj_base.obj_to_primitive(instance)
def _setup_fake_image_with_disabled_disk_config(self):
self.fake_image = {
'id': 1,
'name': 'fake_name',
'status': 'active',
'properties': {"auto_disk_config": "Disabled"},
}
def fake_show(obj, context, image_id, **kwargs):
return self.fake_image
fake_image.stub_out_image_service(self)
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
return self.fake_image['id']
def test_resize_with_disabled_auto_disk_config_fails(self):
fake_inst = self._create_instance_with_disabled_disk_config(
object=True)
self.assertRaises(exception.AutoDiskConfigDisabledByImage,
self.compute_api.resize,
self.context, fake_inst,
auto_disk_config=True)
def test_create_with_disabled_auto_disk_config_fails(self):
image_id = self._setup_fake_image_with_disabled_disk_config()
self.assertRaises(exception.AutoDiskConfigDisabledByImage,
self.compute_api.create, self.context,
"fake_flavor", image_id, auto_disk_config=True)
def test_rebuild_with_disabled_auto_disk_config_fails(self):
fake_inst = self._create_instance_with_disabled_disk_config(
object=True)
image_id = self._setup_fake_image_with_disabled_disk_config()
self.assertRaises(exception.AutoDiskConfigDisabledByImage,
self.compute_api.rebuild,
self.context,
fake_inst,
image_id,
"new password",
auto_disk_config=True)
@mock.patch.object(compute.Instance, 'save')
@mock.patch.object(compute.Instance, 'get_flavor')
@mock.patch.object(compute.BlockDeviceMappingList, 'get_by_instance_uuid')
@mock.patch.object(compute_api.API, '_get_image')
@mock.patch.object(compute_api.API, '_check_auto_disk_config')
@mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
@mock.patch.object(compute_api.API, '_record_action_start')
def test_rebuild(self, _record_action_start,
_checks_for_create_and_rebuild, _check_auto_disk_config,
_get_image, bdm_get_by_instance_uuid, get_flavor, instance_save):
orig_system_metadata = {}
instance = fake_instance.fake_instance_obj(self.context,
vm_state=vm_states.ACTIVE, cell_name='fake-cell',
launched_at=timeutils.utcnow(),
system_metadata=orig_system_metadata,
image_ref='foo',
expected_attrs=['system_metadata'])
get_flavor.return_value = test_flavor.fake_flavor
flavor = instance.get_flavor()
image_href = 'foo'
image = {"min_ram": 10, "min_disk": 1,
"properties": {'architecture': arch.X86_64}}
admin_pass = ''
files_to_inject = []
bdms = compute.BlockDeviceMappingList()
_get_image.return_value = (None, image)
bdm_get_by_instance_uuid.return_value = bdms
with mock.patch.object(self.compute_api.compute_task_api,
'rebuild_instance') as rebuild_instance:
self.compute_api.rebuild(self.context, instance, image_href,
admin_pass, files_to_inject)
rebuild_instance.assert_called_once_with(self.context,
instance=instance, new_pass=admin_pass,
injected_files=files_to_inject, image_ref=image_href,
orig_image_ref=image_href,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host, kwargs={})
_check_auto_disk_config.assert_called_once_with(image=image)
_checks_for_create_and_rebuild.assert_called_once_with(self.context,
None, image, flavor, {}, [], None)
self.assertNotEqual(orig_system_metadata, instance.system_metadata)
@mock.patch.object(compute.Instance, 'save')
@mock.patch.object(compute.Instance, 'get_flavor')
@mock.patch.object(compute.BlockDeviceMappingList, 'get_by_instance_uuid')
@mock.patch.object(compute_api.API, '_get_image')
@mock.patch.object(compute_api.API, '_check_auto_disk_config')
@mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
@mock.patch.object(compute_api.API, '_record_action_start')
def test_rebuild_change_image(self, _record_action_start,
_checks_for_create_and_rebuild, _check_auto_disk_config,
_get_image, bdm_get_by_instance_uuid, get_flavor, instance_save):
orig_system_metadata = {}
get_flavor.return_value = test_flavor.fake_flavor
orig_image_href = 'orig_image'
orig_image = {"min_ram": 10, "min_disk": 1,
"properties": {'architecture': arch.X86_64,
'vm_mode': 'hvm'}}
new_image_href = 'new_image'
new_image = {"min_ram": 10, "min_disk": 1,
"properties": {'architecture': arch.X86_64,
'vm_mode': 'xen'}}
admin_pass = ''
files_to_inject = []
bdms = compute.BlockDeviceMappingList()
instance = fake_instance.fake_instance_obj(self.context,
vm_state=vm_states.ACTIVE, cell_name='fake-cell',
launched_at=timeutils.utcnow(),
system_metadata=orig_system_metadata,
expected_attrs=['system_metadata'],
image_ref=orig_image_href,
vm_mode=vm_mode.HVM)
flavor = instance.get_flavor()
def get_image(context, image_href):
if image_href == new_image_href:
return (None, new_image)
if image_href == orig_image_href:
return (None, orig_image)
_get_image.side_effect = get_image
bdm_get_by_instance_uuid.return_value = bdms
with mock.patch.object(self.compute_api.compute_task_api,
'rebuild_instance') as rebuild_instance:
self.compute_api.rebuild(self.context, instance, new_image_href,
admin_pass, files_to_inject)
rebuild_instance.assert_called_once_with(self.context,
instance=instance, new_pass=admin_pass,
injected_files=files_to_inject, image_ref=new_image_href,
orig_image_ref=orig_image_href,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host, kwargs={})
_check_auto_disk_config.assert_called_once_with(image=new_image)
_checks_for_create_and_rebuild.assert_called_once_with(self.context,
None, new_image, flavor, {}, [], None)
self.assertEqual(vm_mode.XEN, instance.vm_mode)
def _test_check_injected_file_quota_onset_file_limit_exceeded(self,
side_effect):
injected_files = [
{
"path": "/etc/banner.txt",
"contents": "foo"
}
]
with mock.patch.object(quota.QUOTAS, 'limit_check',
side_effect=side_effect):
self.compute_api._check_injected_file_quota(
self.context, injected_files)
def test_check_injected_file_quota_onset_file_limit_exceeded(self):
# This is the first call to limit_check.
side_effect = exception.OverQuota(overs='injected_files')
self.assertRaises(exception.OnsetFileLimitExceeded,
self._test_check_injected_file_quota_onset_file_limit_exceeded,
side_effect)
def test_check_injected_file_quota_onset_file_path_limit(self):
# This is the second call to limit_check.
side_effect = (mock.DEFAULT,
exception.OverQuota(overs='injected_file_path_bytes'))
self.assertRaises(exception.OnsetFilePathLimitExceeded,
self._test_check_injected_file_quota_onset_file_limit_exceeded,
side_effect)
def test_check_injected_file_quota_onset_file_content_limit(self):
# This is the second call to limit_check but with different overs.
side_effect = (mock.DEFAULT,
exception.OverQuota(overs='injected_file_content_bytes'))
self.assertRaises(exception.OnsetFileContentLimitExceeded,
self._test_check_injected_file_quota_onset_file_limit_exceeded,
side_effect)
@mock.patch('compute.compute.Quotas.commit')
@mock.patch('compute.compute.Quotas.reserve')
@mock.patch('compute.compute.Instance.save')
@mock.patch('compute.compute.InstanceAction.action_start')
def test_restore_by_admin(self, action_start, instance_save,
quota_reserve, quota_commit):
admin_context = context.RequestContext('admin_user',
'admin_project',
True)
instance = self._create_instance_obj()
instance.vm_state = vm_states.SOFT_DELETED
instance.task_state = None
instance.save()
with mock.patch.object(self.compute_api, 'compute_rpcapi') as rpc:
self.compute_api.restore(admin_context, instance)
rpc.restore_instance.assert_called_once_with(admin_context,
instance)
self.assertEqual(instance.task_state, task_states.RESTORING)
self.assertEqual(1, quota_commit.call_count)
quota_reserve.assert_called_once_with(instances=1,
cores=instance.flavor.vcpus, ram=instance.flavor.memory_mb,
project_id=instance.project_id, user_id=instance.user_id)
@mock.patch('compute.compute.Quotas.commit')
@mock.patch('compute.compute.Quotas.reserve')
@mock.patch('compute.compute.Instance.save')
@mock.patch('compute.compute.InstanceAction.action_start')
def test_restore_by_instance_owner(self, action_start, instance_save,
quota_reserve, quota_commit):
instance = self._create_instance_obj()
instance.vm_state = vm_states.SOFT_DELETED
instance.task_state = None
instance.save()
with mock.patch.object(self.compute_api, 'compute_rpcapi') as rpc:
self.compute_api.restore(self.context, instance)
rpc.restore_instance.assert_called_once_with(self.context,
instance)
self.assertEqual(instance.project_id, self.context.project_id)
self.assertEqual(instance.task_state, task_states.RESTORING)
self.assertEqual(1, quota_commit.call_count)
quota_reserve.assert_called_once_with(instances=1,
cores=instance.flavor.vcpus, ram=instance.flavor.memory_mb,
project_id=instance.project_id, user_id=instance.user_id)
def test_external_instance_event(self):
instances = [
compute.Instance(uuid=uuids.instance_1, host='host1'),
compute.Instance(uuid=uuids.instance_2, host='host1'),
compute.Instance(uuid=uuids.instance_3, host='host2'),
]
events = [
compute.InstanceExternalEvent(
instance_uuid=uuids.instance_1),
compute.InstanceExternalEvent(
instance_uuid=uuids.instance_2),
compute.InstanceExternalEvent(
instance_uuid=uuids.instance_3),
]
self.compute_api.jacket_rpcapi = mock.MagicMock()
self.compute_api.external_instance_event(self.context,
instances, events)
method = self.compute_api.jacket_rpcapi.external_instance_event
method.assert_any_call(self.context, instances[0:2], events[0:2])
method.assert_any_call(self.context, instances[2:], events[2:])
self.assertEqual(2, method.call_count)
def test_volume_ops_invalid_task_state(self):
instance = self._create_instance_obj()
self.assertEqual(instance.vm_state, vm_states.ACTIVE)
instance.task_state = 'Any'
volume_id = uuidutils.generate_uuid()
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.attach_volume,
self.context, instance, volume_id)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.detach_volume,
self.context, instance, volume_id)
new_volume_id = uuidutils.generate_uuid()
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.swap_volume,
self.context, instance,
volume_id, new_volume_id)
@mock.patch.object(cinder.API, 'get',
side_effect=exception.CinderConnectionFailed(reason='error'))
def test_get_bdm_image_metadata_with_cinder_down(self, mock_get):
bdms = [compute.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict(
{
'id': 1,
'volume_id': 1,
'source_type': 'volume',
'destination_type': 'volume',
'device_name': 'vda',
}))]
self.assertRaises(exception.CinderConnectionFailed,
self.compute_api._get_bdm_image_metadata,
self.context,
bdms, legacy_bdm=True)
@mock.patch.object(cinder.API, 'get')
@mock.patch.object(cinder.API, 'check_attach',
side_effect=exception.InvalidVolume(reason='error'))
def test_validate_bdm_with_error_volume(self, mock_check_attach, mock_get):
# Tests that an InvalidVolume exception raised from
# volume_api.check_attach due to the volume status not being
# 'available' results in _validate_bdm re-raising InvalidVolume.
instance = self._create_instance_obj()
instance_type = self._create_flavor()
volume_id = 'e856840e-9f5b-4894-8bde-58c6e29ac1e8'
volume_info = {'status': 'error',
'attach_status': 'detached',
'id': volume_id}
mock_get.return_value = volume_info
bdms = [compute.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict(
{
'boot_index': 0,
'volume_id': volume_id,
'source_type': 'volume',
'destination_type': 'volume',
'device_name': 'vda',
}))]
self.assertRaises(exception.InvalidVolume,
self.compute_api._validate_bdm,
self.context,
instance, instance_type, bdms)
mock_get.assert_called_once_with(self.context, volume_id)
mock_check_attach.assert_called_once_with(
self.context, volume_info, instance=instance)
@mock.patch.object(cinder.API, 'get_snapshot',
side_effect=exception.CinderConnectionFailed(reason='error'))
@mock.patch.object(cinder.API, 'get',
side_effect=exception.CinderConnectionFailed(reason='error'))
def test_validate_bdm_with_cinder_down(self, mock_get, mock_get_snapshot):
instance = self._create_instance_obj()
instance_type = self._create_flavor()
bdm = [compute.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict(
{
'id': 1,
'volume_id': 1,
'source_type': 'volume',
'destination_type': 'volume',
'device_name': 'vda',
'boot_index': 0,
}))]
bdms = [compute.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict(
{
'id': 1,
'snapshot_id': 1,
'source_type': 'volume',
'destination_type': 'volume',
'device_name': 'vda',
'boot_index': 0,
}))]
self.assertRaises(exception.CinderConnectionFailed,
self.compute_api._validate_bdm,
self.context,
instance, instance_type, bdm)
self.assertRaises(exception.CinderConnectionFailed,
self.compute_api._validate_bdm,
self.context,
instance, instance_type, bdms)
def _test_create_db_entry_for_new_instance_with_cinder_error(self,
expected_exception):
@mock.patch.object(compute.Instance, 'create')
@mock.patch.object(compute_api.SecurityGroupAPI, 'ensure_default')
@mock.patch.object(compute_api.API, '_populate_instance_names')
@mock.patch.object(compute_api.API, '_populate_instance_for_create')
def do_test(self, mock_create, mock_names, mock_ensure,
mock_inst_create):
instance = self._create_instance_obj()
instance['display_name'] = 'FAKE_DISPLAY_NAME'
instance['shutdown_terminate'] = False
instance_type = self._create_flavor()
fake_image = {
'id': 'fake-image-id',
'properties': {'mappings': []},
'status': 'fake-status',
'location': 'far-away'}
fake_security_group = None
fake_num_instances = 1
fake_index = 1
bdm = [compute.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict(
{
'id': 1,
'volume_id': 1,
'source_type': 'volume',
'destination_type': 'volume',
'device_name': 'vda',
'boot_index': 0,
}))]
with mock.patch.object(instance, "destroy") as destroy:
self.assertRaises(expected_exception,
self.compute_api.
create_db_entry_for_new_instance,
self.context,
instance_type,
fake_image,
instance,
fake_security_group,
bdm,
fake_num_instances,
fake_index)
destroy.assert_called_once_with()
# We use a nested method so we can decorate with the mocks.
do_test(self)
@mock.patch.object(cinder.API, 'get',
side_effect=exception.CinderConnectionFailed(reason='error'))
def test_create_db_entry_for_new_instancewith_cinder_down(self, mock_get):
self._test_create_db_entry_for_new_instance_with_cinder_error(
expected_exception=exception.CinderConnectionFailed)
@mock.patch.object(cinder.API, 'get',
return_value={'id': 1, 'status': 'error',
'attach_status': 'detached'})
def test_create_db_entry_for_new_instancewith_error_volume(self, mock_get):
self._test_create_db_entry_for_new_instance_with_cinder_error(
expected_exception=exception.InvalidVolume)
def test_provision_instances_creates_request_spec(self):
@mock.patch.object(self.compute_api, '_check_num_instances_quota')
@mock.patch.object(compute.Instance, 'create')
@mock.patch.object(self.compute_api.security_group_api,
'ensure_default')
@mock.patch.object(self.compute_api, '_validate_bdm')
@mock.patch.object(self.compute_api, '_create_block_device_mapping')
@mock.patch.object(compute.RequestSpec, 'from_components')
@mock.patch.object(compute, 'BuildRequest')
def do_test(_mock_build_req,
mock_req_spec_from_components, _mock_create_bdm,
_mock_validate_bdm, _mock_ensure_default, _mock_create,
mock_check_num_inst_quota):
quota_mock = mock.MagicMock()
req_spec_mock = mock.MagicMock()
mock_check_num_inst_quota.return_value = (1, quota_mock)
mock_req_spec_from_components.return_value = req_spec_mock
ctxt = context.RequestContext('fake-user', 'fake-project')
flavor = self._create_flavor()
min_count = max_count = 1
boot_meta = {
'id': 'fake-image-id',
'properties': {'mappings': []},
'status': 'fake-status',
'location': 'far-away'}
base_options = {'image_ref': 'fake-ref',
'display_name': 'fake-name',
'project_id': 'fake-project',
'availability_zone': None,
'metadata': {},
'access_ip_v4': None,
'access_ip_v6': None,
'config_drive': None,
'key_name': None,
'numa_topology': None,
'pci_requests': None}
security_groups = {}
block_device_mapping = [compute.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict(
{
'id': 1,
'volume_id': 1,
'source_type': 'volume',
'destination_type': 'volume',
'device_name': 'vda',
'boot_index': 0,
}))]
shutdown_terminate = True
instance_group = None
check_server_group_quota = False
filter_properties = {'scheduler_hints': None,
'instance_type': flavor}
instances = self.compute_api._provision_instances(ctxt, flavor,
min_count, max_count, base_options, boot_meta,
security_groups, block_device_mapping, shutdown_terminate,
instance_group, check_server_group_quota,
filter_properties)
self.assertTrue(uuidutils.is_uuid_like(instances[0].uuid))
mock_req_spec_from_components.assert_called_once_with(ctxt,
mock.ANY, boot_meta, flavor, base_options['numa_topology'],
base_options['pci_requests'], filter_properties,
instance_group, base_options['availability_zone'])
req_spec_mock.create.assert_called_once_with()
do_test()
def test_provision_instances_creates_destroys_build_request(self):
@mock.patch.object(self.compute_api, '_check_num_instances_quota')
@mock.patch.object(compute.Instance, 'create')
@mock.patch.object(compute.Instance, 'save')
@mock.patch.object(self.compute_api.security_group_api,
'ensure_default')
@mock.patch.object(self.compute_api, '_validate_bdm')
@mock.patch.object(self.compute_api, '_create_block_device_mapping')
@mock.patch.object(compute.RequestSpec, 'from_components')
@mock.patch.object(compute, 'BuildRequest')
def do_test(mock_build_req, mock_req_spec_from_components,
_mock_create_bdm, _mock_validate_bdm, _mock_ensure_default,
_mock_inst_create, _mock_inst_save, mock_check_num_inst_quota):
quota_mock = mock.MagicMock()
req_spec_mock = mock.MagicMock()
build_req_mock = mock.MagicMock()
mock_check_num_inst_quota.return_value = (2, quota_mock)
mock_req_spec_from_components.return_value = req_spec_mock
mock_build_req.return_value = build_req_mock
ctxt = context.RequestContext('fake-user', 'fake-project')
flavor = self._create_flavor()
min_count = 1
max_count = 2
boot_meta = {
'id': 'fake-image-id',
'properties': {'mappings': []},
'status': 'fake-status',
'location': 'far-away'}
base_options = {'image_ref': 'fake-ref',
'display_name': 'fake-name',
'project_id': 'fake-project',
'availability_zone': None,
'metadata': {},
'access_ip_v4': None,
'access_ip_v6': None,
'config_drive': None,
'key_name': None,
'numa_topology': None,
'pci_requests': None}
security_groups = {}
block_device_mapping = [compute.BlockDeviceMapping(
**fake_block_device.FakeDbBlockDeviceDict(
{
'id': 1,
'volume_id': 1,
'source_type': 'volume',
'destination_type': 'volume',
'device_name': 'vda',
'boot_index': 0,
}))]
shutdown_terminate = True
instance_group = None
check_server_group_quota = False
filter_properties = {'scheduler_hints': None,
'instance_type': flavor}
instances = self.compute_api._provision_instances(ctxt, flavor,
min_count, max_count, base_options, boot_meta,
security_groups, block_device_mapping, shutdown_terminate,
instance_group, check_server_group_quota,
filter_properties)
self.assertTrue(uuidutils.is_uuid_like(instances[0].uuid))
display_names = ['fake-name-1', 'fake-name-2']
build_req_calls = [
mock.call(ctxt,
request_spec=req_spec_mock,
project_id=ctxt.project_id,
user_id=ctxt.user_id,
display_name=display_names[0],
instance_metadata=base_options['metadata'],
progress=0,
vm_state=vm_states.BUILDING,
task_state=task_states.SCHEDULING,
image_ref=base_options['image_ref'],
access_ip_v4=base_options['access_ip_v4'],
access_ip_v6=base_options['access_ip_v6'],
info_cache=mock.ANY,
security_groups=mock.ANY,
config_drive=False,
key_name=base_options['config_drive'],
locked_by=None),
mock.call().create(),
mock.call().destroy(),
mock.call(ctxt,
request_spec=req_spec_mock,
project_id=ctxt.project_id,
user_id=ctxt.user_id,
display_name=display_names[1],
instance_metadata=base_options['metadata'],
progress=0,
vm_state=vm_states.BUILDING,
task_state=task_states.SCHEDULING,
image_ref=base_options['image_ref'],
access_ip_v4=base_options['access_ip_v4'],
access_ip_v6=base_options['access_ip_v6'],
info_cache=mock.ANY,
security_groups=mock.ANY,
config_drive=False,
key_name=base_options['config_drive'],
locked_by=None),
mock.call().create(),
mock.call().destroy()
]
mock_build_req.assert_has_calls(build_req_calls)
do_test()
def _test_rescue(self, vm_state=vm_states.ACTIVE, rescue_password=None,
rescue_image=None, clean_shutdown=True):
instance = self._create_instance_obj(params={'vm_state': vm_state})
bdms = []
with test.nested(
mock.patch.object(compute.BlockDeviceMappingList,
'get_by_instance_uuid', return_value=bdms),
mock.patch.object(self.compute_api, 'is_volume_backed_instance',
return_value=False),
mock.patch.object(instance, 'save'),
mock.patch.object(self.compute_api, '_record_action_start'),
mock.patch.object(self.compute_api.jacket_rpcapi,
'rescue_instance')
) as (
bdm_get_by_instance_uuid, volume_backed_inst, instance_save,
record_action_start, rpcapi_rescue_instance
):
self.compute_api.rescue(self.context, instance,
rescue_password=rescue_password,
rescue_image_ref=rescue_image,
clean_shutdown=clean_shutdown)
# assert field values set on the instance object
self.assertEqual(task_states.RESCUING, instance.task_state)
# assert our mock calls
bdm_get_by_instance_uuid.assert_called_once_with(
self.context, instance.uuid)
volume_backed_inst.assert_called_once_with(
self.context, instance, bdms)
instance_save.assert_called_once_with(expected_task_state=[None])
record_action_start.assert_called_once_with(
self.context, instance, instance_actions.RESCUE)
rpcapi_rescue_instance.assert_called_once_with(
self.context, instance=instance,
rescue_password=rescue_password,
rescue_image_ref=rescue_image,
clean_shutdown=clean_shutdown)
def test_rescue_active(self):
self._test_rescue()
def test_rescue_stopped(self):
self._test_rescue(vm_state=vm_states.STOPPED)
def test_rescue_error(self):
self._test_rescue(vm_state=vm_states.ERROR)
def test_rescue_with_password(self):
self._test_rescue(rescue_password='fake-password')
def test_rescue_with_image(self):
self._test_rescue(rescue_image='fake-image')
def test_rescue_forced_shutdown(self):
self._test_rescue(clean_shutdown=False)
def test_unrescue(self):
instance = self._create_instance_obj(
params={'vm_state': vm_states.RESCUED})
with test.nested(
mock.patch.object(instance, 'save'),
mock.patch.object(self.compute_api, '_record_action_start'),
mock.patch.object(self.compute_api.jacket_rpcapi,
'unrescue_instance')
) as (
instance_save, record_action_start, rpcapi_unrescue_instance
):
self.compute_api.unrescue(self.context, instance)
# assert field values set on the instance object
self.assertEqual(task_states.UNRESCUING, instance.task_state)
# assert our mock calls
instance_save.assert_called_once_with(expected_task_state=[None])
record_action_start.assert_called_once_with(
self.context, instance, instance_actions.UNRESCUE)
rpcapi_unrescue_instance.assert_called_once_with(
self.context, instance=instance)
def test_set_admin_password_invalid_state(self):
# Tests that InstanceInvalidState is raised when not ACTIVE.
instance = self._create_instance_obj({'vm_state': vm_states.STOPPED})
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.set_admin_password,
self.context, instance)
def test_set_admin_password(self):
# Ensure instance can have its admin password set.
instance = self._create_instance_obj()
@mock.patch.object(compute.Instance, 'save')
@mock.patch.object(self.compute_api, '_record_action_start')
@mock.patch.object(self.compute_api.jacket_rpcapi,
'set_admin_password')
def do_test(compute_rpcapi_mock, record_mock, instance_save_mock):
# call the API
self.compute_api.set_admin_password(self.context, instance)
# make our assertions
instance_save_mock.assert_called_once_with(
expected_task_state=[None])
record_mock.assert_called_once_with(
self.context, instance, instance_actions.CHANGE_PASSWORD)
compute_rpcapi_mock.assert_called_once_with(
self.context, instance=instance, new_pass=None)
do_test()
def _test_attach_interface_invalid_state(self, state):
instance = self._create_instance_obj(
params={'vm_state': state})
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.attach_interface,
self.context, instance, '', '', '', [])
def test_attach_interface_invalid_state(self):
for state in [vm_states.BUILDING, vm_states.DELETED,
vm_states.ERROR, vm_states.RESCUED,
vm_states.RESIZED, vm_states.SOFT_DELETED,
vm_states.SUSPENDED, vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED]:
self._test_attach_interface_invalid_state(state)
def _test_detach_interface_invalid_state(self, state):
instance = self._create_instance_obj(
params={'vm_state': state})
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.detach_interface,
self.context, instance, '', '', '', [])
def test_detach_interface_invalid_state(self):
for state in [vm_states.BUILDING, vm_states.DELETED,
vm_states.ERROR, vm_states.RESCUED,
vm_states.RESIZED, vm_states.SOFT_DELETED,
vm_states.SUSPENDED, vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED]:
self._test_detach_interface_invalid_state(state)
def _test_check_and_transform_bdm(self, block_device_mapping):
instance_type = self._create_flavor()
base_options = {'uuid': uuids.bdm_instance,
'image_ref': 'fake_image_ref',
'metadata': {}}
image_meta = {'status': 'active',
'name': 'image_name',
'deleted': False,
'container_format': 'bare',
'id': 'image_id'}
legacy_bdm = False
block_device_mapping = block_device_mapping
self.assertRaises(exception.InvalidRequest,
self.compute_api._check_and_transform_bdm,
self.context, base_options, instance_type,
image_meta, 1, 1, block_device_mapping, legacy_bdm)
def test_check_and_transform_bdm_source_volume(self):
block_device_mapping = [{'boot_index': 0,
'device_name': None,
'image_id': 'image_id',
'source_type': 'image'},
{'device_name': '/dev/vda',
'source_type': 'volume',
'destination_type': 'volume',
'device_type': None,
'volume_id': 'volume_id'}]
self._test_check_and_transform_bdm(block_device_mapping)
def test_check_and_transform_bdm_source_snapshot(self):
block_device_mapping = [{'boot_index': 0,
'device_name': None,
'image_id': 'image_id',
'source_type': 'image'},
{'device_name': '/dev/vda',
'source_type': 'snapshot',
'destination_type': 'volume',
'device_type': None,
'volume_id': 'volume_id'}]
self._test_check_and_transform_bdm(block_device_mapping)
@mock.patch.object(compute.Instance, 'save')
@mock.patch.object(compute.InstanceAction, 'action_start')
@mock.patch.object(compute_rpcapi.JacketAPI, 'pause_instance')
@mock.patch.object(compute.Instance, 'get_by_uuid')
@mock.patch.object(compute_api.API, '_get_instances_by_filters',
return_value=[])
@mock.patch.object(compute_api.API, '_create_instance')
def test_skip_policy_check(self, mock_create, mock_get_ins_by_filters,
mock_get, mock_pause, mock_action, mock_save):
policy.reset()
rules = {'compute:pause': '!',
'compute:get': '!',
'compute:get_all': '!',
'compute:create': '!'}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
instance = self._create_instance_obj()
mock_get.return_value = instance
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.pause, self.context, instance)
api = compute_api.API(skip_policy_check=True)
api.pause(self.context, instance)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.get, self.context, instance.uuid)
api = compute_api.API(skip_policy_check=True)
api.get(self.context, instance.uuid)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.get_all, self.context)
api = compute_api.API(skip_policy_check=True)
api.get_all(self.context)
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.create, self.context, None, None)
api = compute_api.API(skip_policy_check=True)
api.create(self.context, None, None)
@mock.patch.object(compute_api.API, '_get_instances_by_filters')
def test_tenant_to_project_conversion(self, mock_get):
mock_get.return_value = []
api = compute_api.API()
api.get_all(self.context, search_opts={'tenant_id': 'foo'})
filters = mock_get.call_args_list[0][0][1]
self.assertEqual({'project_id': 'foo'}, filters)
def test_metadata_invalid_return_empty_object(self):
api = compute_api.API()
ret = api.get_all(self.context, want_objects=True,
search_opts={'metadata': 'foo'})
self.assertIsInstance(ret, compute.InstanceList)
self.assertEqual(0, len(ret))
def test_metadata_invalid_return_empty_list(self):
api = compute_api.API()
ret = api.get_all(self.context, want_objects=False,
search_opts={'metadata': 'foo'})
self.assertIsInstance(ret, list)
self.assertEqual(0, len(ret))
def test_populate_instance_names_host_name(self):
params = dict(display_name="vm1")
instance = self._create_instance_obj(params=params)
self.compute_api._populate_instance_names(instance, 1)
self.assertEqual('vm1', instance.hostname)
def test_populate_instance_names_host_name_is_empty(self):
params = dict(display_name=u'\u865a\u62df\u673a\u662f\u4e2d\u6587')
instance = self._create_instance_obj(params=params)
self.compute_api._populate_instance_names(instance, 1)
self.assertEqual('Server-%s' % instance.uuid, instance.hostname)
def test_populate_instance_names_host_name_multi(self):
params = dict(display_name="vm")
instance = self._create_instance_obj(params=params)
with mock.patch.object(instance, 'save'):
self.compute_api._apply_instance_name_template(self.context,
instance, 1)
self.assertEqual('vm-2', instance.hostname)
def test_populate_instance_names_host_name_is_empty_multi(self):
params = dict(display_name=u'\u865a\u62df\u673a\u662f\u4e2d\u6587')
instance = self._create_instance_obj(params=params)
with mock.patch.object(instance, 'save'):
self.compute_api._apply_instance_name_template(self.context,
instance, 1)
self.assertEqual('Server-%s' % instance.uuid, instance.hostname)
def test_host_statuses(self):
instances = [
compute.Instance(uuid=uuids.instance_1, host='host1', services=
self._obj_to_list_obj(compute.ServiceList(
self.context), compute.Service(id=0, host='host1',
disabled=True, forced_down=True,
binary='compute-compute'))),
compute.Instance(uuid=uuids.instance_2, host='host2', services=
self._obj_to_list_obj(compute.ServiceList(
self.context), compute.Service(id=0, host='host2',
disabled=True, forced_down=False,
binary='compute-compute'))),
compute.Instance(uuid=uuids.instance_3, host='host3', services=
self._obj_to_list_obj(compute.ServiceList(
self.context), compute.Service(id=0, host='host3',
disabled=False, last_seen_up=timeutils.utcnow()
- datetime.timedelta(minutes=5),
forced_down=False, binary='compute-compute'))),
compute.Instance(uuid=uuids.instance_4, host='host4', services=
self._obj_to_list_obj(compute.ServiceList(
self.context), compute.Service(id=0, host='host4',
disabled=False, last_seen_up=timeutils.utcnow(),
forced_down=False, binary='compute-compute'))),
compute.Instance(uuid=uuids.instance_5, host='host5', services=
compute.ServiceList()),
compute.Instance(uuid=uuids.instance_6, host=None, services=
self._obj_to_list_obj(compute.ServiceList(
self.context), compute.Service(id=0, host='host6',
disabled=True, forced_down=False,
binary='compute-compute'))),
compute.Instance(uuid=uuids.instance_7, host='host2', services=
self._obj_to_list_obj(compute.ServiceList(
self.context), compute.Service(id=0, host='host2',
disabled=True, forced_down=False,
binary='compute-compute')))
]
host_statuses = self.compute_api.get_instances_host_statuses(
instances)
expect_statuses = {uuids.instance_1: fields_obj.HostStatus.DOWN,
uuids.instance_2: fields_obj.HostStatus.MAINTENANCE,
uuids.instance_3: fields_obj.HostStatus.UNKNOWN,
uuids.instance_4: fields_obj.HostStatus.UP,
uuids.instance_5: fields_obj.HostStatus.NONE,
uuids.instance_6: fields_obj.HostStatus.NONE,
uuids.instance_7: fields_obj.HostStatus.MAINTENANCE}
for instance in instances:
self.assertEqual(expect_statuses[instance.uuid],
host_statuses[instance.uuid])
@mock.patch.object(compute.Migration, 'get_by_id_and_instance')
@mock.patch.object(compute.InstanceAction, 'action_start')
def test_live_migrate_force_complete_succeeded(
self, action_start, get_by_id_and_instance):
if self.cell_type == 'api':
# cell api has not been implemented.
return
rpcapi = self.compute_api.jacket_rpcapi
instance = self._create_instance_obj()
instance.task_state = task_states.MIGRATING
migration = compute.Migration()
migration.id = 0
migration.status = 'running'
get_by_id_and_instance.return_value = migration
with mock.patch.object(
rpcapi, 'live_migration_force_complete') as lm_force_complete:
self.compute_api.live_migrate_force_complete(
self.context, instance, migration.id)
lm_force_complete.assert_called_once_with(self.context,
instance,
0)
action_start.assert_called_once_with(
self.context, instance.uuid, 'live_migration_force_complete',
want_result=False)
@mock.patch.object(compute.Migration, 'get_by_id_and_instance')
def test_live_migrate_force_complete_invalid_migration_state(
self, get_by_id_and_instance):
instance = self._create_instance_obj()
instance.task_state = task_states.MIGRATING
migration = compute.Migration()
migration.id = 0
migration.status = 'error'
get_by_id_and_instance.return_value = migration
self.assertRaises(exception.InvalidMigrationState,
self.compute_api.live_migrate_force_complete,
self.context, instance, migration.id)
def test_live_migrate_force_complete_invalid_vm_state(self):
instance = self._create_instance_obj()
instance.task_state = None
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.live_migrate_force_complete,
self.context, instance, '1')
def _get_migration(self, migration_id, status, migration_type):
migration = compute.Migration()
migration.id = migration_id
migration.status = status
migration.migration_type = migration_type
return migration
@mock.patch('compute.compute.api.API._record_action_start')
@mock.patch.object(compute_rpcapi.JacketAPI, 'live_migration_abort')
@mock.patch.object(compute.Migration, 'get_by_id_and_instance')
def test_live_migrate_abort_succeeded(self,
mock_get_migration,
mock_lm_abort,
mock_rec_action):
instance = self._create_instance_obj()
instance.task_state = task_states.MIGRATING
migration = self._get_migration(21, 'running', 'live-migration')
mock_get_migration.return_value = migration
self.compute_api.live_migrate_abort(self.context,
instance,
migration.id)
mock_rec_action.assert_called_once_with(self.context,
instance,
instance_actions.LIVE_MIGRATION_CANCEL)
mock_lm_abort.called_once_with(self.context, instance, migration.id)
@mock.patch.object(compute.Migration, 'get_by_id_and_instance')
def test_live_migration_abort_wrong_migration_status(self,
mock_get_migration):
instance = self._create_instance_obj()
instance.task_state = task_states.MIGRATING
migration = self._get_migration(21, 'completed', 'live-migration')
mock_get_migration.return_value = migration
self.assertRaises(exception.InvalidMigrationState,
self.compute_api.live_migrate_abort,
self.context,
instance,
migration.id)
class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
def setUp(self):
super(ComputeAPIUnitTestCase, self).setUp()
self.compute_api = compute_api.API()
self.cell_type = None
def test_resize_same_flavor_fails(self):
self.assertRaises(exception.CannotResizeToSameFlavor,
self._test_resize, same_flavor=True)
class ComputeAPIAPICellUnitTestCase(_ComputeAPIUnitTestMixIn,
test.NoDBTestCase):
def setUp(self):
super(ComputeAPIAPICellUnitTestCase, self).setUp()
self.flags(cell_type='api', enable=True, group='cells')
self.compute_api = compute_cells_api.ComputeCellsAPI()
self.cell_type = 'api'
def test_resize_same_flavor_fails(self):
self.assertRaises(exception.CannotResizeToSameFlavor,
self._test_resize, same_flavor=True)
@mock.patch.object(compute_cells_api, 'ComputeRPCAPIRedirect')
def test_create_volume_bdm_call_reserve_dev_name(self, mock_reserve):
instance = self._create_instance_obj()
# In the cells rpcapi there isn't the call for the
# reserve_block_device_name so the volume_bdm returned
# by the _create_volume_bdm is None
result = self.compute_api._create_volume_bdm(self.context,
instance,
'vda',
'1',
None,
None)
self.assertIsNone(result, None)
@mock.patch.object(compute_cells_api.ComputeCellsAPI, '_call_to_cells')
def test_attach_volume(self, mock_attach):
instance = self._create_instance_obj()
volume = fake_volume.fake_volume(1, 'test-vol', 'test-vol',
None, None, None, None, None)
mock_volume_api = mock.patch.object(self.compute_api, 'volume_api',
mock.MagicMock(spec=cinder.API))
with mock_volume_api as mock_v_api:
mock_v_api.get.return_value = volume
self.compute_api.attach_volume(
self.context, instance, volume['id'])
mock_v_api.check_attach.assert_called_once_with(self.context,
volume,
instance=instance)
mock_attach.assert_called_once_with(self.context, instance,
'attach_volume', volume['id'],
None, None, None)
def test_attach_volume_reserve_fails(self):
self.skipTest("Reserve is never done in the API cell.")
class ComputeAPIComputeCellUnitTestCase(_ComputeAPIUnitTestMixIn,
test.NoDBTestCase):
def setUp(self):
super(ComputeAPIComputeCellUnitTestCase, self).setUp()
self.flags(cell_type='compute', enable=True, group='cells')
self.compute_api = compute_api.API()
self.cell_type = 'compute'
def test_resize_same_flavor_passes(self):
self._test_resize(same_flavor=True)
class DiffDictTestCase(test.NoDBTestCase):
"""Unit tests for _diff_dict()."""
def test_no_change(self):
old = dict(a=1, b=2, c=3)
new = dict(a=1, b=2, c=3)
diff = compute_api._diff_dict(old, new)
self.assertEqual(diff, {})
def test_new_key(self):
old = dict(a=1, b=2, c=3)
new = dict(a=1, b=2, c=3, d=4)
diff = compute_api._diff_dict(old, new)
self.assertEqual(diff, dict(d=['+', 4]))
def test_changed_key(self):
old = dict(a=1, b=2, c=3)
new = dict(a=1, b=4, c=3)
diff = compute_api._diff_dict(old, new)
self.assertEqual(diff, dict(b=['+', 4]))
def test_removed_key(self):
old = dict(a=1, b=2, c=3)
new = dict(a=1, c=3)
diff = compute_api._diff_dict(old, new)
self.assertEqual(diff, dict(b=['-']))
class SecurityGroupAPITest(test.NoDBTestCase):
def setUp(self):
super(SecurityGroupAPITest, self).setUp()
self.secgroup_api = compute_api.SecurityGroupAPI()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id,
self.project_id)
def test_get_instance_security_groups(self):
groups = compute.SecurityGroupList()
groups.objects = [compute.SecurityGroup(name='foo'),
compute.SecurityGroup(name='bar')]
instance = compute.Instance(security_groups=groups)
names = self.secgroup_api.get_instance_security_groups(self.context,
instance)
self.assertEqual(sorted([{'name': 'bar'}, {'name': 'foo'}], key=str),
sorted(names, key=str))
@mock.patch('compute.compute.security_group.make_secgroup_list')
def test_populate_security_groups(self, mock_msl):
r = self.secgroup_api.populate_security_groups([mock.sentinel.group])
mock_msl.assert_called_once_with([mock.sentinel.group])
self.assertEqual(r, mock_msl.return_value)
|
apache-2.0
|
dairin0d/cable-editor
|
object_cable_editor/dairin0d/utils_text.py
|
5
|
4254
|
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ***** END GPL LICENSE BLOCK *****
import re
# Already implemented:
# bpy_extras.io_utils.unique_name(key, name, name_dict, name_max=-1, clean_func=None, sep='.')
#def unique_name(src_name, existing_names):
# name = src_name
# i = 1
# while name in existing_names:
# name = "{}.{:0>3}".format(src_name, i)
# i += 1
# return name
# keep_newlines is False by default because Blender doesn't support multi-line tooltips
def compress_whitespace(s, keep_newlines=False):
#return re.sub("\\s+", " ", s).strip()
if not keep_newlines: return " ".join(s.split())
return "\n".join(" ".join(l.split()) for l in s.splitlines())
def indent(s, t):
res = []
for l in s.splitlines():
res.append(t + l)
return "\n".join(res)
def unindent(s, t=None):
lines = s.splitlines()
if t is None:
nt = len(s)
for l in lines:
nd = len(l) - len(l.lstrip())
# ignore whitespace-only lines
if nd > 0: nt = min(nt, nd)
else:
nt = len(t)
res = []
for l in lines:
nd = len(l) - len(l.lstrip())
res.append(l[min(nt, nd):])
return "\n".join(res)
def split_expressions(s, sep="\t", strip=False):
if sep == "\t":
text = s
else:
sep = sep.strip()
text = ""
brackets = 0
for c in s:
if c in "[{(":
brackets += 1
elif c in "]})":
brackets -= 1
if (brackets == 0) and (c == sep):
c = "\t"
text += c
res = text.split("\t")
return ([s.strip() for s in res] if strip else res)
def math_eval(s):
try:
return float(eval(s, math.__dict__))
except Exception:
# What actual exceptions can be raised by float/math/eval?
return None
def vector_to_text(v, sep="\t", axes_names="xyzw"):
sa = []
for i in range(len(v)):
s = str(v[i])
if axes_names:
s = axes_names[i] + ": " + s
sa.append(s)
return sep.join(sa)
def vector_from_text(v, s, sep="\t", axes_names="xyzw"):
sa = split_expressions(s, sep, True)
if axes_names:
# first, check if there are keyword arguments
kw = False
for a in sa:
if len(a) < 3:
continue
try:
# list has no find() method
i = axes_names.index(a[0].lower())
except ValueError:
i = -1
if (i != -1) and (a[1] == ":"):
v_i = math_eval(a[2:])
if v_i is not None:
v[i] = v_i
kw = True
if kw:
return
for i in range(min(len(v), len(sa))):
v_i = math_eval(sa[i])
if v_i is not None:
v[i] = v_i
# From http://www.bogotobogo.com/python/python_longest_common_substring_lcs_algorithm_generalized_suffix_tree.php
# Actually applicable to any sequence with hashable elements
def longest_common_substring(S, T):
m = len(S)
n = len(T)
counter = [[0]*(n+1) for x in range(m+1)]
longest = 0
lcs_set = set()
for i in range(m):
for j in range(n):
if S[i] == T[j]:
c = counter[i][j] + 1
counter[i+1][j+1] = c
if c > longest:
longest = c
lcs_set = {S[i-c+1:i+1]}
elif c == longest:
lcs_set.add(S[i-c+1:i+1])
return lcs_set
|
gpl-3.0
|
samirasnoun/django_cms_gallery_image
|
cms/south_migrations/0050_save_home.py
|
17
|
16290
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
try:
page1 = orm['cms.Page'].objects.filter(parent__isnull=True)[0]
page1.save()
except IndexError:
pass
# Note: Don't use "from appname.models import ModelName".
# Use orm.ModelName to refer to models in this application,
# and orm['appname.ModelName'] for models in other applications.
def backwards(self, orm):
"Write your backwards methods here."
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'unique_together': "(('publisher_is_draft', 'application_namespace'),)", 'object_name': 'Page'},
'application_namespace': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_home': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'default': "'INHERIT'", 'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.pagemoderatorstate': {
'Meta': {'ordering': "('page', 'action', '-created')", 'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [u'auth.User']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': u"orm['auth.User']"}),
u'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': [u'auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': u"orm['auth.User']"}),
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '155', 'null': 'True', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms.usersettings': {
'Meta': {'object_name': 'UserSettings'},
'clipboard': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms']
symmetrical = True
|
bsd-3-clause
|
xzturn/tensorflow
|
tensorflow/api_template.__init__.py
|
2
|
5993
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Top-level module of TensorFlow. By convention, we refer to this module as
`tf` instead of `tensorflow`, following the common practice of importing
TensorFlow via the command `import tensorflow as tf`.
The primary function of this module is to import all of the public TensorFlow
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
Note that the file `__init__.py` in the TensorFlow source code tree is actually
only a placeholder to enable test cases to run. The TensorFlow build replaces
this file with a file generated from [`api_template.__init__.py`](https://www.github.com/tensorflow/tensorflow/blob/master/tensorflow/api_template.__init__.py)
"""
from __future__ import absolute_import as _absolute_import
from __future__ import division as _division
from __future__ import print_function as _print_function
import distutils as _distutils
import inspect as _inspect
import logging as _logging
import os as _os
import site as _site
import six as _six
import sys as _sys
from tensorflow.python.tools import module_util as _module_util
from tensorflow.python.util.lazy_loader import LazyLoader as _LazyLoader
# API IMPORTS PLACEHOLDER
# WRAPPER_PLACEHOLDER
# Make sure directory containing top level submodules is in
# the __path__ so that "from tensorflow.foo import bar" works.
# We're using bitwise, but there's nothing special about that.
_API_MODULE = _sys.modules[__name__].bitwise
_tf_api_dir = _os.path.dirname(_os.path.dirname(_API_MODULE.__file__))
_current_module = _sys.modules[__name__]
if not hasattr(_current_module, '__path__'):
__path__ = [_tf_api_dir]
elif _tf_api_dir not in __path__:
__path__.append(_tf_api_dir)
# Hook external TensorFlow modules.
# Import compat before trying to import summary from tensorboard, so that
# reexport_tf_summary can get compat from sys.modules. Only needed if using
# lazy loading.
_current_module.compat.v2 # pylint: disable=pointless-statement
try:
from tensorboard.summary._tf import summary
_current_module.__path__ = (
[_module_util.get_parent_dir(summary)] + _current_module.__path__)
setattr(_current_module, "summary", summary)
except ImportError:
_logging.warning(
"Limited tf.summary API due to missing TensorBoard installation.")
# Lazy-load estimator.
_estimator_module = "tensorflow_estimator.python.estimator.api._v2.estimator"
estimator = _LazyLoader("estimator", globals(), _estimator_module)
_module_dir = _module_util.get_parent_dir_for_name(_estimator_module)
if _module_dir:
_current_module.__path__ = [_module_dir] + _current_module.__path__
setattr(_current_module, "estimator", estimator)
try:
from .python.keras.api._v2 import keras
_current_module.__path__ = (
[_module_util.get_parent_dir(keras)] + _current_module.__path__)
setattr(_current_module, "keras", keras)
except ImportError:
pass
# Explicitly import lazy-loaded modules to support autocompletion.
# pylint: disable=g-import-not-at-top
if not _six.PY2:
import typing as _typing
if _typing.TYPE_CHECKING:
from tensorflow_estimator.python.estimator.api._v2 import estimator
# pylint: enable=g-import-not-at-top
# Enable TF2 behaviors
from tensorflow.python.compat import v2_compat as _compat # pylint: disable=g-import-not-at-top
_compat.enable_v2_behavior()
_major_api_version = 2
# Load all plugin libraries from site-packages/tensorflow-plugins if we are
# running under pip.
# TODO(gunan): Enable setting an environment variable to define arbitrary plugin
# directories.
# TODO(gunan): Find a better location for this code snippet.
from tensorflow.python.framework import load_library as _ll
from tensorflow.python.lib.io import file_io as _fi
# Get sitepackages directories for the python installation.
_site_packages_dirs = []
_site_packages_dirs += [_site.USER_SITE]
_site_packages_dirs += [_p for _p in _sys.path if 'site-packages' in _p]
if 'getsitepackages' in dir(_site):
_site_packages_dirs += _site.getsitepackages()
if 'sysconfig' in dir(_distutils):
_site_packages_dirs += [_distutils.sysconfig.get_python_lib()]
_site_packages_dirs = list(set(_site_packages_dirs))
# Find the location of this exact file.
_current_file_location = _inspect.getfile(_inspect.currentframe())
def _running_from_pip_package():
return any(
_current_file_location.startswith(dir_) for dir_ in _site_packages_dirs)
if _running_from_pip_package():
# TODO(gunan): Add sanity checks to loaded modules here.
for _s in _site_packages_dirs:
# Load first party dynamic kernels.
_main_dir = _os.path.join(_s, 'tensorflow_core/core/kernels')
if _fi.file_exists(_main_dir):
_ll.load_library(_main_dir)
# Load third party dynamic kernels.
_plugin_dir = _os.path.join(_s, 'tensorflow-plugins')
if _fi.file_exists(_plugin_dir):
_ll.load_library(_plugin_dir)
# Add module aliases
if hasattr(_current_module, 'keras'):
losses = keras.losses
metrics = keras.metrics
optimizers = keras.optimizers
initializers = keras.initializers
setattr(_current_module, "losses", losses)
setattr(_current_module, "metrics", metrics)
setattr(_current_module, "optimizers", optimizers)
setattr(_current_module, "initializers", initializers)
# pylint: enable=undefined-variable
# __all__ PLACEHOLDER
|
apache-2.0
|
neharejanjeva/techstitution
|
venv/lib/python2.7/encodings/zlib_codec.py
|
58
|
3048
|
""" Python 'zlib_codec' Codec - zlib compression encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Written by Marc-Andre Lemburg ([email protected]).
"""
import codecs
import zlib # this codec needs the optional zlib module !
### Codec APIs
def zlib_encode(input,errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = zlib.compress(input)
return (output, len(input))
def zlib_decode(input,errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = zlib.decompress(input)
return (output, len(input))
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return zlib_encode(input, errors)
def decode(self, input, errors='strict'):
return zlib_decode(input, errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
def __init__(self, errors='strict'):
assert errors == 'strict'
self.errors = errors
self.compressobj = zlib.compressobj()
def encode(self, input, final=False):
if final:
c = self.compressobj.compress(input)
return c + self.compressobj.flush()
else:
return self.compressobj.compress(input)
def reset(self):
self.compressobj = zlib.compressobj()
class IncrementalDecoder(codecs.IncrementalDecoder):
def __init__(self, errors='strict'):
assert errors == 'strict'
self.errors = errors
self.decompressobj = zlib.decompressobj()
def decode(self, input, final=False):
if final:
c = self.decompressobj.decompress(input)
return c + self.decompressobj.flush()
else:
return self.decompressobj.decompress(input)
def reset(self):
self.decompressobj = zlib.decompressobj()
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='zlib',
encode=zlib_encode,
decode=zlib_decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
_is_text_encoding=False,
)
|
cc0-1.0
|
yvaucher/stock-logistics-transport
|
stock_transport_multi_address/__openerp__.py
|
1
|
1317
|
# -*- coding: utf-8 -*-
#
#
# Author: Alexandre Fayolle
# Copyright 2015 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
{'name': 'Stock - Transport Addresses',
'summary': 'Manage origin / destination / consignee addresses on pickings',
'version': '1.0',
'author': "Camptocamp,Odoo Community Association (OCA)",
'category': 'Warehouse',
'license': 'AGPL-3',
'complexity': 'expert',
'images': [],
'website': "http://www.camptocamp.com",
'depends': ['stock',
],
'demo': [],
'data': ['view/stock.xml',
'view/procurement.xml',
'view/res_partner.xml',
],
'auto_install': False,
'installable': True,
}
|
agpl-3.0
|
ailove-dev/suds
|
suds/umx/basic.py
|
210
|
1394
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
Provides basic unmarshaller classes.
"""
from logging import getLogger
from suds import *
from suds.umx import *
from suds.umx.core import Core
class Basic(Core):
"""
A object builder (unmarshaller).
"""
def process(self, node):
"""
Process an object graph representation of the xml I{node}.
@param node: An XML tree.
@type node: L{sax.element.Element}
@return: A suds object.
@rtype: L{Object}
"""
content = Content(node)
return Core.process(self, content)
|
lgpl-3.0
|
nicholasbs/zulip
|
zerver/lib/context_managers.py
|
120
|
1090
|
"""
Context managers, i.e. things you can use with the 'with' statement.
"""
from __future__ import absolute_import
import fcntl
import os
from contextlib import contextmanager
@contextmanager
def flock(lockfile, shared=False):
"""Lock a file object using flock(2) for the duration of a 'with' statement.
If shared is True, use a LOCK_SH lock, otherwise LOCK_EX."""
fcntl.flock(lockfile, fcntl.LOCK_SH if shared else fcntl.LOCK_EX)
try:
yield
finally:
fcntl.flock(lockfile, fcntl.LOCK_UN)
@contextmanager
def lockfile(filename, shared=False):
"""Lock a file using flock(2) for the duration of a 'with' statement.
If shared is True, use a LOCK_SH lock, otherwise LOCK_EX.
The file is given by name and will be created if it does not exist."""
if not os.path.exists(filename):
with open(filename, 'w') as lock:
lock.write('0')
# TODO: Can we just open the file for writing, and skip the above check?
with open(filename, 'r') as lock:
with flock(lock, shared=shared):
yield
|
apache-2.0
|
simleo/openmicroscopy
|
components/tools/OmeroPy/src/omero/tables.py
|
1
|
33778
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# OMERO Tables Interface
# Copyright 2009 Glencoe Software, Inc. All Rights Reserved.
# Use is subject to license terms supplied in LICENSE.txt
#
import Ice
import time
import numpy
import logging
import threading
import traceback
from os import W_OK
from path import path
import omero # Do we need both??
import omero.clients
import omero.callbacks
# For ease of use
from omero import LockTimeout
from omero.columns import columns2definition
from omero.rtypes import rfloat, rint, rlong, rstring, unwrap
from omero.util.decorators import remoted, locked, perf
from omero_ext import portalocker
from omero_ext.functional import wraps
sys = __import__("sys") # Python sys
tables = __import__("tables") # Pytables
VERSION = '2'
def slen(rv):
"""
Returns the length of the argument or None
if the argument is None
"""
if rv is None:
return None
return len(rv)
def internal_attr(s):
"""
Checks whether this attribute name is reserved for internal use
"""
return s.startswith('__')
def stamped(func, update=False):
"""
Decorator which takes the first argument after "self" and compares
that to the last modification time. If the stamp is older, then the
method call will throw an omero.OptimisticLockException. Otherwise,
execution will complete normally. If update is True, then the
last modification time will be updated after the method call if it
is successful.
Note: stamped implies locked
"""
def check_and_update_stamp(*args, **kwargs):
self = args[0]
stamp = args[1]
if stamp < self._stamp:
raise omero.OptimisticLockException(
None, None, "Resource modified by another thread")
try:
return func(*args, **kwargs)
finally:
if update:
self._stamp = time.time()
check_and_update_stamp = wraps(func)(check_and_update_stamp)
return locked(check_and_update_stamp)
def modifies(func):
"""
Decorator which always calls flush() on the first argument after the
method call
"""
def flush_after(*args, **kwargs):
self = args[0]
try:
return func(*args, **kwargs)
finally:
self.flush()
return wraps(func)(flush_after)
class HdfList(object):
"""
Since two calls to tables.openFile() return non-equal files
with equal fileno's, portalocker cannot be used to prevent
the creation of two HdfStorage instances from the same
Python process.
This also holds a global lock for all HDF5 calls since libhdf5 is usually
compiled without --enable-threadsafe, see
https://trac.openmicroscopy.org.uk/ome/ticket/10464
"""
def __init__(self):
self.logger = logging.getLogger("omero.tables.HdfList")
self._lock = threading.RLock()
self.__filenos = {}
self.__paths = {}
@locked
def addOrThrow(self, hdfpath, hdfstorage):
if hdfpath in self.__paths:
raise omero.LockTimeout(
None, None, "Path already in HdfList: %s" % hdfpath)
parent = path(hdfpath).parent
if not parent.exists():
raise omero.ApiUsageException(
None, None, "Parent directory does not exist: %s" % parent)
hdffile = hdfstorage.openfile("a")
fileno = hdffile.fileno()
try:
portalocker.lockno(
fileno, portalocker.LOCK_NB | portalocker.LOCK_EX)
except portalocker.LockException:
hdffile.close()
raise omero.LockTimeout(
None, None,
"Cannot acquire exclusive lock on: %s" % hdfpath, 0)
except:
hdffile.close()
raise
if fileno in self.__filenos.keys():
hdffile.close()
raise omero.LockTimeout(
None, None, "File already opened by process: %s" % hdfpath, 0)
else:
self.__filenos[fileno] = hdfstorage
self.__paths[hdfpath] = hdfstorage
return hdffile
@locked
def getOrCreate(self, hdfpath):
try:
return self.__paths[hdfpath]
except KeyError:
return HdfStorage(hdfpath, self._lock) # Adds itself.
@locked
def remove(self, hdfpath, hdffile):
del self.__filenos[hdffile.fileno()]
del self.__paths[hdfpath]
# Global object for maintaining files
HDFLIST = HdfList()
class HdfStorage(object):
"""
Provides HDF-storage for measurement results. At most a single
instance will be available for any given physical HDF5 file.
"""
def __init__(self, file_path, hdf5lock):
"""
file_path should be the path to a file in a valid directory where
this HDF instance can be stored (Not None or Empty). Once this
method is finished, self.__hdf_file is guaranteed to be a PyTables HDF
file, but not necessarily initialized.
"""
if file_path is None or str(file_path) == "":
raise omero.ValidationException(None, None, "Invalid file_path")
self.logger = logging.getLogger("omero.tables.HdfStorage")
self.__hdf_path = path(file_path)
# Locking first as described at:
# http://www.pytables.org/trac/ticket/185
self.__hdf_file = HDFLIST.addOrThrow(file_path, self)
self.__tables = []
self._lock = hdf5lock
self._stamp = time.time()
# These are what we'd like to have
self.__mea = None
self.__ome = None
try:
self.__ome = self.__hdf_file.root.OME
self.__mea = self.__ome.Measurements
self.__types = self.__ome.ColumnTypes[:]
self.__descriptions = self.__ome.ColumnDescriptions[:]
self.__initialized = True
except tables.NoSuchNodeError:
self.__initialized = False
self._modified = False
#
# Non-locked methods
#
def size(self):
return self.__hdf_path.size
def openfile(self, mode):
try:
if self.__hdf_path.exists():
if self.__hdf_path.size == 0:
mode = "w"
elif mode != "r" and not self.__hdf_path.access(W_OK):
self.logger.info(
"%s not writable (mode=%s). Opening read-only" % (
self.__hdf_path, mode))
mode = "r"
return tables.openFile(str(self.__hdf_path), mode=mode,
title="OMERO HDF Measurement Storage",
rootUEP="/")
except (tables.HDF5ExtError, IOError) as e:
msg = "HDFStorage initialized with bad path: %s: %s" % (
self.__hdf_path, e)
self.logger.error(msg)
raise omero.ValidationException(None, None, msg)
def modified(self):
return self._modified
def __initcheck(self):
if not self.__initialized:
raise omero.ApiUsageException(None, None, "Not yet initialized")
def __width(self):
return len(self.__types)
def __length(self):
return self.__mea.nrows
def __sizecheck(self, colNumbers, rowNumbers):
if colNumbers is not None:
if len(colNumbers) > 0:
maxcol = max(colNumbers)
totcol = self.__width()
if maxcol >= totcol:
raise omero.ApiUsageException(
None, None, "Column overflow: %s >= %s"
% (maxcol, totcol))
else:
raise omero.ApiUsageException(
None, None, "Columns not specified: %s" % colNumbers)
if rowNumbers is not None:
if len(rowNumbers) > 0:
maxrow = max(rowNumbers)
totrow = self.__length()
if maxrow >= totrow:
raise omero.ApiUsageException(
None, None, "Row overflow: %s >= %s"
% (maxrow, totrow))
else:
raise omero.ApiUsageException(
None, None, "Rows not specified: %s" % rowNumbers)
def __getversion(self):
"""
In OMERO.tables v2 the version attribute name was changed to __version
"""
self.__initcheck()
k = '__version'
try:
v = self.__mea.attrs[k]
if isinstance(v, str):
return v
except KeyError:
k = 'version'
v = self.__mea.attrs[k]
if v == 'v1':
return '1'
msg = "Invalid version attribute (%s=%s) in path: %s" % (
k, v, self.__hdf_path)
self.logger.error(msg)
raise omero.ValidationException(None, None, msg)
#
# Locked methods
#
@locked
def flush(self):
"""
Flush writes to the underlying table, mark this object as modified
"""
self._modified = True
if self.__mea:
self.__mea.flush()
self.logger.debug("Modified flag set")
@locked
@modifies
def initialize(self, cols, metadata=None):
"""
"""
if metadata is None:
metadata = {}
if self.__initialized:
raise omero.ValidationException(None, None, "Already initialized.")
if not cols:
raise omero.ApiUsageException(None, None, "No columns provided")
for c in cols:
if not c.name:
raise omero.ApiUsageException(
None, None, "Column unnamed: %s" % c)
if internal_attr(c.name):
raise omero.ApiUsageException(
None, None, "Reserved column name: %s" % c.name)
self.__definition = columns2definition(cols)
self.__ome = self.__hdf_file.createGroup("/", "OME")
self.__mea = self.__hdf_file.createTable(
self.__ome, "Measurements", self.__definition)
self.__types = [x.ice_staticId() for x in cols]
self.__descriptions = [
(x.description is not None) and x.description or "" for x in cols]
self.__hdf_file.createArray(self.__ome, "ColumnTypes", self.__types)
self.__hdf_file.createArray(
self.__ome, "ColumnDescriptions", self.__descriptions)
md = {}
if metadata:
md = metadata.copy()
md['__version'] = VERSION
md['__initialized'] = time.time()
self.add_meta_map(md, replace=True, init=True)
self.__hdf_file.flush()
self.__initialized = True
@locked
def incr(self, table):
sz = len(self.__tables)
self.logger.info("Size: %s - Attaching %s to %s" %
(sz, table, self.__hdf_path))
if table in self.__tables:
self.logger.warn("Already added")
raise omero.ApiUsageException(None, None, "Already added")
self.__tables.append(table)
return sz + 1
@locked
def decr(self, table):
sz = len(self.__tables)
self.logger.info(
"Size: %s - Detaching %s from %s", sz, table, self.__hdf_path)
if not (table in self.__tables):
self.logger.warn("Unknown table")
raise omero.ApiUsageException(None, None, "Unknown table")
self.__tables.remove(table)
if sz <= 1:
self.cleanup()
return sz - 1
@locked
def uptodate(self, stamp):
return self._stamp <= stamp
@locked
def rows(self):
self.__initcheck()
return self.__mea.nrows
@locked
def cols(self, size, current):
self.__initcheck()
ic = current.adapter.getCommunicator()
types = self.__types
names = self.__mea.colnames
descs = self.__descriptions
cols = []
for i in range(len(types)):
t = types[i]
n = names[i]
d = descs[i]
try:
col = ic.findObjectFactory(t).create(t)
col.name = n
col.description = d
col.setsize(size)
col.settable(self.__mea)
cols.append(col)
except:
msg = traceback.format_exc()
raise omero.ValidationException(
None, msg, "BAD COLUMN TYPE: %s for %s" % (t, n))
return cols
@locked
def get_meta_map(self):
self.__initcheck()
metadata = {}
attr = self.__mea.attrs
keys = list(self.__mea.attrs._v_attrnamesuser)
for key in keys:
val = attr[key]
if isinstance(val, float):
val = rfloat(val)
elif isinstance(val, int):
val = rint(val)
elif isinstance(val, long):
val = rlong(val)
elif isinstance(val, str):
val = rstring(val)
else:
raise omero.ValidationException("BAD TYPE: %s" % type(val))
metadata[key] = val
return metadata
@locked
@modifies
def add_meta_map(self, m, replace=False, init=False):
if not init:
if int(self.__getversion()) < 2:
# Metadata methods were generally broken for v1 tables so
# the introduction of internal metadata attributes is unlikely
# to affect anyone.
# http://trac.openmicroscopy.org.uk/ome/ticket/12606
msg = 'Tables metadata is only supported for OMERO.tables >= 2'
self.logger.error(msg)
raise omero.ApiUsageException(None, None, msg)
self.__initcheck()
for k, v in m.iteritems():
if internal_attr(k):
raise omero.ApiUsageException(
None, None, "Reserved attribute name: %s" % k)
if not isinstance(v, (
omero.RString, omero.RLong, omero.RInt, omero.RFloat)):
raise omero.ValidationException(
"Unsupported type: %s" % type(v))
attr = self.__mea.attrs
if replace:
for f in list(attr._v_attrnamesuser):
if init or not internal_attr(f):
del attr[f]
if not m:
return
for k, v in m.iteritems():
# This uses the default pytables type conversion, which may
# convert it to a numpy type or keep it as a native Python type
attr[k] = unwrap(v)
@locked
@modifies
def append(self, cols):
self.__initcheck()
# Optimize!
arrays = []
dtypes = []
sz = None
for col in cols:
if sz is None:
sz = col.getsize()
else:
if sz != col.getsize():
raise omero.ValidationException(
"Columns are of differing length")
arrays.extend(col.arrays())
dtypes.extend(col.dtypes())
col.append(self.__mea) # Potential corruption !!!
# Convert column-wise data to row-wise records
records = numpy.array(zip(*arrays), dtype=dtypes)
self.__mea.append(records)
#
# Stamped methods
#
@stamped
@modifies
def update(self, stamp, data):
self.__initcheck()
if data:
for i, rn in enumerate(data.rowNumbers):
for col in data.columns:
getattr(self.__mea.cols, col.name)[rn] = col.values[i]
@stamped
def getWhereList(self, stamp, condition, variables, unused,
start, stop, step):
self.__initcheck()
try:
return self.__mea.getWhereList(condition, variables, None,
start, stop, step).tolist()
except (NameError, SyntaxError, TypeError, ValueError), err:
aue = omero.ApiUsageException()
aue.message = "Bad condition: %s, %s" % (condition, variables)
aue.serverStackTrace = "".join(traceback.format_exc())
aue.serverExceptionClass = str(err.__class__.__name__)
raise aue
def _as_data(self, cols, rowNumbers):
"""
Constructs a omero.grid.Data object for returning to the client.
"""
data = omero.grid.Data()
data.columns = cols
data.rowNumbers = rowNumbers
# Convert to millis since epoch
data.lastModification = long(self._stamp * 1000)
return data
@stamped
def readCoordinates(self, stamp, rowNumbers, current):
self.__initcheck()
self.__sizecheck(None, rowNumbers)
cols = self.cols(None, current)
for col in cols:
col.readCoordinates(self.__mea, rowNumbers)
return self._as_data(cols, rowNumbers)
@stamped
def read(self, stamp, colNumbers, start, stop, current):
self.__initcheck()
self.__sizecheck(colNumbers, None)
cols = self.cols(None, current)
rows = self._getrows(start, stop)
rv, l = self._rowstocols(rows, colNumbers, cols)
return self._as_data(rv, range(start, start + l))
def _getrows(self, start, stop):
return self.__mea.read(start, stop)
def _rowstocols(self, rows, colNumbers, cols):
l = 0
rv = []
for i in colNumbers:
col = cols[i]
col.fromrows(rows)
rv.append(col)
if not l:
l = len(col.values)
return rv, l
@stamped
def slice(self, stamp, colNumbers, rowNumbers, current):
self.__initcheck()
if colNumbers is None or len(colNumbers) == 0:
colNumbers = range(self.__width())
if rowNumbers is None or len(rowNumbers) == 0:
rowNumbers = range(self.__length())
self.__sizecheck(colNumbers, rowNumbers)
cols = self.cols(None, current)
rv = []
for i in colNumbers:
col = cols[i]
col.readCoordinates(self.__mea, rowNumbers)
rv.append(col)
return self._as_data(rv, rowNumbers)
#
# Lifecycle methods
#
def check(self):
return True
@locked
def cleanup(self):
self.logger.info("Cleaning storage: %s", self.__hdf_path)
if self.__mea:
self.__mea = None
if self.__ome:
self.__ome = None
if self.__hdf_file:
HDFLIST.remove(self.__hdf_path, self.__hdf_file)
hdffile = self.__hdf_file
self.__hdf_file = None
hdffile.close() # Resources freed
# End class HdfStorage
class TableI(omero.grid.Table, omero.util.SimpleServant):
"""
Spreadsheet implementation based on pytables.
"""
def __init__(self, ctx, file_obj, factory, storage, uuid="unknown",
call_context=None):
self.uuid = uuid
self.file_obj = file_obj
self.factory = factory
self.storage = storage
self.call_context = call_context
self.can_write = factory.getAdminService().canUpdate(
file_obj, call_context)
omero.util.SimpleServant.__init__(self, ctx)
self.stamp = time.time()
self.storage.incr(self)
self._closed = False
if (not self.file_obj.isLoaded() or
self.file_obj.getDetails() is None or
self.file_obj.details.group is None):
self.file_obj = self.ctx.getSession().getQueryService().get(
'omero.model.OriginalFileI', unwrap(file_obj.id),
{"omero.group": "-1"})
def assert_write(self):
"""
Checks that the current user can write to the given object
at the database level. If not, no FS level writes are permitted
either.
ticket:2910
"""
if not self.can_write:
raise omero.SecurityViolation(
"Current user cannot write to file %s" % self.file_obj.id.val)
def check(self):
"""
Called periodically to check the resource is alive. Returns
False if this resource can be cleaned up. (Resources API)
"""
self.logger.debug("Checking %s" % self)
if self._closed:
return False
idname = 'UNKNOWN'
try:
idname = self.factory.ice_getIdentity().name
clientSession = self.ctx.getSession().getSessionService() \
.getSession(idname)
if clientSession.getClosed():
self.logger.debug("Client session closed: %s" % idname)
return False
return True
except Exception:
self.logger.debug("Client session not found: %s" % idname)
return False
def cleanup(self):
"""
Decrements the counter on the held storage to allow it to
be cleaned up. Returns the current file-size.
"""
if self.storage:
try:
self.storage.decr(self)
return self.storage.size()
finally:
self.storage = None
def __str__(self):
return "Table-%s" % self.uuid
@remoted
@perf
def close(self, current=None):
if self._closed:
self.logger.warn(
"File object %d already closed",
unwrap(self.file_obj.id) if self.file_obj else None)
return
modified = self.storage.modified()
try:
size = self.cleanup()
self.logger.info("Closed %s", self)
except:
self.logger.warn("Closed %s with errors", self)
self._closed = True
fid = unwrap(self.file_obj.id)
if self.file_obj is not None and self.can_write and modified:
gid = unwrap(self.file_obj.details.group.id)
client_uuid = self.factory.ice_getIdentity().category[8:]
ctx = {
"omero.group": str(gid),
omero.constants.CLIENTUUID: client_uuid}
try:
# Size to reset the server object to (must be checked after
# the underlying HDF file has been closed)
rfs = self.factory.createRawFileStore(ctx)
try:
rfs.setFileId(fid, ctx)
if size:
rfs.truncate(size, ctx) # May do nothing
rfs.write([], size, 0, ctx) # Force an update
else:
rfs.write([], 0, 0, ctx) # No-op
file_obj = rfs.save(ctx)
finally:
rfs.close(ctx)
self.logger.info(
"Updated file object %s to hash=%s (%s bytes)",
fid, unwrap(file_obj.hash), unwrap(file_obj.size))
except:
self.logger.warn("Failed to update file object %s",
fid, exc_info=1)
else:
self.logger.info("File object %s not updated", fid)
# TABLES READ API ============================
@remoted
@perf
def getOriginalFile(self, current=None):
msg = "unknown"
if self.file_obj:
if self.file_obj.id:
msg = self.file_obj.id.val
self.logger.info("%s.getOriginalFile() => id=%s", self, msg)
return self.file_obj
@remoted
@perf
def getHeaders(self, current=None):
rv = self.storage.cols(None, current)
self.logger.info("%s.getHeaders() => size=%s", self, slen(rv))
return rv
@remoted
@perf
def getNumberOfRows(self, current=None):
rv = self.storage.rows()
self.logger.info("%s.getNumberOfRows() => %s", self, rv)
return long(rv)
@remoted
@perf
def getWhereList(self, condition, variables,
start, stop, step, current=None):
variables = unwrap(variables)
if stop == 0:
stop = None
if step == 0:
step = None
rv = self.storage.getWhereList(
self.stamp, condition, variables, None, start, stop, step)
self.logger.info("%s.getWhereList(%s, %s, %s, %s, %s) => size=%s",
self, condition, variables,
start, stop, step, slen(rv))
return rv
@remoted
@perf
def readCoordinates(self, rowNumbers, current=None):
self.logger.info("%s.readCoordinates(size=%s)", self, slen(rowNumbers))
try:
return self.storage.readCoordinates(self.stamp, rowNumbers,
current)
except tables.HDF5ExtError, err:
aue = omero.ApiUsageException()
aue.message = "Error reading coordinates. Most likely out of range"
aue.serverStackTrace = "".join(traceback.format_exc())
aue.serverExceptionClass = str(err.__class__.__name__)
raise aue
@remoted
@perf
def read(self, colNumbers, start, stop, current=None):
self.logger.info("%s.read(%s, %s, %s)", self, colNumbers, start, stop)
if start == 0L and stop == 0L:
stop = None
try:
return self.storage.read(self.stamp, colNumbers,
start, stop, current)
except tables.HDF5ExtError, err:
aue = omero.ApiUsageException()
aue.message = "Error reading coordinates. Most likely out of range"
aue.serverStackTrace = "".join(traceback.format_exc())
aue.serverExceptionClass = str(err.__class__.__name__)
raise aue
@remoted
@perf
def slice(self, colNumbers, rowNumbers, current=None):
self.logger.info(
"%s.slice(size=%s, size=%s)", self,
slen(colNumbers), slen(rowNumbers))
return self.storage.slice(self.stamp, colNumbers, rowNumbers, current)
# TABLES WRITE API ===========================
@remoted
@perf
def initialize(self, cols, current=None):
self.assert_write()
self.storage.initialize(cols)
if cols:
self.logger.info("Initialized %s with %s col(s)", self, slen(cols))
@remoted
@perf
def addColumn(self, col, current=None):
self.assert_write()
raise omero.ApiUsageException(None, None, "NYI")
@remoted
@perf
def addData(self, cols, current=None):
self.assert_write()
self.storage.append(cols)
if cols and cols[0] and cols[0].getsize():
self.logger.info(
"Added %s row(s) of data to %s", cols[0].getsize(), self)
@remoted
@perf
def update(self, data, current=None):
self.assert_write()
if data:
self.storage.update(self.stamp, data)
self.logger.info(
"Updated %s row(s) of data to %s", slen(data.rowNumbers), self)
@remoted
@perf
def delete(self, current=None):
self.assert_write()
self.close()
dc = omero.cmd.Delete2(
targetObjects={"OriginalFile": [self.file_obj.id.val]}
)
handle = self.factory.submit(dc)
# Copied from clients.py since none is available
try:
callback = omero.callbacks.CmdCallbackI(
current.adapter, handle, "Fake")
except:
# Since the callback won't escape this method,
# close the handle if requested.
handle.close()
raise
try:
callback.loop(20, 500)
except LockTimeout:
callback.close(True)
raise omero.InternalException(None, None, "delete timed-out")
rsp = callback.getResponse()
if isinstance(rsp, omero.cmd.ERR):
raise omero.InternalException(None, None, str(rsp))
self.file_obj = None
# TABLES METADATA API ===========================
@remoted
@perf
def getMetadata(self, key, current=None):
rv = self.storage.get_meta_map()
rv = rv.get(key)
self.logger.info("%s.getMetadata() => %s", self, unwrap(rv))
return rv
@remoted
@perf
def getAllMetadata(self, current=None):
rv = self.storage.get_meta_map()
self.logger.info("%s.getMetadata() => size=%s", self, slen(rv))
return rv
@remoted
@perf
def setMetadata(self, key, value, current=None):
self.assert_write()
self.storage.add_meta_map({key: value})
self.logger.info("%s.setMetadata() => %s=%s", self, key, unwrap(value))
@remoted
@perf
def setAllMetadata(self, value, current=None):
self.assert_write()
self.storage.add_meta_map(value, replace=True)
self.logger.info("%s.setMetadata() => number=%s", self, slen(value))
# Column methods missing
class TablesI(omero.grid.Tables, omero.util.Servant):
"""
Implementation of the omero.grid.Tables API. Provides
spreadsheet like functionality across the OMERO.grid.
This servant serves as a session-less, user-less
resource for obtaining omero.grid.Table proxies.
The first major step in initialization is getting
a session. This will block until the Blitz server
is reachable.
"""
def __init__(
self, ctx,
table_cast=omero.grid.TablePrx.uncheckedCast,
internal_repo_cast=omero.grid.InternalRepositoryPrx.checkedCast):
omero.util.Servant.__init__(self, ctx, needs_session=True)
# Storing these methods, mainly to allow overriding via
# test methods. Static methods are evil.
self._table_cast = table_cast
self._internal_repo_cast = internal_repo_cast
self.__stores = []
self._get_dir()
self._get_uuid()
self._get_repo()
def _get_dir(self):
"""
Second step in initialization is to find the .omero/repository
directory. If this is not created, then a required server has
not started, and so this instance will not start.
"""
wait = int(self.communicator.getProperties().getPropertyWithDefault(
"omero.repo.wait", "1"))
self.repo_dir = self.communicator.getProperties().getProperty(
"omero.repo.dir")
if not self.repo_dir:
# Implies this is the legacy directory. Obtain from server
self.repo_dir = self.ctx.getSession(
).getConfigService().getConfigValue("omero.data.dir")
self.repo_cfg = path(self.repo_dir) / ".omero" / "repository"
start = time.time()
while not self.repo_cfg.exists() and wait < (time.time() - start):
self.logger.info(
"%s doesn't exist; waiting 5 seconds..." % self.repo_cfg)
self.stop_event.wait(5)
if not self.repo_cfg.exists():
msg = "No repository found: %s" % self.repo_cfg
self.logger.error(msg)
raise omero.ResourceError(None, None, msg)
def _get_uuid(self):
"""
Third step in initialization is to find the database uuid
for this grid instance. Multiple OMERO.grids could be watching
the same directory.
"""
cfg = self.ctx.getSession().getConfigService()
self.db_uuid = cfg.getDatabaseUuid()
self.instance = self.repo_cfg / self.db_uuid
def _get_repo(self):
"""
Fourth step in initialization is to find the repository object
for the UUID found in .omero/repository/<db_uuid>, and then
create a proxy for the InternalRepository attached to that.
"""
# Get and parse the uuid from the RandomAccessFile format from
# FileMaker
self.repo_uuid = (self.instance / "repo_uuid").lines()[0].strip()
if len(self.repo_uuid) != 38:
raise omero.ResourceError(
"Poorly formed UUID: %s" % self.repo_uuid)
self.repo_uuid = self.repo_uuid[2:]
# Using the repo_uuid, find our OriginalFile object
self.repo_obj = self.ctx.getSession().getQueryService().findByQuery(
"select f from OriginalFile f where hash = :uuid",
omero.sys.ParametersI().add("uuid", rstring(self.repo_uuid)))
self.repo_mgr = self.communicator.stringToProxy(
"InternalRepository-%s" % self.repo_uuid)
self.repo_mgr = self._internal_repo_cast(self.repo_mgr)
self.repo_svc = self.repo_mgr.getProxy()
@remoted
def getRepository(self, current=None):
"""
Returns the Repository object for this Tables server.
"""
return self.repo_svc
@remoted
@perf
def getTable(self, file_obj, factory, current=None):
"""
Create and/or register a table servant.
"""
# Will throw an exception if not allowed.
file_id = None
if file_obj is not None and file_obj.id is not None:
file_id = file_obj.id.val
self.logger.info("getTable: %s %s", file_id, current.ctx)
file_path = self.repo_mgr.getFilePath(file_obj)
p = path(file_path).dirname()
if not p.exists():
p.makedirs()
storage = HDFLIST.getOrCreate(file_path)
id = Ice.Identity()
id.name = Ice.generateUUID()
table = TableI(self.ctx, file_obj, factory, storage, uuid=id.name,
call_context=current.ctx)
self.resources.add(table)
prx = current.adapter.add(table, id)
return self._table_cast(prx)
|
gpl-2.0
|
personalrobotics/chimera
|
test/examples/30_pybind11_examples/02_advanced/02_classes/classes.py
|
1
|
3784
|
import unittest
import classes_pybind11 as py11
import classes_boost_python as boost
class TestClasses(unittest.TestCase):
def _test_override_virtual_functions(self, binding):
d = binding.test1.Dog()
self.assertEqual(binding.test1.call_go(d), "woof! woof! woof! ")
if binding is py11:
class Cat(binding.test1.Animal):
def go(self, n_times):
return "meow! " * n_times
c = Cat()
self.assertEqual(binding.test1.call_go(c), "meow! meow! meow! ")
def test_override_virtual_functions(self):
self._test_override_virtual_functions(py11)
self._test_override_virtual_functions(boost)
def _test_combining_virtual_functions_and_inheritance(self, binding):
if binding is py11:
class ShihTzu(binding.test2.Dog):
def bark(self):
return "yip!"
s = ShihTzu()
self.assertEqual(binding.test2.call_bark(s), "yip!")
def test_combining_virtual_functions_and_inheritance(self):
self._test_combining_virtual_functions_and_inheritance(py11)
self._test_combining_virtual_functions_and_inheritance(boost)
def _test_operator_overloading(self, binding):
v1 = binding.test3.Vector2(1, 2)
v2 = binding.test3.Vector2(3, -1)
self.assertEqual(v1.get_x(), 1)
self.assertEqual(v1.get_y(), 2)
self.assertEqual(v2.get_x(), 3)
self.assertEqual(v2.get_y(), -1)
self.assertAlmostEqual((v1 + v2).get_x(), 4)
self.assertAlmostEqual((v1 + v2).get_y(), 1)
self.assertAlmostEqual((v1 - v2).get_x(), -2)
self.assertAlmostEqual((v1 - v2).get_y(), 3)
self.assertAlmostEqual((v1 * v2).get_x(), 3)
self.assertAlmostEqual((v1 * v2).get_y(), -2)
self.assertAlmostEqual((v1 / v2).get_x(), 1/3)
self.assertAlmostEqual((v1 / v2).get_y(), -2)
self.assertAlmostEqual((v1 + 8).get_x(), 9)
self.assertAlmostEqual((v1 + 8).get_y(), 10)
self.assertAlmostEqual((v1 - 2).get_x(), -1)
self.assertAlmostEqual((v1 - 2).get_y(), 0)
self.assertAlmostEqual((v1 * 8).get_x(), 8)
self.assertAlmostEqual((v1 * 8).get_y(), 16)
self.assertAlmostEqual((v1 / 2).get_x(), 0.5)
self.assertAlmostEqual((v1 / 2).get_y(), 1.0)
v1 += v2
self.assertEqual(v1.get_x(), 4)
self.assertEqual(v1.get_y(), 1)
v1 -= v2
self.assertEqual(v1.get_x(), 1)
self.assertEqual(v1.get_y(), 2)
v1 *= v2
self.assertEqual(v1.get_x(), 3)
self.assertEqual(v1.get_y(), -2)
v1 /= v2
self.assertEqual(v1.get_x(), 1)
self.assertEqual(v1.get_y(), 2)
v3 = v1.__iadd__(v2)
self.assertEqual(v1.get_x(), 4)
self.assertEqual(v1.get_y(), 1)
self.assertEqual(v3.get_x(), 4)
self.assertEqual(v3.get_y(), 1)
# TODO: Non-member operators are not supported
# v3 = 2 + v1
# self.assertEqual(v3.get_x(), 3)
# self.assertEqual(v3.get_y(), 4)
# TODO: Non-member operators are not supported
# v3 = 1 - v1
# self.assertEqual(v3.get_x(), 0)
# self.assertEqual(v3.get_y(), -1)
# TODO: Non-member operators are not supported
# v3 = 3 * v1
# self.assertEqual(v3.get_x(), 3)
# self.assertEqual(v3.get_y(), 6)
# TODO: Non-member operators are not supported
# v3 = 4 / v1
# self.assertEqual(v3.get_x(), 1)
# self.assertEqual(v3.get_y(), 2)
def test_operator_overloading(self):
self._test_operator_overloading(py11)
self._test_operator_overloading(boost)
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
GarbojaxMcBruce/Machine_Learning
|
LEA Original Solution/Envirionment.py
|
1
|
5902
|
class Environment:
name = ''
hunter = ''
hunter_position = [0,0]
creature = ''
creature_position = [0,0]
food_position = [0,0]
food_value = 100
size = 10
game_state = 1
def __init__(self, name, size, creature, hunter, random):
self.name = name
self.size = size
self.creature = creature
self.hunter = hunter
self.food_position = [random.randint(0, self.size - 1),
random.randint(0, self.size - 1)]
self.hunter_position = [random.randint(0, self.size - 1),
random.randint(0, self.size - 1)]
self.creature_position = [random.randint(0, self.size - 1),
random.randint(0, self.size - 1)]
#Resets the playing field, but not the creatures brain
def reset(self, random):
self.creature.reset_life()
self.game_state = 1
self.food_position = [random.randint(0, self.size - 1),
random.randint(0, self.size - 1)]
self.hunter_position = [random.randint(0, self.size - 1),
random.randint(0, self.size - 1)]
self.creature_position = [random.randint(0, self.size - 1),
random.randint(0, self.size - 1)]
def get_position_state(self):
position_state = (self.creature_position + self.hunter_position +
self.food_position)
return position_state
#Will process reward for this step
def calculate_reward(self, random):
reward = self.creature.step_value
if(self.food_position == self.creature_position):
reward += self.food_value
self.food_position = [random.randint(0, self.size - 1),
random.randint(0, self.size - 1)]
if(self.creature_position == self.hunter_position):
reward -= self.hunter.value
return reward
def update_game_state(self):
if(self.creature.check_status != 1):
self.game_state = 0
def move_item(self, new_pos):
if(new_pos[0] > self.size - 1):
new_pos[0] = self.size - 1
if(new_pos[0] < 0):
new_pos[0] = 0
if(new_pos[1] > self.size - 1):
new_pos[1] = self.size - 1
if(new_pos[1] < 0):
new_pos[1] = 0
return new_pos
def itteration(self, random):
#creature moves
creature_move = self.creature.epsilon_greedy_move()
self.creature_position = self.move_item(
[sum(pos) for pos in zip(creature_move, self.creature_position)]
)
#hunter moves
hunter_move = self.hunter.auto_move(random)
self.hunter_position = self.move_item(
[sum(pos) for pos in zip(hunter_move, self.hunter_position)]
)
#check reward
reward = self.calculate_reward(random)
self.creature.get_reward(reward)
#update state of game
self.update_game_state()
def learning_itteration(self, random):
#Get present positional state
initial_position_state = self.get_position_state()
#creature move, this will also be our action: q
creature_move = self.creature.epsilon_greedy_move()
self.creature_position = self.move_item(
[sum(pos) for pos in zip(creature_move, self.creature_position)]
)
#hunter move
hunter_move = self.hunter.auto_move(random)
self.hunter_position = self.move_item(
[sum(pos) for pos in zip(hunter_move, self.hunter_position)]
)
#check reward
reward = self.calculate_reward()
self.creature.get_reward(reward)
#send history information to creature
X = initial_position_state + creature_move
y = reward
self.creature.absorb_experience(X, y)
#Update game state
self.update_game_state()
def watching_itteration(self, random):
#Get present positional state
initial_position_state = self.get_position_state()
#creature move, this will also be our action: q
creature_move = self.creature.random_move(random)
self.creature_position = self.move_item(
[sum(pos) for pos in zip(creature_move, self.creature_position)]
)
#hunter move
hunter_move = self.hunter.auto_move()
self.hunter_position = self.move_item(
[sum(pos) for pos in zip(hunter_move, self.hunter_position)]
)
#check reward
reward = self.calculate_reward(random)
self.creature.get_reward(reward)
#send history information to creature
X = initial_position_state + creature_move
y = reward
self.creature.absorb_experience(X, y)
#Update game state
self.update_game_state()
pass
def print_space(self):
print('{}\n\n'.format(self.name + ' space:'))
for i in range(0, self.size):
for j in range(0, self.size):
if(i == self.creature_position[0] and
j == self.creature_position[1]):
print('C ', end = '')
elif(i == self.food_position[0] and
j == self.food_position[1]):
print('F ', end = '')
elif(i == self.human_position[0] and
j == self.human_position[1]):
print('H ', end = '')
else:
print('_ ', end = '')
print('\n')
|
gpl-3.0
|
mikemintz/neutron
|
modules/xmpp/debug.py
|
207
|
14069
|
## debug.py
##
## Copyright (C) 2003 Jacob Lundqvist
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published
## by the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
_version_ = '1.4.0'
"""\
Generic debug class
Other modules can always define extra debug flags for local usage, as long as
they make sure they append them to debug_flags
Also its always a good thing to prefix local flags with something, to reduce risk
of coliding flags. Nothing breaks if two flags would be identical, but it might
activate unintended debugging.
flags can be numeric, but that makes analysing harder, on creation its
not obvious what is activated, and when flag_show is given, output isnt
really meaningfull.
This Debug class can either be initialized and used on app level, or used independantly
by the individual classes.
For samples of usage, see samples subdir in distro source, and selftest
in this code
"""
import sys
import traceback
import time
import os
import types
if os.environ.has_key('TERM'):
colors_enabled=True
else:
colors_enabled=False
color_none = chr(27) + "[0m"
color_black = chr(27) + "[30m"
color_red = chr(27) + "[31m"
color_green = chr(27) + "[32m"
color_brown = chr(27) + "[33m"
color_blue = chr(27) + "[34m"
color_magenta = chr(27) + "[35m"
color_cyan = chr(27) + "[36m"
color_light_gray = chr(27) + "[37m"
color_dark_gray = chr(27) + "[30;1m"
color_bright_red = chr(27) + "[31;1m"
color_bright_green = chr(27) + "[32;1m"
color_yellow = chr(27) + "[33;1m"
color_bright_blue = chr(27) + "[34;1m"
color_purple = chr(27) + "[35;1m"
color_bright_cyan = chr(27) + "[36;1m"
color_white = chr(27) + "[37;1m"
"""
Define your flags in yor modules like this:
from debug import *
DBG_INIT = 'init' ; debug_flags.append( DBG_INIT )
DBG_CONNECTION = 'connection' ; debug_flags.append( DBG_CONNECTION )
The reason for having a double statement wis so we can validate params
and catch all undefined debug flags
This gives us control over all used flags, and makes it easier to allow
global debugging in your code, just do something like
foo = Debug( debug_flags )
group flags, that is a flag in it self containing multiple flags should be
defined without the debug_flags.append() sequence, since the parts are already
in the list, also they must of course be defined after the flags they depend on ;)
example:
DBG_MULTI = [ DBG_INIT, DBG_CONNECTION ]
NoDebug
-------
To speed code up, typically for product releases or such
use this class instead if you globaly want to disable debugging
"""
class NoDebug:
def __init__( self, *args, **kwargs ):
self.debug_flags = []
def show( self, *args, **kwargs):
pass
def Show( self, *args, **kwargs):
pass
def is_active( self, flag ):
pass
colors={}
def active_set( self, active_flags = None ):
return 0
LINE_FEED = '\n'
class Debug:
def __init__( self,
#
# active_flags are those that will trigger output
#
active_flags = None,
#
# Log file should be file object or file namne
#
log_file = sys.stderr,
#
# prefix and sufix can either be set globaly or per call.
# personally I use this to color code debug statements
# with prefix = chr(27) + '[34m'
# sufix = chr(27) + '[37;1m\n'
#
prefix = 'DEBUG: ',
sufix = '\n',
#
# If you want unix style timestamps,
# 0 disables timestamps
# 1 before prefix, good when prefix is a string
# 2 after prefix, good when prefix is a color
#
time_stamp = 0,
#
# flag_show should normaly be of, but can be turned on to get a
# good view of what flags are actually used for calls,
# if it is not None, it should be a string
# flags for current call will be displayed
# with flag_show as separator
# recomended values vould be '-' or ':', but any string goes
#
flag_show = None,
#
# If you dont want to validate flags on each call to
# show(), set this to 0
#
validate_flags = 1,
#
# If you dont want the welcome message, set to 0
# default is to show welcome if any flags are active
welcome = -1
):
self.debug_flags = []
if welcome == -1:
if active_flags and len(active_flags):
welcome = 1
else:
welcome = 0
self._remove_dupe_flags()
if log_file:
if type( log_file ) is type(''):
try:
self._fh = open(log_file,'w')
except:
print 'ERROR: can open %s for writing'
sys.exit(0)
else: ## assume its a stream type object
self._fh = log_file
else:
self._fh = sys.stdout
if time_stamp not in (0,1,2):
msg2 = '%s' % time_stamp
raise 'Invalid time_stamp param', msg2
self.prefix = prefix
self.sufix = sufix
self.time_stamp = time_stamp
self.flag_show = None # must be initialised after possible welcome
self.validate_flags = validate_flags
self.active_set( active_flags )
if welcome:
self.show('')
caller = sys._getframe(1) # used to get name of caller
try:
mod_name= ":%s" % caller.f_locals['__name__']
except:
mod_name = ""
self.show('Debug created for %s%s' % (caller.f_code.co_filename,
mod_name ))
self.show(' flags defined: %s' % ','.join( self.active ))
if type(flag_show) in (type(''), type(None)):
self.flag_show = flag_show
else:
msg2 = '%s' % type(flag_show )
raise 'Invalid type for flag_show!', msg2
def show( self, msg, flag = None, prefix = None, sufix = None,
lf = 0 ):
"""
flag can be of folowing types:
None - this msg will always be shown if any debugging is on
flag - will be shown if flag is active
(flag1,flag2,,,) - will be shown if any of the given flags
are active
if prefix / sufix are not given, default ones from init will be used
lf = -1 means strip linefeed if pressent
lf = 1 means add linefeed if not pressent
"""
if self.validate_flags:
self._validate_flag( flag )
if not self.is_active(flag):
return
if prefix:
pre = prefix
else:
pre = self.prefix
if sufix:
suf = sufix
else:
suf = self.sufix
if self.time_stamp == 2:
output = '%s%s ' % ( pre,
time.strftime('%b %d %H:%M:%S',
time.localtime(time.time() )),
)
elif self.time_stamp == 1:
output = '%s %s' % ( time.strftime('%b %d %H:%M:%S',
time.localtime(time.time() )),
pre,
)
else:
output = pre
if self.flag_show:
if flag:
output = '%s%s%s' % ( output, flag, self.flag_show )
else:
# this call uses the global default,
# dont print "None", just show the separator
output = '%s %s' % ( output, self.flag_show )
output = '%s%s%s' % ( output, msg, suf )
if lf:
# strip/add lf if needed
last_char = output[-1]
if lf == 1 and last_char != LINE_FEED:
output = output + LINE_FEED
elif lf == -1 and last_char == LINE_FEED:
output = output[:-1]
try:
self._fh.write( output )
except:
# unicode strikes again ;)
s=u''
for i in range(len(output)):
if ord(output[i]) < 128:
c = output[i]
else:
c = '?'
s=s+c
self._fh.write( '%s%s%s' % ( pre, s, suf ))
self._fh.flush()
def is_active( self, flag ):
'If given flag(s) should generate output.'
# try to abort early to quicken code
if not self.active:
return 0
if not flag or flag in self.active:
return 1
else:
# check for multi flag type:
if type( flag ) in ( type(()), type([]) ):
for s in flag:
if s in self.active:
return 1
return 0
def active_set( self, active_flags = None ):
"returns 1 if any flags where actually set, otherwise 0."
r = 0
ok_flags = []
if not active_flags:
#no debuging at all
self.active = []
elif type( active_flags ) in ( types.TupleType, types.ListType ):
flags = self._as_one_list( active_flags )
for t in flags:
if t not in self.debug_flags:
sys.stderr.write('Invalid debugflag given: %s\n' % t )
ok_flags.append( t )
self.active = ok_flags
r = 1
else:
# assume comma string
try:
flags = active_flags.split(',')
except:
self.show( '***' )
self.show( '*** Invalid debug param given: %s' % active_flags )
self.show( '*** please correct your param!' )
self.show( '*** due to this, full debuging is enabled' )
self.active = self.debug_flags
for f in flags:
s = f.strip()
ok_flags.append( s )
self.active = ok_flags
self._remove_dupe_flags()
return r
def active_get( self ):
"returns currently active flags."
return self.active
def _as_one_list( self, items ):
""" init param might contain nested lists, typically from group flags.
This code organises lst and remves dupes
"""
if type( items ) <> type( [] ) and type( items ) <> type( () ):
return [ items ]
r = []
for l in items:
if type( l ) == type([]):
lst2 = self._as_one_list( l )
for l2 in lst2:
self._append_unique_str(r, l2 )
elif l == None:
continue
else:
self._append_unique_str(r, l )
return r
def _append_unique_str( self, lst, item ):
"""filter out any dupes."""
if type(item) <> type(''):
msg2 = '%s' % item
raise 'Invalid item type (should be string)',msg2
if item not in lst:
lst.append( item )
return lst
def _validate_flag( self, flags ):
'verify that flag is defined.'
if flags:
for f in self._as_one_list( flags ):
if not f in self.debug_flags:
msg2 = '%s' % f
raise 'Invalid debugflag given', msg2
def _remove_dupe_flags( self ):
"""
if multiple instances of Debug is used in same app,
some flags might be created multiple time, filter out dupes
"""
unique_flags = []
for f in self.debug_flags:
if f not in unique_flags:
unique_flags.append(f)
self.debug_flags = unique_flags
colors={}
def Show(self, flag, msg, prefix=''):
msg=msg.replace('\r','\\r').replace('\n','\\n').replace('><','>\n <')
if not colors_enabled: pass
elif self.colors.has_key(prefix): msg=self.colors[prefix]+msg+color_none
else: msg=color_none+msg
if not colors_enabled: prefixcolor=''
elif self.colors.has_key(flag): prefixcolor=self.colors[flag]
else: prefixcolor=color_none
if prefix=='error':
_exception = sys.exc_info()
if _exception[0]:
msg=msg+'\n'+''.join(traceback.format_exception(_exception[0], _exception[1], _exception[2])).rstrip()
prefix= self.prefix+prefixcolor+(flag+' '*12)[:12]+' '+(prefix+' '*6)[:6]
self.show(msg, flag, prefix)
def is_active( self, flag ):
if not self.active: return 0
if not flag or flag in self.active and DBG_ALWAYS not in self.active or flag not in self.active and DBG_ALWAYS in self.active : return 1
return 0
DBG_ALWAYS='always'
##Uncomment this to effectively disable all debugging and all debugging overhead.
#Debug=NoDebug
|
gpl-2.0
|
defionscode/ansible-modules-core
|
network/netvisor/pn_ospfarea.py
|
30
|
6219
|
#!/usr/bin/python
""" PN-CLI vrouter-ospf-add/remove """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import shlex
DOCUMENTATION = """
---
module: pn_ospfarea
author: "Pluribus Networks (@amitsi)"
version_added: "2.2"
version: 1.0
short_description: CLI command to add/remove ospf area to/from a vrouter.
description:
- Execute vrouter-ospf-add, vrouter-ospf-remove command.
- This command adds/removes Open Shortest Path First(OSPF) area to/from
a virtual router(vRouter) service.
options:
pn_cliusername:
description:
- Login username.
required: true
pn_clipassword:
description:
- Login password.
required: true
pn_cliswitch:
description:
- Target switch(es) to run the CLI on.
required: False
state:
description:
- State the action to perform. Use 'present' to add ospf-area, 'absent'
to remove ospf-area and 'update' to modify ospf-area.
required: true
choices: ['present', 'absent', 'update']
pn_vrouter_name:
description:
- Specify the name of the vRouter.
required: true
pn_ospf_area:
description:
- Specify the OSPF area number.
required: true
pn_stub_type:
description:
- Specify the OSPF stub type.
choices: ['none', 'stub', 'stub-no-summary', 'nssa', 'nssa-no-summary']
pn_prefix_listin:
description:
- OSPF prefix list for filtering incoming packets.
pn_prefix_listout:
description:
- OSPF prefix list for filtering outgoing packets.
pn_quiet:
description:
- Enable/disable system information.
required: false
default: true
"""
EXAMPLES = """
- name: "Add OSPF area to vrouter"
pn_ospfarea:
state: present
pn_cliusername: admin
pn_clipassword: admin
pn_ospf_area: 1.0.0.0
pn_stub_type: stub
- name: "Remove OSPF from vrouter"
pn_ospf:
state: absent
pn_cliusername: admin
pn_clipassword: admin
pn_vrouter_name: name-string
pn_ospf_area: 1.0.0.0
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
stdout:
description: The set of responses from the ospf command.
returned: always
type: list
stderr:
description: The set of error responses from the ospf command.
returned: on error
type: list
changed:
description: Indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
def get_command_from_state(state):
"""
This method gets appropriate command name for the state specified. It
returns the command name for the specified state.
:param state: The state for which the respective command name is required.
"""
command = None
if state == 'present':
command = 'vrouter-ospf-area-add'
if state == 'absent':
command = 'vrouter-ospf-area-remove'
if state == 'update':
command = 'vrouter-ospf-area-modify'
return command
def main():
""" This section is for arguments parsing """
module = AnsibleModule(
argument_spec=dict(
pn_cliusername=dict(required=True, type='str'),
pn_clipassword=dict(required=True, type='str', no_log=True),
pn_cliswitch=dict(required=False, type='str'),
state =dict(required=True, type='str',
choices=['present', 'absent', 'update']),
pn_vrouter_name=dict(required=True, type='str'),
pn_ospf_area=dict(required=True, type='str'),
pn_stub_type=dict(type='str', choices=['none', 'stub', 'nssa',
'stub-no-summary',
'nssa-no-summary']),
pn_prefix_listin=dict(type='str'),
pn_prefix_listout=dict(type='str'),
pn_quiet=dict(type='bool', default='True')
)
)
# Accessing the arguments
cliusername = module.params['pn_cliusername']
clipassword = module.params['pn_clipassword']
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
vrouter_name = module.params['pn_vrouter_name']
ospf_area = module.params['pn_ospf_area']
stub_type = module.params['pn_stub_type']
prefix_listin = module.params['pn_prefix_listin']
prefix_listout = module.params['pn_prefix_listout']
quiet = module.params['pn_quiet']
command = get_command_from_state(state)
# Building the CLI command string
cli = '/usr/bin/cli'
if quiet is True:
cli += ' --quiet '
cli += ' --user %s:%s ' % (cliusername, clipassword)
if cliswitch:
if cliswitch == 'local':
cli += ' switch-local '
else:
cli += ' switch ' + cliswitch
cli += ' %s vrouter-name %s area %s ' % (command, vrouter_name, ospf_area)
if stub_type:
cli += ' stub-type ' + stub_type
if prefix_listin:
cli += ' prefix-list-in ' + prefix_listin
if prefix_listout:
cli += ' prefix-list-out ' + prefix_listout
# Run the CLI command
ospfcommand = shlex.split(cli)
# 'out' contains the output
# 'err' contains the error messages
result, out, err = module.run_command(ospfcommand)
# Response in JSON format
if result != 0:
module.exit_json(
command=cli,
stderr=err.rstrip("\r\n"),
changed=False
)
else:
module.exit_json(
command=cli,
stdout=out.rstrip("\r\n"),
changed=True
)
# AnsibleModule boilerplate
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
|
gpl-3.0
|
alikins/ansible
|
lib/ansible/modules/cloud/azure/azure_rm_subnet.py
|
6
|
11313
|
#!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <[email protected]>
# Chris Houseknecht, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: azure_rm_subnet
version_added: "2.1"
short_description: Manage Azure subnets.
description:
- Create, update or delete a subnet within a given virtual network. Allows setting and updating the address
prefix CIDR, which must be valid within the context of the virtual network. Use the azure_rm_networkinterface
module to associate interfaces with the subnet and assign specific IP addresses.
options:
resource_group:
description:
- Name of resource group.
required: true
name:
description:
- Name of the subnet.
required: true
address_prefix_cidr:
description:
- CIDR defining the IPv4 address space of the subnet. Must be valid within the context of the
virtual network.
required: true
aliases:
- address_prefix
security_group_name:
description:
- Name of an existing security group with which to associate the subnet.
required: false
default: null
aliases:
- security_group
state:
description:
- Assert the state of the subnet. Use 'present' to create or update a subnet and
'absent' to delete a subnet.
required: false
default: present
choices:
- absent
- present
virtual_network_name:
description:
- Name of an existing virtual network with which the subnet is or will be associated.
required: true
aliases:
- virtual_network
extends_documentation_fragment:
- azure
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
- name: Create a subnet
azure_rm_subnet:
name: foobar
virtual_network_name: My_Virtual_Network
resource_group: Testing
address_prefix_cidr: "10.1.0.0/24"
- name: Delete a subnet
azure_rm_subnet:
name: foobar
virtual_network_name: My_Virtual_Network
resource_group: Testing
state: absent
'''
RETURN = '''
state:
description: Current state of the subnet.
returned: success
type: complex
contains:
address_prefix:
description: IP address CIDR.
type: str
example: "10.1.0.0/16"
id:
description: Subnet resource path.
type: str
example: "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/virtualNetworks/My_Virtual_Network/subnets/foobar"
name:
description: Subnet name.
type: str
example: "foobar"
network_security_group:
type: complex
contains:
id:
description: Security group resource identifier.
type: str
example: "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/secgroupfoo"
name:
description: Name of the security group.
type: str
example: "secgroupfoo"
provisioning_state:
description: Success or failure of the provisioning event.
type: str
example: "Succeeded"
''' # NOQA
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, CIDR_PATTERN, azure_id_to_dict
try:
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
def subnet_to_dict(subnet):
result = dict(
id=subnet.id,
name=subnet.name,
provisioning_state=subnet.provisioning_state,
address_prefix=subnet.address_prefix,
network_security_group=dict(),
)
if subnet.network_security_group:
id_keys = azure_id_to_dict(subnet.network_security_group.id)
result['network_security_group']['id'] = subnet.network_security_group.id
result['network_security_group']['name'] = id_keys['networkSecurityGroups']
return result
class AzureRMSubnet(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(type='str', required=True),
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present', 'absent']),
virtual_network_name=dict(type='str', required=True, aliases=['virtual_network']),
address_prefix_cidr=dict(type='str', aliases=['address_prefix']),
security_group_name=dict(type='str', aliases=['security_group']),
)
required_if = [
('state', 'present', ['address_prefix_cidr'])
]
self.results = dict(
changed=False,
state=dict()
)
self.resource_group = None
self.name = None
self.state = None
self.virtual_etwork_name = None
self.address_prefix_cidr = None
self.security_group_name = None
super(AzureRMSubnet, self).__init__(self.module_arg_spec,
supports_check_mode=True,
required_if=required_if)
def exec_module(self, **kwargs):
nsg = None
subnet = None
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
if self.state == 'present' and not CIDR_PATTERN.match(self.address_prefix_cidr):
self.fail("Invalid address_prefix_cidr value {0}".format(self.address_prefix_cidr))
if self.security_group_name:
nsg = self.get_security_group(self.security_group_name)
results = dict()
changed = False
try:
self.log('Fetching subnet {0}'.format(self.name))
subnet = self.network_client.subnets.get(self.resource_group,
self.virtual_network_name,
self.name)
self.check_provisioning_state(subnet, self.state)
results = subnet_to_dict(subnet)
if self.state == 'present':
if self.address_prefix_cidr:
if results['address_prefix'] != self.address_prefix_cidr:
self.log("CHANGED: subnet {0} address_prefix_cidr".format(self.name))
changed = True
results['address_prefix'] = self.address_prefix_cidr
if self.security_group_name:
if results['network_security_group'].get('id') != nsg.id:
self.log("CHANGED: subnet {0} network security group".format(self.name))
changed = True
results['network_security_group']['id'] = nsg.id
results['network_security_group']['name'] = nsg.name
elif self.state == 'absent':
changed = True
except CloudError:
# the subnet does not exist
if self.state == 'present':
changed = True
self.results['changed'] = changed
self.results['state'] = results
if not self.check_mode:
if self.state == 'present' and changed:
if not subnet:
# create new subnet
self.log('Creating subnet {0}'.format(self.name))
subnet = self.network_models.Subnet(
address_prefix=self.address_prefix_cidr
)
if nsg:
subnet.network_security_group = self.network_models.NetworkSecurityGroup(id=nsg.id,
location=nsg.location,
resource_guid=nsg.resource_guid)
else:
# update subnet
self.log('Updating subnet {0}'.format(self.name))
subnet = self.network_models.Subnet(
address_prefix=results['address_prefix']
)
if results['network_security_group'].get('id'):
nsg = self.get_security_group(results['network_security_group']['name'])
subnet.network_security_group = self.network_models.NetworkSecurityGroup(id=nsg.id,
location=nsg.location,
resource_guid=nsg.resource_guid)
self.results['state'] = self.create_or_update_subnet(subnet)
elif self.state == 'absent':
# delete subnet
self.delete_subnet()
# the delete does not actually return anything. if no exception, then we'll assume
# it worked.
self.results['state']['status'] = 'Deleted'
return self.results
def create_or_update_subnet(self, subnet):
try:
poller = self.network_client.subnets.create_or_update(self.resource_group,
self.virtual_network_name,
self.name,
subnet)
new_subnet = self.get_poller_result(poller)
except Exception as exc:
self.fail("Error creating or updating subnet {0} - {1}".format(self.name, str(exc)))
self.check_provisioning_state(new_subnet)
return subnet_to_dict(new_subnet)
def delete_subnet(self):
self.log('Deleting subnet {0}'.format(self.name))
try:
poller = self.network_client.subnets.delete(self.resource_group,
self.virtual_network_name,
self.name)
result = self.get_poller_result(poller)
except Exception as exc:
self.fail("Error deleting subnet {0} - {1}".format(self.name, str(exc)))
return result
def get_security_group(self, name):
self.log("Fetching security group {0}".format(name))
nsg = None
try:
nsg = self.network_client.network_security_groups.get(self.resource_group, name)
except Exception as exc:
self.fail("Error: fetching network security group {0} - {1}.".format(name, str(exc)))
return nsg
def main():
AzureRMSubnet()
if __name__ == '__main__':
main()
|
gpl-3.0
|
apmichaud/vitess-apm
|
test/schema.py
|
1
|
9883
|
#!/usr/bin/python
import logging
import unittest
import environment
import utils
import tablet
shard_0_master = tablet.Tablet()
shard_0_replica1 = tablet.Tablet()
shard_0_replica2 = tablet.Tablet()
shard_0_rdonly = tablet.Tablet()
shard_0_backup = tablet.Tablet()
shard_1_master = tablet.Tablet()
shard_1_replica1 = tablet.Tablet()
def setUpModule():
try:
environment.topo_server_setup()
setup_procs = [
shard_0_master.init_mysql(),
shard_0_replica1.init_mysql(),
shard_0_replica2.init_mysql(),
shard_0_rdonly.init_mysql(),
shard_0_backup.init_mysql(),
shard_1_master.init_mysql(),
shard_1_replica1.init_mysql(),
]
utils.wait_procs(setup_procs)
except:
tearDownModule()
raise
def tearDownModule():
if utils.options.skip_teardown:
return
teardown_procs = [
shard_0_master.teardown_mysql(),
shard_0_replica1.teardown_mysql(),
shard_0_replica2.teardown_mysql(),
shard_0_rdonly.teardown_mysql(),
shard_0_backup.teardown_mysql(),
shard_1_master.teardown_mysql(),
shard_1_replica1.teardown_mysql(),
]
utils.wait_procs(teardown_procs, raise_on_error=False)
environment.topo_server_teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
shard_0_master.remove_tree()
shard_0_replica1.remove_tree()
shard_0_replica2.remove_tree()
shard_0_rdonly.remove_tree()
shard_0_backup.remove_tree()
shard_1_master.remove_tree()
shard_1_replica1.remove_tree()
# statements to create the table
create_vt_select_test = [
('''create table vt_select_test%d (
id bigint not null,
msg varchar(64),
primary key (id)
) Engine=InnoDB''' % x).replace("\n", "")
for x in xrange(4)]
class TestSchema(unittest.TestCase):
def _check_tables(self, tablet, expectedCount):
tables = tablet.mquery('vt_test_keyspace', 'show tables')
self.assertEqual(len(tables), expectedCount,
'Unexpected table count on %s (not %u): %s' %
(tablet.tablet_alias, expectedCount, str(tables)))
def test_complex_schema(self):
utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
shard_0_master.init_tablet( 'master', 'test_keyspace', '0')
shard_0_replica1.init_tablet('replica', 'test_keyspace', '0')
shard_0_replica2.init_tablet('replica', 'test_keyspace', '0')
shard_0_rdonly.init_tablet( 'rdonly', 'test_keyspace', '0')
shard_0_backup.init_tablet( 'backup', 'test_keyspace', '0')
shard_1_master.init_tablet( 'master', 'test_keyspace', '1')
shard_1_replica1.init_tablet('replica', 'test_keyspace', '1')
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
# run checks now before we start the tablets
utils.validate_topology()
# create databases, start the tablets
for t in [shard_0_master, shard_0_replica1, shard_0_replica2,
shard_0_rdonly, shard_0_backup, shard_1_master, shard_1_replica1]:
t.create_db('vt_test_keyspace')
t.start_vttablet(wait_for_state=None)
# wait for the tablets to start
shard_0_master.wait_for_vttablet_state('SERVING')
shard_0_replica1.wait_for_vttablet_state('SERVING')
shard_0_replica2.wait_for_vttablet_state('SERVING')
shard_0_rdonly.wait_for_vttablet_state('SERVING')
shard_0_backup.wait_for_vttablet_state('NOT_SERVING')
shard_1_master.wait_for_vttablet_state('SERVING')
shard_1_replica1.wait_for_vttablet_state('SERVING')
# make sure all replication is good
for t in [shard_0_master, shard_0_replica1, shard_0_replica2,
shard_0_rdonly, shard_0_backup, shard_1_master, shard_1_replica1]:
t.reset_replication()
utils.run_vtctl(['ReparentShard', '-force', 'test_keyspace/0', shard_0_master.tablet_alias], auto_log=True)
utils.run_vtctl(['ReparentShard', '-force', 'test_keyspace/1', shard_1_master.tablet_alias], auto_log=True)
utils.run_vtctl(['ValidateKeyspace', '-ping-tablets', 'test_keyspace'])
# check after all tablets are here and replication is fixed
utils.validate_topology(ping_tablets=True)
# shard 0: apply the schema using a complex schema upgrade, no
# reparenting yet
utils.run_vtctl(['ApplySchemaShard',
'-sql='+create_vt_select_test[0],
'test_keyspace/0'],
auto_log=True)
# check all expected hosts have the change:
# - master won't have it as it's a complex change
self._check_tables(shard_0_master, 0)
self._check_tables(shard_0_replica1, 1)
self._check_tables(shard_0_replica2, 1)
self._check_tables(shard_0_rdonly, 1)
self._check_tables(shard_0_backup, 1)
self._check_tables(shard_1_master, 0)
self._check_tables(shard_1_replica1, 0)
# shard 0: apply schema change to just master directly
# (to test its state is not changed)
utils.run_vtctl(['ApplySchema',
'-stop-replication',
'-sql='+create_vt_select_test[0],
shard_0_master.tablet_alias],
auto_log=True)
self._check_tables(shard_0_master, 1)
# shard 0: apply new schema change, with reparenting
utils.run_vtctl(['ApplySchemaShard',
'-new-parent='+shard_0_replica1.tablet_alias,
'-sql='+create_vt_select_test[1],
'test_keyspace/0'],
auto_log=True)
self._check_tables(shard_0_master, 1)
self._check_tables(shard_0_replica1, 2)
self._check_tables(shard_0_replica2, 2)
self._check_tables(shard_0_rdonly, 2)
self._check_tables(shard_0_backup, 2)
# verify GetSchema --tables works
s = utils.run_vtctl_json(['GetSchema', '--tables=vt_select_test0',
shard_0_replica1.tablet_alias])
self.assertEqual(len(s['TableDefinitions']), 1)
self.assertEqual(s['TableDefinitions'][0]['Name'], 'vt_select_test0')
# keyspace: try to apply a keyspace-wide schema change, should fail
# as the preflight would be different in both shards
out, err = utils.run_vtctl(['ApplySchemaKeyspace',
'-sql='+create_vt_select_test[2],
'test_keyspace'],
trap_output=True,
log_level='INFO',
raise_on_error=False)
if err.find('ApplySchemaKeyspace Shard 1 has inconsistent schema') == -1:
self.fail('Unexpected ApplySchemaKeyspace output: %s' % err)
if environment.topo_server_implementation == 'zookeeper':
utils.run_vtctl(['PurgeActions', '/zk/global/vt/keyspaces/test_keyspace/action'])
# shard 1: catch it up with simple updates
utils.run_vtctl(['ApplySchemaShard',
'-simple',
'-sql='+create_vt_select_test[0],
'test_keyspace/1'],
auto_log=True)
utils.run_vtctl(['ApplySchemaShard',
'-simple',
'-sql='+create_vt_select_test[1],
'test_keyspace/1'],
auto_log=True)
self._check_tables(shard_1_master, 2)
self._check_tables(shard_1_replica1, 2)
# keyspace: apply a keyspace-wide simple schema change, should work now
utils.run_vtctl(['ApplySchemaKeyspace',
'-simple',
'-sql='+create_vt_select_test[2],
'test_keyspace'],
auto_log=True)
# check all expected hosts have the change
self._check_tables(shard_0_master, 1) # was stuck a long time ago as scrap
self._check_tables(shard_0_replica1, 3) # current master
self._check_tables(shard_0_replica2, 3)
self._check_tables(shard_0_rdonly, 3)
self._check_tables(shard_0_backup, 3)
self._check_tables(shard_1_master, 3) # current master
self._check_tables(shard_1_replica1, 3)
# keyspace: apply a keyspace-wide complex schema change, should work too
utils.run_vtctl(['ApplySchemaKeyspace',
'-sql='+create_vt_select_test[3],
'test_keyspace'],
auto_log=True)
# check all expected hosts have the change:
# - master won't have it as it's a complex change
# - backup won't have it as IsReplicatingType is false
self._check_tables(shard_0_master, 1) # was stuck a long time ago as scrap
self._check_tables(shard_0_replica1, 3) # current master
self._check_tables(shard_0_replica2, 4)
self._check_tables(shard_0_rdonly, 4)
self._check_tables(shard_0_backup, 4)
self._check_tables(shard_1_master, 3) # current master
self._check_tables(shard_1_replica1, 4)
# now test action log pruning
if environment.topo_server_implementation == 'zookeeper':
oldLines = utils.zk_ls(shard_0_replica1.zk_tablet_path+'/actionlog')
oldCount = len(oldLines)
logging.debug("I have %u actionlog before", oldCount)
if oldCount <= 5:
self.fail('Not enough actionlog before: %u' % oldCount)
utils.run_vtctl(['PruneActionLogs', '-keep-count=5', '/zk/*/vt/tablets/*/actionlog'], auto_log=True)
newLines = utils.zk_ls(shard_0_replica1.zk_tablet_path+'/actionlog')
newCount = len(newLines)
logging.debug("I have %u actionlog after", newCount)
self.assertEqual(newCount, 5, 'Unexpected actionlog count after: %u' % newCount)
if oldLines[-5:] != newLines:
self.fail('Unexpected actionlog values:\n%s\n%s' %
(' '.join(oldLines[-5:]), ' '.join(newLines)))
utils.pause("Look at schema now!")
tablet.kill_tablets([shard_0_master, shard_0_replica1, shard_0_replica2,
shard_0_rdonly, shard_0_backup, shard_1_master,
shard_1_replica1])
if __name__ == '__main__':
utils.main()
|
bsd-3-clause
|
Ban3/Limnoria
|
plugins/Todo/test.py
|
4
|
6607
|
###
# Copyright (c) 2003-2005, Daniel DiPaolo
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
class TodoTestCase(PluginTestCase):
plugins = ('Todo', 'User', 'Config')
_user1 = 'foo!bar@baz'
_user2 = 'bar!foo@baz'
def setUp(self):
PluginTestCase.setUp(self)
# Create a valid user to use
self.prefix = self._user2
self.assertNotError('register testy oom')
self.prefix = self._user1
self.assertNotError('register tester moo')
def testTodo(self):
# Should not error, but no tasks yet.
self.assertNotError('todo')
self.assertRegexp('todo', 'You have no tasks')
# Add a task
self.assertNotError('todo add wash my car')
self.assertRegexp('todo', '#1: wash my car')
# Check that task
self.assertRegexp('todo 1',
'Todo for tester: wash my car \(Added .*?\)')
# Check that it lists all my tasks when given my name
self.assertResponse('todo tester',
'Todo for tester: #1: wash my car')
# Check pluralization
self.assertNotError('todo add moo')
self.assertRegexp('todo tester',
'Todos for tester: #1: wash my car and #2: moo')
# Check error
self.assertError('todo asfas')
self.assertRegexp('todo asfas',
'Error: \'asfas\' is not a valid task')
# Check priority sorting
self.assertNotError('todo setpriority 1 100')
self.assertNotError('todo setpriority 2 10')
self.assertRegexp('todo', '#2: moo and #1: wash my car')
# Check permissions
self.prefix = self._user2
self.assertError('todo tester')
self.assertNotRegexp('todo tester', 'task id')
self.prefix = self._user1
self.assertNotError('todo tester')
self.assertNotError('config plugins.Todo.allowThirdpartyReader True')
self.prefix = self._user2
self.assertNotError('todo tester')
self.prefix = self._user1
self.assertNotError('todo tester')
def testAddtodo(self):
self.assertNotError('todo add code a new plugin')
self.assertNotError('todo add --priority=1000 fix all bugs')
def testRemovetodo(self):
self.nick = 'testy'
self.prefix = self._user2
self.assertNotError('todo add do something')
self.assertNotError('todo add do something else')
self.assertNotError('todo add do something again')
self.assertNotError('todo remove 1')
self.assertNotError('todo 1')
self.nick = 'tester'
self.prefix = self._user1
self.assertNotError('todo add make something')
self.assertNotError('todo add make something else')
self.assertNotError('todo add make something again')
self.assertNotError('todo remove 1 3')
self.assertRegexp('todo 1', r'Inactive')
self.assertRegexp('todo 3', r'Inactive')
self.assertNotError('todo')
def testSearchtodo(self):
self.assertNotError('todo add task number one')
self.assertRegexp('todo search task*', '#1: task number one')
self.assertRegexp('todo search number', '#1: task number one')
self.assertNotError('todo add task number two is much longer than'
' task number one')
self.assertRegexp('todo search task*',
'#1: task number one and #2: task number two is '
'much longer than task number...')
self.assertError('todo search --regexp s/bustedregex')
self.assertRegexp('todo search --regexp m/task/',
'#1: task number one and #2: task number two is '
'much longer than task number...')
def testSetPriority(self):
self.assertNotError('todo add --priority=1 moo')
self.assertRegexp('todo 1',
'moo, priority: 1 \(Added at .*?\)')
self.assertNotError('setpriority 1 50')
self.assertRegexp('todo 1',
'moo, priority: 50 \(Added at .*?\)')
self.assertNotError('setpriority 1 0')
self.assertRegexp('todo 1', 'moo \(Added at .*?\)')
def testChangeTodo(self):
self.assertNotError('todo add moo')
self.assertError('todo change 1 asdfas')
self.assertError('todo change 1 m/asdfaf//')
self.assertNotError('todo change 1 s/moo/foo/')
self.assertRegexp('todo 1', 'Todo for tester: foo \(Added .*?\)')
def testActiveInactiveTodo(self):
self.assertNotError('todo add foo')
self.assertNotError('todo add bar')
self.assertRegexp('todo 1', 'Active')
self.assertRegexp('todo 2', 'Active')
self.assertNotError('todo remove 1')
self.assertRegexp('todo 1', 'Inactive')
self.assertRegexp('todo 2', 'Active')
self.assertNotError('todo remove 2')
self.assertRegexp('todo 2', 'Inactive')
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
bsd-3-clause
|
keedio/sahara
|
sahara/tests/unit/service/validation/edp/test_job.py
|
12
|
7048
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from sahara import exceptions as ex
from sahara.service.validations.edp import job as j
from sahara.tests.unit.service.validation import utils as u
from sahara.utils import edp
class TestJobValidation(u.ValidationTestCase):
def setUp(self):
super(TestJobValidation, self).setUp()
self._create_object_fun = j.check_mains_libs
self.scheme = j.JOB_SCHEMA
def test_bad_job_type_rejected(self):
self._assert_create_object_validation(
data={
"name": "jar.jar",
"type": "Jar",
},
bad_req_i=(1, "VALIDATION_ERROR",
"'Jar' is not one of " + str(edp.JOB_TYPES_ALL)))
@mock.patch('sahara.service.edp.api.get_job_binary')
def test_check_binaries(self, get_job_binary):
get_job_binary.return_value = "value"
j._check_binaries(["one", "two"])
get_job_binary.return_value = None
self.assertRaises(ex.NotFoundException,
j._check_binaries,
["one", "two"])
def test_mains_required_libs_optional(self):
msg = "%s flow requires main script"
spark_msg = "%s job requires main application jar"
values = ((edp.JOB_TYPE_PIG, msg),
(edp.JOB_TYPE_HIVE, msg),
(edp.JOB_TYPE_SPARK, spark_msg))
for job_type, msg in values:
self._test_mains_required_libs_optional(job_type,
msg % job_type)
def test_no_mains_libs_required(self):
for job_type in (edp.JOB_TYPE_JAVA, edp.JOB_TYPE_MAPREDUCE):
self._test_no_mains_libs_required(job_type)
def test_no_mains_libs_optional(self):
for job_type in (edp.JOB_TYPE_MAPREDUCE_STREAMING,):
self._test_no_mains_libs_optional(job_type)
@mock.patch('sahara.service.validations.edp.job._check_binaries')
def _test_mains_required_libs_optional(self, job_type, no_mains_msg,
_check_binaries):
libs = ["lib1", "lib2"]
mains = ["main"]
# No mains, should raise an exception
data = {
"name": "job",
"type": job_type,
"libs": libs
}
self._assert_create_object_validation(
data=data, bad_req_i=(1, "INVALID_DATA", no_mains_msg))
# Mains and libs overlap, should raise an exception
data = {
"name": "job",
"type": job_type,
"libs": libs,
"mains": libs[1:]
}
self._assert_create_object_validation(
data=data, bad_req_i=(1, "INVALID_DATA",
"'mains' and 'libs' overlap"))
# Everything is okay, mains and libs
data = {
"name": "job",
"type": job_type,
"libs": libs,
"mains": mains
}
self._assert_create_object_validation(data=data)
_check_binaries._assert_called_with(data["libs"])
_check_binaries._assert_called_with(data["mains"])
_check_binaries.reset_mock()
# Everything is okay, just mains
data = {
"name": "job",
"type": job_type,
"libs": [],
"mains": mains
}
self._assert_create_object_validation(data=data)
_check_binaries._assert_called_with(data["libs"])
_check_binaries._assert_called_with(data["mains"])
@mock.patch('sahara.service.validations.edp.job._check_binaries')
def _test_no_mains_libs_required(self, job_type, _check_binaries):
libs = ["lib1", "lib2"]
mains = ["main"]
# Just mains, should raise an exception
data = {
"name": "job",
"type": job_type,
"libs": [],
"mains": mains
}
self._assert_create_object_validation(
data=data, bad_req_i=(1, "INVALID_DATA",
"%s flow requires libs" % job_type))
# Libs and mains, should raise an exception
data = {
"name": "job",
"type": job_type,
"libs": libs,
"mains": mains
}
self._assert_create_object_validation(
data=data, bad_req_i=(1, "INVALID_DATA",
"%s flow does not use mains" % job_type))
# Everything is okay, libs but no mains
data = {
"name": "job",
"type": job_type,
"libs": libs,
"mains": []
}
self._assert_create_object_validation(data=data)
_check_binaries._assert_called_with(data["libs"])
_check_binaries._assert_called_with(data["mains"])
@mock.patch('sahara.service.validations.edp.job._check_binaries')
def _test_no_mains_libs_optional(self, job_type, _check_binaries):
libs = ["lib1", "lib2"]
mains = ["main"]
# Just mains, should raise an exception
data = {
"name": "job",
"type": job_type,
"libs": [],
"mains": mains
}
self._assert_create_object_validation(
data=data, bad_req_i=(1, "INVALID_DATA",
"%s flow does not use mains" % job_type))
# Libs and mains, should raise an exception
data = {
"name": "job",
"type": job_type,
"libs": libs,
"mains": mains
}
self._assert_create_object_validation(
data=data, bad_req_i=(1, "INVALID_DATA",
"%s flow does not use mains" % job_type))
# Everything is okay, libs but no mains
data = {
"name": "job",
"type": job_type,
"libs": libs,
"mains": []
}
self._assert_create_object_validation(data=data)
_check_binaries._assert_called_with(data["libs"])
_check_binaries._assert_called_with(data["mains"])
_check_binaries.reset_mock()
# Everything is okay, no libs or mains
data = {
"name": "job",
"type": job_type,
"libs": [],
"mains": []
}
self._assert_create_object_validation(data=data)
_check_binaries._assert_called_with(data["libs"])
_check_binaries._assert_called_with(data["mains"])
|
apache-2.0
|
gurneyalex/connector-magento
|
__unported__/magentoerpconnect/tests/__init__.py
|
3
|
1327
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2012 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_synchronization
from . import test_address_book
from . import test_export_invoice
from . import test_import_product_image
from . import test_related_action
from . import test_sale_order
fast_suite = [
]
checks = [
test_synchronization,
test_address_book,
test_export_invoice,
test_import_product_image,
test_related_action,
test_sale_order,
]
|
agpl-3.0
|
saumishr/django
|
django/contrib/messages/storage/base.py
|
399
|
6134
|
from django.conf import settings
from django.utils.encoding import force_unicode, StrAndUnicode
from django.contrib.messages import constants, utils
LEVEL_TAGS = utils.get_level_tags()
class Message(StrAndUnicode):
"""
Represents an actual message that can be stored in any of the supported
storage classes (typically session- or cookie-based) and rendered in a view
or template.
"""
def __init__(self, level, message, extra_tags=None):
self.level = int(level)
self.message = message
self.extra_tags = extra_tags
def _prepare(self):
"""
Prepares the message for serialization by forcing the ``message``
and ``extra_tags`` to unicode in case they are lazy translations.
Known "safe" types (None, int, etc.) are not converted (see Django's
``force_unicode`` implementation for details).
"""
self.message = force_unicode(self.message, strings_only=True)
self.extra_tags = force_unicode(self.extra_tags, strings_only=True)
def __eq__(self, other):
return isinstance(other, Message) and self.level == other.level and \
self.message == other.message
def __unicode__(self):
return force_unicode(self.message)
def _get_tags(self):
label_tag = force_unicode(LEVEL_TAGS.get(self.level, ''),
strings_only=True)
extra_tags = force_unicode(self.extra_tags, strings_only=True)
if extra_tags and label_tag:
return u' '.join([extra_tags, label_tag])
elif extra_tags:
return extra_tags
elif label_tag:
return label_tag
return ''
tags = property(_get_tags)
class BaseStorage(object):
"""
This is the base backend for temporary message storage.
This is not a complete class; to be a usable storage backend, it must be
subclassed and the two methods ``_get`` and ``_store`` overridden.
"""
def __init__(self, request, *args, **kwargs):
self.request = request
self._queued_messages = []
self.used = False
self.added_new = False
super(BaseStorage, self).__init__(*args, **kwargs)
def __len__(self):
return len(self._loaded_messages) + len(self._queued_messages)
def __iter__(self):
self.used = True
if self._queued_messages:
self._loaded_messages.extend(self._queued_messages)
self._queued_messages = []
return iter(self._loaded_messages)
def __contains__(self, item):
return item in self._loaded_messages or item in self._queued_messages
@property
def _loaded_messages(self):
"""
Returns a list of loaded messages, retrieving them first if they have
not been loaded yet.
"""
if not hasattr(self, '_loaded_data'):
messages, all_retrieved = self._get()
self._loaded_data = messages or []
return self._loaded_data
def _get(self, *args, **kwargs):
"""
Retrieves a list of stored messages. Returns a tuple of the messages
and a flag indicating whether or not all the messages originally
intended to be stored in this storage were, in fact, stored and
retrieved; e.g., ``(messages, all_retrieved)``.
**This method must be implemented by a subclass.**
If it is possible to tell if the backend was not used (as opposed to
just containing no messages) then ``None`` should be returned in
place of ``messages``.
"""
raise NotImplementedError()
def _store(self, messages, response, *args, **kwargs):
"""
Stores a list of messages, returning a list of any messages which could
not be stored.
One type of object must be able to be stored, ``Message``.
**This method must be implemented by a subclass.**
"""
raise NotImplementedError()
def _prepare_messages(self, messages):
"""
Prepares a list of messages for storage.
"""
for message in messages:
message._prepare()
def update(self, response):
"""
Stores all unread messages.
If the backend has yet to be iterated, previously stored messages will
be stored again. Otherwise, only messages added after the last
iteration will be stored.
"""
self._prepare_messages(self._queued_messages)
if self.used:
return self._store(self._queued_messages, response)
elif self.added_new:
messages = self._loaded_messages + self._queued_messages
return self._store(messages, response)
def add(self, level, message, extra_tags=''):
"""
Queues a message to be stored.
The message is only queued if it contained something and its level is
not less than the recording level (``self.level``).
"""
if not message:
return
# Check that the message level is not less than the recording level.
level = int(level)
if level < self.level:
return
# Add the message.
self.added_new = True
message = Message(level, message, extra_tags=extra_tags)
self._queued_messages.append(message)
def _get_level(self):
"""
Returns the minimum recorded level.
The default level is the ``MESSAGE_LEVEL`` setting. If this is
not found, the ``INFO`` level is used.
"""
if not hasattr(self, '_level'):
self._level = getattr(settings, 'MESSAGE_LEVEL', constants.INFO)
return self._level
def _set_level(self, value=None):
"""
Sets a custom minimum recorded level.
If set to ``None``, the default level will be used (see the
``_get_level`` method).
"""
if value is None and hasattr(self, '_level'):
del self._level
else:
self._level = int(value)
level = property(_get_level, _set_level, _set_level)
|
bsd-3-clause
|
Ervii/garage-time
|
garage/src/python/pants/backend/jvm/tasks/jvm_compile/analysis_tools.py
|
2
|
3954
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import os
import shutil
from pants.base.build_environment import get_buildroot
from pants.util.contextutil import temporary_dir
class AnalysisTools(object):
"""Analysis manipulation methods required by JvmCompile."""
_IVY_HOME_PLACEHOLDER = '/_IVY_HOME_PLACEHOLDER'
_PANTS_HOME_PLACEHOLDER = '/_PANTS_HOME_PLACEHOLDER'
def __init__(self, context, parser, analysis_cls):
self.parser = parser
self._java_home = context.java_home
self._ivy_home = context.ivy_home
self._pants_home = get_buildroot()
self._analysis_cls = analysis_cls
def split_to_paths(self, analysis_path, split_path_pairs, catchall_path=None):
"""Split an analysis file.
split_path_pairs: A list of pairs (split, output_path) where split is a list of source files
whose analysis is to be split out into output_path. The source files may either be
absolute paths, or relative to the build root.
If catchall_path is specified, the analysis for any sources not mentioned in the splits is
split out to that path.
"""
analysis = self.parser.parse_from_path(analysis_path)
splits, output_paths = zip(*split_path_pairs)
split_analyses = analysis.split(splits, catchall_path is not None)
if catchall_path is not None:
output_paths = output_paths + (catchall_path, )
for analysis, path in zip(split_analyses, output_paths):
analysis.write_to_path(path)
def merge_from_paths(self, analysis_paths, merged_analysis_path):
"""Merge multiple analysis files into one."""
analyses = [self.parser.parse_from_path(path) for path in analysis_paths]
merged_analysis = self._analysis_cls.merge(analyses)
merged_analysis.write_to_path(merged_analysis_path)
def relativize(self, src_analysis, relativized_analysis):
with temporary_dir() as tmp_analysis_dir:
tmp_analysis_file = os.path.join(tmp_analysis_dir, 'analysis.relativized')
# NOTE: We can't port references to deps on the Java home. This is because different JVM
# implementations on different systems have different structures, and there's not
# necessarily a 1-1 mapping between Java jars on different systems. Instead we simply
# drop those references from the analysis file.
#
# In practice the JVM changes rarely, and it should be fine to require a full rebuild
# in those rare cases.
rebasings = [
(self._java_home, None),
(self._ivy_home, self._IVY_HOME_PLACEHOLDER),
(self._pants_home, self._PANTS_HOME_PLACEHOLDER),
]
# Work on a tmpfile, for safety.
self._rebase_from_path(src_analysis, tmp_analysis_file, rebasings)
shutil.move(tmp_analysis_file, relativized_analysis)
def localize(self, src_analysis, localized_analysis):
with temporary_dir() as tmp_analysis_dir:
tmp_analysis_file = os.path.join(tmp_analysis_dir, 'analysis')
rebasings = [
(AnalysisTools._IVY_HOME_PLACEHOLDER, self._ivy_home),
(AnalysisTools._PANTS_HOME_PLACEHOLDER, self._pants_home),
]
# Work on a tmpfile, for safety.
self._rebase_from_path(src_analysis, tmp_analysis_file, rebasings)
shutil.move(tmp_analysis_file, localized_analysis)
def _rebase_from_path(self, input_analysis_path, output_analysis_path, rebasings):
"""Rebase file paths in an analysis file.
rebasings: A list of path prefix pairs [from_prefix, to_prefix] to rewrite.
to_prefix may be None, in which case matching paths are removed entirely.
"""
analysis = self.parser.parse_from_path(input_analysis_path)
analysis.write_to_path(output_analysis_path, rebasings=rebasings)
|
apache-2.0
|
whs/django
|
tests/m2m_through_regress/tests.py
|
71
|
11667
|
from io import StringIO
from django.contrib.auth.models import User
from django.core import management
from django.test import TestCase
from .models import (
Car, CarDriver, Driver, Group, Membership, Person, UserMembership,
)
class M2MThroughTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.bob = Person.objects.create(name="Bob")
cls.jim = Person.objects.create(name="Jim")
cls.rock = Group.objects.create(name="Rock")
cls.roll = Group.objects.create(name="Roll")
cls.frank = User.objects.create_user("frank", "[email protected]", "password")
cls.jane = User.objects.create_user("jane", "[email protected]", "password")
# normal intermediate model
cls.bob_rock = Membership.objects.create(person=cls.bob, group=cls.rock)
cls.bob_roll = Membership.objects.create(person=cls.bob, group=cls.roll, price=50)
cls.jim_rock = Membership.objects.create(person=cls.jim, group=cls.rock, price=50)
# intermediate model with custom id column
cls.frank_rock = UserMembership.objects.create(user=cls.frank, group=cls.rock)
cls.frank_roll = UserMembership.objects.create(user=cls.frank, group=cls.roll)
cls.jane_rock = UserMembership.objects.create(user=cls.jane, group=cls.rock)
def test_retrieve_reverse_m2m_items(self):
self.assertQuerysetEqual(
self.bob.group_set.all(), [
"<Group: Rock>",
"<Group: Roll>",
],
ordered=False
)
def test_retrieve_forward_m2m_items(self):
self.assertQuerysetEqual(
self.roll.members.all(), [
"<Person: Bob>",
]
)
def test_cannot_use_setattr_on_reverse_m2m_with_intermediary_model(self):
msg = (
"Cannot set values on a ManyToManyField which specifies an "
"intermediary model. Use m2m_through_regress.Membership's Manager "
"instead."
)
with self.assertRaisesMessage(AttributeError, msg):
self.bob.group_set.set([])
def test_cannot_use_setattr_on_forward_m2m_with_intermediary_model(self):
msg = (
"Cannot set values on a ManyToManyField which specifies an "
"intermediary model. Use m2m_through_regress.Membership's Manager "
"instead."
)
with self.assertRaisesMessage(AttributeError, msg):
self.roll.members.set([])
def test_cannot_use_create_on_m2m_with_intermediary_model(self):
msg = (
"Cannot use create() on a ManyToManyField which specifies an "
"intermediary model. Use m2m_through_regress.Membership's "
"Manager instead."
)
with self.assertRaisesMessage(AttributeError, msg):
self.rock.members.create(name="Anne")
def test_cannot_use_create_on_reverse_m2m_with_intermediary_model(self):
msg = (
"Cannot use create() on a ManyToManyField which specifies an "
"intermediary model. Use m2m_through_regress.Membership's "
"Manager instead."
)
with self.assertRaisesMessage(AttributeError, msg):
self.bob.group_set.create(name="Funk")
def test_retrieve_reverse_m2m_items_via_custom_id_intermediary(self):
self.assertQuerysetEqual(
self.frank.group_set.all(), [
"<Group: Rock>",
"<Group: Roll>",
],
ordered=False
)
def test_retrieve_forward_m2m_items_via_custom_id_intermediary(self):
self.assertQuerysetEqual(
self.roll.user_members.all(), [
"<User: frank>",
]
)
def test_join_trimming_forwards(self):
"""
Too many copies of the intermediate table aren't involved when doing a
join (#8046, #8254).
"""
self.assertQuerysetEqual(
self.rock.members.filter(membership__price=50), [
"<Person: Jim>",
]
)
def test_join_trimming_reverse(self):
self.assertQuerysetEqual(
self.bob.group_set.filter(membership__price=50), [
"<Group: Roll>",
]
)
class M2MThroughSerializationTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.bob = Person.objects.create(name="Bob")
cls.roll = Group.objects.create(name="Roll")
cls.bob_roll = Membership.objects.create(person=cls.bob, group=cls.roll)
def test_serialization(self):
"m2m-through models aren't serialized as m2m fields. Refs #8134"
pks = {"p_pk": self.bob.pk, "g_pk": self.roll.pk, "m_pk": self.bob_roll.pk}
out = StringIO()
management.call_command("dumpdata", "m2m_through_regress", format="json", stdout=out)
self.assertJSONEqual(
out.getvalue().strip(),
'[{"pk": %(m_pk)s, "model": "m2m_through_regress.membership", "fields": {"person": %(p_pk)s, "price": '
'100, "group": %(g_pk)s}}, {"pk": %(p_pk)s, "model": "m2m_through_regress.person", "fields": {"name": '
'"Bob"}}, {"pk": %(g_pk)s, "model": "m2m_through_regress.group", "fields": {"name": "Roll"}}]'
% pks
)
out = StringIO()
management.call_command("dumpdata", "m2m_through_regress", format="xml", indent=2, stdout=out)
self.assertXMLEqual(out.getvalue().strip(), """
<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object pk="%(m_pk)s" model="m2m_through_regress.membership">
<field to="m2m_through_regress.person" name="person" rel="ManyToOneRel">%(p_pk)s</field>
<field to="m2m_through_regress.group" name="group" rel="ManyToOneRel">%(g_pk)s</field>
<field type="IntegerField" name="price">100</field>
</object>
<object pk="%(p_pk)s" model="m2m_through_regress.person">
<field type="CharField" name="name">Bob</field>
</object>
<object pk="%(g_pk)s" model="m2m_through_regress.group">
<field type="CharField" name="name">Roll</field>
</object>
</django-objects>
""".strip() % pks)
class ToFieldThroughTests(TestCase):
def setUp(self):
self.car = Car.objects.create(make="Toyota")
self.driver = Driver.objects.create(name="Ryan Briscoe")
CarDriver.objects.create(car=self.car, driver=self.driver)
# We are testing if wrong objects get deleted due to using wrong
# field value in m2m queries. So, it is essential that the pk
# numberings do not match.
# Create one intentionally unused driver to mix up the autonumbering
self.unused_driver = Driver.objects.create(name="Barney Gumble")
# And two intentionally unused cars.
self.unused_car1 = Car.objects.create(make="Trabant")
self.unused_car2 = Car.objects.create(make="Wartburg")
def test_to_field(self):
self.assertQuerysetEqual(
self.car.drivers.all(),
["<Driver: Ryan Briscoe>"]
)
def test_to_field_reverse(self):
self.assertQuerysetEqual(
self.driver.car_set.all(),
["<Car: Toyota>"]
)
def test_to_field_clear_reverse(self):
self.driver.car_set.clear()
self.assertQuerysetEqual(
self.driver.car_set.all(), [])
def test_to_field_clear(self):
self.car.drivers.clear()
self.assertQuerysetEqual(
self.car.drivers.all(), [])
# Low level tests for _add_items and _remove_items. We test these methods
# because .add/.remove aren't available for m2m fields with through, but
# through is the only way to set to_field currently. We do want to make
# sure these methods are ready if the ability to use .add or .remove with
# to_field relations is added some day.
def test_add(self):
self.assertQuerysetEqual(
self.car.drivers.all(),
["<Driver: Ryan Briscoe>"]
)
# Yikes - barney is going to drive...
self.car.drivers._add_items('car', 'driver', self.unused_driver)
self.assertQuerysetEqual(
self.car.drivers.all(),
["<Driver: Barney Gumble>", "<Driver: Ryan Briscoe>"]
)
def test_m2m_relations_unusable_on_null_to_field(self):
nullcar = Car(make=None)
msg = (
'"<Car: None>" needs to have a value for field "make" before this '
'many-to-many relationship can be used.'
)
with self.assertRaisesMessage(ValueError, msg):
nullcar.drivers.all()
def test_m2m_relations_unusable_on_null_pk_obj(self):
msg = (
"'Car' instance needs to have a primary key value before a "
"many-to-many relationship can be used."
)
with self.assertRaisesMessage(ValueError, msg):
Car(make='Ford').drivers.all()
def test_add_related_null(self):
nulldriver = Driver.objects.create(name=None)
msg = 'Cannot add "<Driver: None>": the value for field "driver" is None'
with self.assertRaisesMessage(ValueError, msg):
self.car.drivers._add_items('car', 'driver', nulldriver)
def test_add_reverse(self):
car2 = Car.objects.create(make="Honda")
self.assertQuerysetEqual(
self.driver.car_set.all(),
["<Car: Toyota>"]
)
self.driver.car_set._add_items('driver', 'car', car2)
self.assertQuerysetEqual(
self.driver.car_set.all(),
["<Car: Toyota>", "<Car: Honda>"],
ordered=False
)
def test_add_null_reverse(self):
nullcar = Car.objects.create(make=None)
msg = 'Cannot add "<Car: None>": the value for field "car" is None'
with self.assertRaisesMessage(ValueError, msg):
self.driver.car_set._add_items('driver', 'car', nullcar)
def test_add_null_reverse_related(self):
nulldriver = Driver.objects.create(name=None)
msg = (
'"<Driver: None>" needs to have a value for field "name" before '
'this many-to-many relationship can be used.'
)
with self.assertRaisesMessage(ValueError, msg):
nulldriver.car_set._add_items('driver', 'car', self.car)
def test_remove(self):
self.assertQuerysetEqual(
self.car.drivers.all(),
["<Driver: Ryan Briscoe>"]
)
self.car.drivers._remove_items('car', 'driver', self.driver)
self.assertQuerysetEqual(
self.car.drivers.all(), [])
def test_remove_reverse(self):
self.assertQuerysetEqual(
self.driver.car_set.all(),
["<Car: Toyota>"]
)
self.driver.car_set._remove_items('driver', 'car', self.car)
self.assertQuerysetEqual(
self.driver.car_set.all(), [])
class ThroughLoadDataTestCase(TestCase):
fixtures = ["m2m_through"]
def test_sequence_creation(self):
"""
Sequences on an m2m_through are created for the through model, not a
phantom auto-generated m2m table (#11107).
"""
out = StringIO()
management.call_command("dumpdata", "m2m_through_regress", format="json", stdout=out)
self.assertJSONEqual(
out.getvalue().strip(),
'[{"pk": 1, "model": "m2m_through_regress.usermembership", "fields": {"price": 100, "group": 1, "user"'
': 1}}, {"pk": 1, "model": "m2m_through_regress.person", "fields": {"name": "Guido"}}, {"pk": 1, '
'"model": "m2m_through_regress.group", "fields": {"name": "Python Core Group"}}]'
)
|
bsd-3-clause
|
christer155/Django-facebook
|
docs/docs_env/Lib/site-packages/pip-1.0-py2.5.egg/pip/vcs/subversion.py
|
25
|
10027
|
import os
import re
from pip import call_subprocess
from pip.index import Link
from pip.util import rmtree, display_path
from pip.log import logger
from pip.vcs import vcs, VersionControl
_svn_xml_url_re = re.compile('url="([^"]+)"')
_svn_rev_re = re.compile('committed-rev="(\d+)"')
_svn_url_re = re.compile(r'URL: (.+)')
_svn_revision_re = re.compile(r'Revision: (.+)')
class Subversion(VersionControl):
name = 'svn'
dirname = '.svn'
repo_name = 'checkout'
schemes = ('svn', 'svn+ssh', 'svn+http', 'svn+https')
bundle_file = 'svn-checkout.txt'
guide = ('# This was an svn checkout; to make it a checkout again run:\n'
'svn checkout --force -r %(rev)s %(url)s .\n')
def get_info(self, location):
"""Returns (url, revision), where both are strings"""
assert not location.rstrip('/').endswith(self.dirname), 'Bad directory: %s' % location
output = call_subprocess(
[self.cmd, 'info', location], show_stdout=False, extra_environ={'LANG': 'C'})
match = _svn_url_re.search(output)
if not match:
logger.warn('Cannot determine URL of svn checkout %s' % display_path(location))
logger.info('Output that cannot be parsed: \n%s' % output)
return None, None
url = match.group(1).strip()
match = _svn_revision_re.search(output)
if not match:
logger.warn('Cannot determine revision of svn checkout %s' % display_path(location))
logger.info('Output that cannot be parsed: \n%s' % output)
return url, None
return url, match.group(1)
def parse_vcs_bundle_file(self, content):
for line in content.splitlines():
if not line.strip() or line.strip().startswith('#'):
continue
match = re.search(r'^-r\s*([^ ])?', line)
if not match:
return None, None
rev = match.group(1)
rest = line[match.end():].strip().split(None, 1)[0]
return rest, rev
return None, None
def export(self, location):
"""Export the svn repository at the url to the destination location"""
url, rev = self.get_url_rev()
logger.notify('Exporting svn repository %s to %s' % (url, location))
logger.indent += 2
try:
if os.path.exists(location):
# Subversion doesn't like to check out over an existing directory
# --force fixes this, but was only added in svn 1.5
rmtree(location)
call_subprocess(
[self.cmd, 'export', url, location],
filter_stdout=self._filter, show_stdout=False)
finally:
logger.indent -= 2
def switch(self, dest, url, rev_options):
call_subprocess(
[self.cmd, 'switch'] + rev_options + [url, dest])
def update(self, dest, rev_options):
call_subprocess(
[self.cmd, 'update'] + rev_options + [dest])
def obtain(self, dest):
url, rev = self.get_url_rev()
if rev:
rev_options = ['-r', rev]
rev_display = ' (to revision %s)' % rev
else:
rev_options = []
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.notify('Checking out %s%s to %s'
% (url, rev_display, display_path(dest)))
call_subprocess(
[self.cmd, 'checkout', '-q'] + rev_options + [url, dest])
def get_location(self, dist, dependency_links):
for url in dependency_links:
egg_fragment = Link(url).egg_fragment
if not egg_fragment:
continue
if '-' in egg_fragment:
## FIXME: will this work when a package has - in the name?
key = '-'.join(egg_fragment.split('-')[:-1]).lower()
else:
key = egg_fragment
if key == dist.key:
return url.split('#', 1)[0]
return None
def get_revision(self, location):
"""
Return the maximum revision for all files under a given location
"""
# Note: taken from setuptools.command.egg_info
revision = 0
for base, dirs, files in os.walk(location):
if self.dirname not in dirs:
dirs[:] = []
continue # no sense walking uncontrolled subdirs
dirs.remove(self.dirname)
entries_fn = os.path.join(base, self.dirname, 'entries')
if not os.path.exists(entries_fn):
## FIXME: should we warn?
continue
f = open(entries_fn)
data = f.read()
f.close()
if data.startswith('8') or data.startswith('9') or data.startswith('10'):
data = list(map(str.splitlines, data.split('\n\x0c\n')))
del data[0][0] # get rid of the '8'
dirurl = data[0][3]
revs = [int(d[9]) for d in data if len(d)>9 and d[9]]+[0]
if revs:
localrev = max(revs)
else:
localrev = 0
elif data.startswith('<?xml'):
dirurl = _svn_xml_url_re.search(data).group(1) # get repository URL
revs = [int(m.group(1)) for m in _svn_rev_re.finditer(data)]+[0]
if revs:
localrev = max(revs)
else:
localrev = 0
else:
logger.warn("Unrecognized .svn/entries format; skipping %s", base)
dirs[:] = []
continue
if base == location:
base_url = dirurl+'/' # save the root url
elif not dirurl.startswith(base_url):
dirs[:] = []
continue # not part of the same svn tree, skip it
revision = max(revision, localrev)
return revision
def get_url_rev(self):
# hotfix the URL scheme after removing svn+ from svn+ssh:// readd it
url, rev = super(Subversion, self).get_url_rev()
if url.startswith('ssh://'):
url = 'svn+' + url
return url, rev
def get_url(self, location):
# In cases where the source is in a subdirectory, not alongside setup.py
# we have to look up in the location until we find a real setup.py
orig_location = location
while not os.path.exists(os.path.join(location, 'setup.py')):
last_location = location
location = os.path.dirname(location)
if location == last_location:
# We've traversed up to the root of the filesystem without finding setup.py
logger.warn("Could not find setup.py for directory %s (tried all parent directories)"
% orig_location)
return None
f = open(os.path.join(location, self.dirname, 'entries'))
data = f.read()
f.close()
if data.startswith('8') or data.startswith('9') or data.startswith('10'):
data = list(map(str.splitlines, data.split('\n\x0c\n')))
del data[0][0] # get rid of the '8'
return data[0][3]
elif data.startswith('<?xml'):
match = _svn_xml_url_re.search(data)
if not match:
raise ValueError('Badly formatted data: %r' % data)
return match.group(1) # get repository URL
else:
logger.warn("Unrecognized .svn/entries format in %s" % location)
# Or raise exception?
return None
def get_tag_revs(self, svn_tag_url):
stdout = call_subprocess(
[self.cmd, 'ls', '-v', svn_tag_url], show_stdout=False)
results = []
for line in stdout.splitlines():
parts = line.split()
rev = int(parts[0])
tag = parts[-1].strip('/')
results.append((tag, rev))
return results
def find_tag_match(self, rev, tag_revs):
best_match_rev = None
best_tag = None
for tag, tag_rev in tag_revs:
if (tag_rev > rev and
(best_match_rev is None or best_match_rev > tag_rev)):
# FIXME: Is best_match > tag_rev really possible?
# or is it a sign something is wacky?
best_match_rev = tag_rev
best_tag = tag
return best_tag
def get_src_requirement(self, dist, location, find_tags=False):
repo = self.get_url(location)
if repo is None:
return None
parts = repo.split('/')
## FIXME: why not project name?
egg_project_name = dist.egg_name().split('-', 1)[0]
rev = self.get_revision(location)
if parts[-2] in ('tags', 'tag'):
# It's a tag, perfect!
full_egg_name = '%s-%s' % (egg_project_name, parts[-1])
elif parts[-2] in ('branches', 'branch'):
# It's a branch :(
full_egg_name = '%s-%s-r%s' % (dist.egg_name(), parts[-1], rev)
elif parts[-1] == 'trunk':
# Trunk :-/
full_egg_name = '%s-dev_r%s' % (dist.egg_name(), rev)
if find_tags:
tag_url = '/'.join(parts[:-1]) + '/tags'
tag_revs = self.get_tag_revs(tag_url)
match = self.find_tag_match(rev, tag_revs)
if match:
logger.notify('trunk checkout %s seems to be equivalent to tag %s' % match)
repo = '%s/%s' % (tag_url, match)
full_egg_name = '%s-%s' % (egg_project_name, match)
else:
# Don't know what it is
logger.warn('svn URL does not fit normal structure (tags/branches/trunk): %s' % repo)
full_egg_name = '%s-dev_r%s' % (egg_project_name, rev)
return 'svn+%s@%s#egg=%s' % (repo, rev, full_egg_name)
vcs.register(Subversion)
|
bsd-3-clause
|
reiinakano/scikit-plot
|
scikitplot/plotters.py
|
1
|
44073
|
"""
This module contains a more flexible API for Scikit-plot users, exposing
simple functions to generate plots.
"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
import warnings
import itertools
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import label_binarize
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.utils.multiclass import unique_labels
from sklearn.model_selection import learning_curve
from sklearn.base import clone
from sklearn.metrics import silhouette_score
from sklearn.metrics import silhouette_samples
from sklearn.utils import deprecated
from scipy import interp
from scikitplot.helpers import binary_ks_curve, validate_labels
warnings.warn("This module was deprecated in version 0.3.0 and its functions "
"are spread throughout different modules. Please check the "
"documentation and update your function calls as soon as "
"possible. This module will be removed in 0.4.0",
DeprecationWarning)
@deprecated('This will be removed in v0.4.0. Please use '
'scikitplot.metrics.plot_confusion_matrix instead.')
def plot_confusion_matrix(y_true, y_pred, labels=None, true_labels=None,
pred_labels=None, title=None, normalize=False,
hide_zeros=False, x_tick_rotation=0, ax=None,
figsize=None, cmap='Blues', title_fontsize="large",
text_fontsize="medium"):
"""Generates confusion matrix plot from predictions and true labels
Args:
y_true (array-like, shape (n_samples)):
Ground truth (correct) target values.
y_pred (array-like, shape (n_samples)):
Estimated targets as returned by a classifier.
labels (array-like, shape (n_classes), optional): List of labels to
index the matrix. This may be used to reorder or select a subset
of labels. If none is given, those that appear at least once in
``y_true`` or ``y_pred`` are used in sorted order. (new in v0.2.5)
true_labels (array-like, optional): The true labels to display.
If none is given, then all of the labels are used.
pred_labels (array-like, optional): The predicted labels to display.
If none is given, then all of the labels are used.
title (string, optional): Title of the generated plot. Defaults to
"Confusion Matrix" if `normalize` is True. Else, defaults to
"Normalized Confusion Matrix.
normalize (bool, optional): If True, normalizes the confusion matrix
before plotting. Defaults to False.
hide_zeros (bool, optional): If True, does not plot cells containing a
value of zero. Defaults to False.
x_tick_rotation (int, optional): Rotates x-axis tick labels by the
specified angle. This is useful in cases where there are numerous
categories and the labels overlap each other.
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the curve. If None, the plot is drawn on a new set of axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
cmap (string or :class:`matplotlib.colors.Colormap` instance, optional):
Colormap used for plotting the projection. View Matplotlib Colormap
documentation for available options.
https://matplotlib.org/users/colormaps.html
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> import scikitplot.plotters as skplt
>>> rf = RandomForestClassifier()
>>> rf = rf.fit(X_train, y_train)
>>> y_pred = rf.predict(X_test)
>>> skplt.plot_confusion_matrix(y_test, y_pred, normalize=True)
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_confusion_matrix.png
:align: center
:alt: Confusion matrix
"""
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
cm = confusion_matrix(y_true, y_pred, labels=labels)
if labels is None:
classes = unique_labels(y_true, y_pred)
else:
classes = np.asarray(labels)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
cm = np.around(cm, decimals=2)
cm[np.isnan(cm)] = 0.0
if true_labels is None:
true_classes = classes
else:
validate_labels(classes, true_labels, "true_labels")
true_label_indexes = np.in1d(classes, true_labels)
true_classes = classes[true_label_indexes]
cm = cm[true_label_indexes]
if pred_labels is None:
pred_classes = classes
else:
validate_labels(classes, pred_labels, "pred_labels")
pred_label_indexes = np.in1d(classes, pred_labels)
pred_classes = classes[pred_label_indexes]
cm = cm[:, pred_label_indexes]
if title:
ax.set_title(title, fontsize=title_fontsize)
elif normalize:
ax.set_title('Normalized Confusion Matrix', fontsize=title_fontsize)
else:
ax.set_title('Confusion Matrix', fontsize=title_fontsize)
image = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.get_cmap(cmap))
plt.colorbar(mappable=image)
x_tick_marks = np.arange(len(pred_classes))
y_tick_marks = np.arange(len(true_classes))
ax.set_xticks(x_tick_marks)
ax.set_xticklabels(pred_classes, fontsize=text_fontsize,
rotation=x_tick_rotation)
ax.set_yticks(y_tick_marks)
ax.set_yticklabels(true_classes, fontsize=text_fontsize)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
if not (hide_zeros and cm[i, j] == 0):
ax.text(j, i, cm[i, j],
horizontalalignment="center",
verticalalignment="center",
fontsize=text_fontsize,
color="white" if cm[i, j] > thresh else "black")
ax.set_ylabel('True label', fontsize=text_fontsize)
ax.set_xlabel('Predicted label', fontsize=text_fontsize)
ax.grid('off')
return ax
@deprecated('This will be removed in v0.4.0. Please use '
'scikitplot.metrics.plot_roc_curve instead.')
def plot_roc_curve(y_true, y_probas, title='ROC Curves',
curves=('micro', 'macro', 'each_class'),
ax=None, figsize=None, cmap='nipy_spectral',
title_fontsize="large", text_fontsize="medium"):
"""Generates the ROC curves from labels and predicted scores/probabilities
Args:
y_true (array-like, shape (n_samples)):
Ground truth (correct) target values.
y_probas (array-like, shape (n_samples, n_classes)):
Prediction probabilities for each class returned by a classifier.
title (string, optional): Title of the generated plot. Defaults to
"ROC Curves".
curves (array-like): A listing of which curves should be plotted on the
resulting plot. Defaults to `("micro", "macro", "each_class")`
i.e. "micro" for micro-averaged curve, "macro" for macro-averaged
curve
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the curve. If None, the plot is drawn on a new set of axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
cmap (string or :class:`matplotlib.colors.Colormap` instance, optional):
Colormap used for plotting the projection. View Matplotlib Colormap
documentation for available options.
https://matplotlib.org/users/colormaps.html
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> import scikitplot.plotters as skplt
>>> nb = GaussianNB()
>>> nb = nb.fit(X_train, y_train)
>>> y_probas = nb.predict_proba(X_test)
>>> skplt.plot_roc_curve(y_test, y_probas)
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_roc_curve.png
:align: center
:alt: ROC Curves
"""
y_true = np.array(y_true)
y_probas = np.array(y_probas)
if 'micro' not in curves and 'macro' not in curves and \
'each_class' not in curves:
raise ValueError('Invalid argument for curves as it '
'only takes "micro", "macro", or "each_class"')
classes = np.unique(y_true)
probas = y_probas
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(len(classes)):
fpr[i], tpr[i], _ = roc_curve(y_true, probas[:, i],
pos_label=classes[i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
micro_key = 'micro'
i = 0
while micro_key in fpr:
i += 1
micro_key += str(i)
y_true = label_binarize(y_true, classes=classes)
if len(classes) == 2:
y_true = np.hstack((1 - y_true, y_true))
fpr[micro_key], tpr[micro_key], _ = roc_curve(y_true.ravel(),
probas.ravel())
roc_auc[micro_key] = auc(fpr[micro_key], tpr[micro_key])
# Compute macro-average ROC curve and ROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[x] for x in range(len(classes))]))
# Then interpolate all ROC curves at this points
mean_tpr = np.zeros_like(all_fpr)
for i in range(len(classes)):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= len(classes)
macro_key = 'macro'
i = 0
while macro_key in fpr:
i += 1
macro_key += str(i)
fpr[macro_key] = all_fpr
tpr[macro_key] = mean_tpr
roc_auc[macro_key] = auc(fpr[macro_key], tpr[macro_key])
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.set_title(title, fontsize=title_fontsize)
if 'each_class' in curves:
for i in range(len(classes)):
color = plt.cm.get_cmap(cmap)(float(i) / len(classes))
ax.plot(fpr[i], tpr[i], lw=2, color=color,
label='ROC curve of class {0} (area = {1:0.2f})'
''.format(classes[i], roc_auc[i]))
if 'micro' in curves:
ax.plot(fpr[micro_key], tpr[micro_key],
label='micro-average ROC curve '
'(area = {0:0.2f})'.format(roc_auc[micro_key]),
color='deeppink', linestyle=':', linewidth=4)
if 'macro' in curves:
ax.plot(fpr[macro_key], tpr[macro_key],
label='macro-average ROC curve '
'(area = {0:0.2f})'.format(roc_auc[macro_key]),
color='navy', linestyle=':', linewidth=4)
ax.plot([0, 1], [0, 1], 'k--', lw=2)
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.05])
ax.set_xlabel('False Positive Rate', fontsize=text_fontsize)
ax.set_ylabel('True Positive Rate', fontsize=text_fontsize)
ax.tick_params(labelsize=text_fontsize)
ax.legend(loc='lower right', fontsize=text_fontsize)
return ax
@deprecated('This will be removed in v0.4.0. Please use '
'scikitplot.metrics.plot_ks_statistic instead.')
def plot_ks_statistic(y_true, y_probas, title='KS Statistic Plot',
ax=None, figsize=None, title_fontsize="large",
text_fontsize="medium"):
"""Generates the KS Statistic plot from labels and scores/probabilities
Args:
y_true (array-like, shape (n_samples)):
Ground truth (correct) target values.
y_probas (array-like, shape (n_samples, n_classes)):
Prediction probabilities for each class returned by a classifier.
title (string, optional): Title of the generated plot. Defaults to
"KS Statistic Plot".
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the learning curve. If None, the plot is drawn on a new set of
axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> import scikitplot.plotters as skplt
>>> lr = LogisticRegression()
>>> lr = lr.fit(X_train, y_train)
>>> y_probas = lr.predict_proba(X_test)
>>> skplt.plot_ks_statistic(y_test, y_probas)
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_ks_statistic.png
:align: center
:alt: KS Statistic
"""
y_true = np.array(y_true)
y_probas = np.array(y_probas)
classes = np.unique(y_true)
if len(classes) != 2:
raise ValueError('Cannot calculate KS statistic for data with '
'{} category/ies'.format(len(classes)))
probas = y_probas
# Compute KS Statistic curves
thresholds, pct1, pct2, ks_statistic, \
max_distance_at, classes = binary_ks_curve(y_true,
probas[:, 1].ravel())
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.set_title(title, fontsize=title_fontsize)
ax.plot(thresholds, pct1, lw=3, label='Class {}'.format(classes[0]))
ax.plot(thresholds, pct2, lw=3, label='Class {}'.format(classes[1]))
idx = np.where(thresholds == max_distance_at)[0][0]
ax.axvline(max_distance_at, *sorted([pct1[idx], pct2[idx]]),
label='KS Statistic: {:.3f} at {:.3f}'.format(ks_statistic,
max_distance_at),
linestyle=':', lw=3, color='black')
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.0])
ax.set_xlabel('Threshold', fontsize=text_fontsize)
ax.set_ylabel('Percentage below threshold', fontsize=text_fontsize)
ax.tick_params(labelsize=text_fontsize)
ax.legend(loc='lower right', fontsize=text_fontsize)
return ax
@deprecated('This will be removed in v0.4.0. Please use '
'scikitplot.metrics.plot_precision_recall_curve instead.')
def plot_precision_recall_curve(y_true, y_probas,
title='Precision-Recall Curve',
curves=('micro', 'each_class'), ax=None,
figsize=None, cmap='nipy_spectral',
title_fontsize="large",
text_fontsize="medium"):
"""Generates the Precision Recall Curve from labels and probabilities
Args:
y_true (array-like, shape (n_samples)):
Ground truth (correct) target values.
y_probas (array-like, shape (n_samples, n_classes)):
Prediction probabilities for each class returned by a classifier.
curves (array-like): A listing of which curves should be plotted on the
resulting plot. Defaults to `("micro", "each_class")`
i.e. "micro" for micro-averaged curve
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the curve. If None, the plot is drawn on a new set of axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
cmap (string or :class:`matplotlib.colors.Colormap` instance, optional):
Colormap used for plotting the projection. View Matplotlib Colormap
documentation for available options.
https://matplotlib.org/users/colormaps.html
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> import scikitplot.plotters as skplt
>>> nb = GaussianNB()
>>> nb = nb.fit(X_train, y_train)
>>> y_probas = nb.predict_proba(X_test)
>>> skplt.plot_precision_recall_curve(y_test, y_probas)
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_precision_recall_curve.png
:align: center
:alt: Precision Recall Curve
"""
y_true = np.array(y_true)
y_probas = np.array(y_probas)
classes = np.unique(y_true)
probas = y_probas
if 'micro' not in curves and 'each_class' not in curves:
raise ValueError('Invalid argument for curves as it '
'only takes "micro" or "each_class"')
# Compute Precision-Recall curve and area for each class
precision = dict()
recall = dict()
average_precision = dict()
for i in range(len(classes)):
precision[i], recall[i], _ = precision_recall_curve(
y_true, probas[:, i], pos_label=classes[i])
y_true = label_binarize(y_true, classes=classes)
if len(classes) == 2:
y_true = np.hstack((1 - y_true, y_true))
for i in range(len(classes)):
average_precision[i] = average_precision_score(y_true[:, i],
probas[:, i])
# Compute micro-average ROC curve and ROC area
micro_key = 'micro'
i = 0
while micro_key in precision:
i += 1
micro_key += str(i)
precision[micro_key], recall[micro_key], _ = precision_recall_curve(
y_true.ravel(), probas.ravel())
average_precision[micro_key] = average_precision_score(y_true, probas,
average='micro')
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.set_title(title, fontsize=title_fontsize)
if 'each_class' in curves:
for i in range(len(classes)):
color = plt.cm.get_cmap(cmap)(float(i) / len(classes))
ax.plot(recall[i], precision[i], lw=2,
label='Precision-recall curve of class {0} '
'(area = {1:0.3f})'.format(classes[i],
average_precision[i]),
color=color)
if 'micro' in curves:
ax.plot(recall[micro_key], precision[micro_key],
label='micro-average Precision-recall curve '
'(area = {0:0.3f})'.format(average_precision[micro_key]),
color='navy', linestyle=':', linewidth=4)
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.05])
ax.set_xlabel('Recall')
ax.set_ylabel('Precision')
ax.tick_params(labelsize=text_fontsize)
ax.legend(loc='best', fontsize=text_fontsize)
return ax
@deprecated('This will be removed in v0.4.0. Please use '
'scikitplot.estimators.plot_feature_importances instead.')
def plot_feature_importances(clf, title='Feature Importance',
feature_names=None, max_num_features=20,
order='descending', x_tick_rotation=0, ax=None,
figsize=None, title_fontsize="large",
text_fontsize="medium"):
"""Generates a plot of a classifier's feature importances.
Args:
clf: Classifier instance that implements ``fit`` and ``predict_proba``
methods. The classifier must also have a ``feature_importances_``
attribute.
title (string, optional): Title of the generated plot. Defaults to
"Feature importances".
feature_names (None, :obj:`list` of string, optional): Determines the
feature names used to plot the feature importances. If None,
feature names will be numbered.
max_num_features (int): Determines the maximum number of features to
plot. Defaults to 20.
order ('ascending', 'descending', or None, optional): Determines the
order in which the feature importances are plotted. Defaults to
'descending'.
x_tick_rotation (int, optional): Rotates x-axis tick labels by the
specified angle. This is useful in cases where there are numerous
categories and the labels overlap each other.
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the curve. If None, the plot is drawn on a new set of axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> import scikitplot.plotters as skplt
>>> rf = RandomForestClassifier()
>>> rf.fit(X, y)
>>> skplt.plot_feature_importances(
... rf, feature_names=['petal length', 'petal width',
... 'sepal length', 'sepal width'])
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_feature_importances.png
:align: center
:alt: Feature Importances
"""
if not hasattr(clf, 'feature_importances_'):
raise TypeError('"feature_importances_" attribute not in classifier. '
'Cannot plot feature importances.')
importances = clf.feature_importances_
if hasattr(clf, 'estimators_')\
and isinstance(clf.estimators_, list)\
and hasattr(clf.estimators_[0], 'feature_importances_'):
std = np.std([tree.feature_importances_ for tree in clf.estimators_],
axis=0)
else:
std = None
if order == 'descending':
indices = np.argsort(importances)[::-1]
elif order == 'ascending':
indices = np.argsort(importances)
elif order is None:
indices = np.array(range(len(importances)))
else:
raise ValueError('Invalid argument {} for "order"'.format(order))
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
if feature_names is None:
feature_names = indices
else:
feature_names = np.array(feature_names)[indices]
max_num_features = min(max_num_features, len(importances))
ax.set_title(title, fontsize=title_fontsize)
if std is not None:
ax.bar(range(max_num_features),
importances[indices][:max_num_features], color='r',
yerr=std[indices][:max_num_features], align='center')
else:
ax.bar(range(max_num_features),
importances[indices][:max_num_features],
color='r', align='center')
ax.set_xticks(range(max_num_features))
ax.set_xticklabels(feature_names[:max_num_features],
rotation=x_tick_rotation)
ax.set_xlim([-1, max_num_features])
ax.tick_params(labelsize=text_fontsize)
return ax
@deprecated('This will be removed in v0.4.0. Please use '
'scikitplot.estimators.plot_learning_curve instead.')
def plot_learning_curve(clf, X, y, title='Learning Curve', cv=None,
train_sizes=None, n_jobs=1, scoring=None,
ax=None, figsize=None, title_fontsize="large",
text_fontsize="medium"):
"""Generates a plot of the train and test learning curves for a classifier.
Args:
clf: Classifier instance that implements ``fit`` and ``predict``
methods.
X (array-like, shape (n_samples, n_features)):
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y (array-like, shape (n_samples) or (n_samples, n_features)):
Target relative to X for classification or regression;
None for unsupervised learning.
title (string, optional): Title of the generated plot. Defaults to
"Learning Curve"
cv (int, cross-validation generator, iterable, optional): Determines
the cross-validation strategy to be used for splitting.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`StratifiedKFold` used. If the estimator is not a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is
used.
train_sizes (iterable, optional): Determines the training sizes used to
plot the learning curve. If None, ``np.linspace(.1, 1.0, 5)`` is
used.
n_jobs (int, optional): Number of jobs to run in parallel. Defaults to
1.
scoring (string, callable or None, optional): default: None
A string (see scikit-learn model evaluation documentation) or a
scorerbcallable object / function with signature
scorer(estimator, X, y).
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the curve. If None, the plot is drawn on a new set of axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> import scikitplot.plotters as skplt
>>> rf = RandomForestClassifier()
>>> skplt.plot_learning_curve(rf, X, y)
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_learning_curve.png
:align: center
:alt: Learning Curve
"""
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
if train_sizes is None:
train_sizes = np.linspace(.1, 1.0, 5)
ax.set_title(title, fontsize=title_fontsize)
ax.set_xlabel("Training examples", fontsize=text_fontsize)
ax.set_ylabel("Score", fontsize=text_fontsize)
train_sizes, train_scores, test_scores = learning_curve(
clf, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes,
scoring=scoring)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
ax.grid()
ax.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1, color="r")
ax.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
ax.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
ax.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
ax.tick_params(labelsize=text_fontsize)
ax.legend(loc="best", fontsize=text_fontsize)
return ax
@deprecated('This will be removed in v0.4.0. Please use '
'scikitplot.metrics.plot_silhouette instead.')
def plot_silhouette(clf, X, title='Silhouette Analysis', metric='euclidean',
copy=True, ax=None, figsize=None, cmap='nipy_spectral',
title_fontsize="large", text_fontsize="medium"):
"""Plots silhouette analysis of clusters using fit_predict.
Args:
clf: Clusterer instance that implements ``fit`` and ``fit_predict``
methods.
X (array-like, shape (n_samples, n_features)):
Data to cluster, where n_samples is the number of samples and
n_features is the number of features.
title (string, optional): Title of the generated plot. Defaults to
"Silhouette Analysis"
metric (string or callable, optional): The metric to use when
calculating distance between instances in a feature array.
If metric is a string, it must be one of the options allowed by
sklearn.metrics.pairwise.pairwise_distances. If X is
the distance array itself, use "precomputed" as the metric.
copy (boolean, optional): Determines whether ``fit`` is used on
**clf** or on a copy of **clf**.
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the curve. If None, the plot is drawn on a new set of axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
cmap (string or :class:`matplotlib.colors.Colormap` instance, optional):
Colormap used for plotting the projection. View Matplotlib Colormap
documentation for available options.
https://matplotlib.org/users/colormaps.html
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> import scikitplot.plotters as skplt
>>> kmeans = KMeans(n_clusters=4, random_state=1)
>>> skplt.plot_silhouette(kmeans, X)
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_silhouette.png
:align: center
:alt: Silhouette Plot
"""
if copy:
clf = clone(clf)
cluster_labels = clf.fit_predict(X)
n_clusters = len(set(cluster_labels))
silhouette_avg = silhouette_score(X, cluster_labels, metric=metric)
sample_silhouette_values = silhouette_samples(X, cluster_labels,
metric=metric)
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.set_title(title, fontsize=title_fontsize)
ax.set_xlim([-0.1, 1])
ax.set_ylim([0, len(X) + (n_clusters + 1) * 10 + 10])
ax.set_xlabel('Silhouette coefficient values', fontsize=text_fontsize)
ax.set_ylabel('Cluster label', fontsize=text_fontsize)
y_lower = 10
for i in range(n_clusters):
ith_cluster_silhouette_values = sample_silhouette_values[
cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = plt.cm.get_cmap(cmap)(float(i) / n_clusters)
ax.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
ax.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i),
fontsize=text_fontsize)
y_lower = y_upper + 10
ax.axvline(x=silhouette_avg, color="red", linestyle="--",
label='Silhouette score: {0:0.3f}'.format(silhouette_avg))
ax.set_yticks([]) # Clear the y-axis labels / ticks
ax.set_xticks(np.arange(-0.1, 1.0, 0.2))
ax.tick_params(labelsize=text_fontsize)
ax.legend(loc='best', fontsize=text_fontsize)
return ax
@deprecated('This will be removed in v0.4.0. Please use '
'scikitplot.cluster.plot_elbow_curve instead.')
def plot_elbow_curve(clf, X, title='Elbow Plot', cluster_ranges=None,
ax=None, figsize=None, title_fontsize="large",
text_fontsize="medium"):
"""Plots elbow curve of different values of K for KMeans clustering.
Args:
clf: Clusterer instance that implements ``fit`` and ``fit_predict``
methods and a ``score`` parameter.
X (array-like, shape (n_samples, n_features)):
Data to cluster, where n_samples is the number of samples and
n_features is the number of features.
title (string, optional): Title of the generated plot. Defaults to
"Elbow Plot"
cluster_ranges (None or :obj:`list` of int, optional): List of
n_clusters for which to plot the explained variances. Defaults to
``range(1, 12, 2)``.
copy (boolean, optional): Determines whether ``fit`` is used on
**clf** or on a copy of **clf**.
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the curve. If None, the plot is drawn on a new set of axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> import scikitplot.plotters as skplt
>>> kmeans = KMeans(random_state=1)
>>> skplt.plot_elbow_curve(kmeans, cluster_ranges=range(1, 11))
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_elbow_curve.png
:align: center
:alt: Elbow Curve
"""
if cluster_ranges is None:
cluster_ranges = range(1, 12, 2)
else:
cluster_ranges = sorted(cluster_ranges)
if not hasattr(clf, 'n_clusters'):
raise TypeError('"n_clusters" attribute not in classifier. '
'Cannot plot elbow method.')
clfs = []
for i in cluster_ranges:
current_clf = clone(clf)
setattr(current_clf, "n_clusters", i)
clfs.append(current_clf.fit(X).score(X))
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.set_title(title, fontsize=title_fontsize)
ax.plot(cluster_ranges, np.absolute(clfs), 'b*-')
ax.grid(True)
ax.set_xlabel('Number of clusters', fontsize=text_fontsize)
ax.set_ylabel('Sum of Squared Errors', fontsize=text_fontsize)
ax.tick_params(labelsize=text_fontsize)
return ax
@deprecated('This will be removed in v0.4.0. Please use '
'scikitplot.decomposition.plot_pca_component_variance instead.')
def plot_pca_component_variance(clf, title='PCA Component Explained Variances',
target_explained_variance=0.75, ax=None,
figsize=None, title_fontsize="large",
text_fontsize="medium"):
"""Plots PCA components' explained variance ratios. (new in v0.2.2)
Args:
clf: PCA instance that has the ``explained_variance_ratio_`` attribute.
title (string, optional): Title of the generated plot. Defaults to
"PCA Component Explained Variances"
target_explained_variance (float, optional): Looks for the minimum
number of principal components that satisfies this value and
emphasizes it on the plot. Defaults to 0.75
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the curve. If None, the plot is drawn on a new set of axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> import scikitplot.plotters as skplt
>>> pca = PCA(random_state=1)
>>> pca.fit(X)
>>> skplt.plot_pca_component_variance(pca)
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_pca_component_variance.png
:align: center
:alt: PCA Component variances
"""
if not hasattr(clf, 'explained_variance_ratio_'):
raise TypeError('"clf" does not have explained_variance_ratio_ '
'attribute. Has the PCA been fitted?')
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.set_title(title, fontsize=title_fontsize)
cumulative_sum_ratios = np.cumsum(clf.explained_variance_ratio_)
# Magic code for figuring out closest value to target_explained_variance
idx = np.searchsorted(cumulative_sum_ratios, target_explained_variance)
ax.plot(range(len(clf.explained_variance_ratio_) + 1),
np.concatenate(([0], np.cumsum(clf.explained_variance_ratio_))),
'*-')
ax.grid(True)
ax.set_xlabel('First n principal components', fontsize=text_fontsize)
ax.set_ylabel('Explained variance ratio of first n components',
fontsize=text_fontsize)
ax.set_ylim([-0.02, 1.02])
if idx < len(cumulative_sum_ratios):
ax.plot(idx+1, cumulative_sum_ratios[idx], 'ro',
label='{0:0.3f} Explained variance ratio for '
'first {1} components'.format(cumulative_sum_ratios[idx],
idx+1),
markersize=4, markeredgewidth=4)
ax.axhline(cumulative_sum_ratios[idx],
linestyle=':', lw=3, color='black')
ax.tick_params(labelsize=text_fontsize)
ax.legend(loc="best", fontsize=text_fontsize)
return ax
@deprecated('This will be removed in v0.4.0. Please use '
'scikitplot.decomposition.plot_pca_component_variance instead.')
def plot_pca_2d_projection(clf, X, y, title='PCA 2-D Projection', ax=None,
figsize=None, cmap='Spectral',
title_fontsize="large", text_fontsize="medium"):
"""Plots the 2-dimensional projection of PCA on a given dataset.
Args:
clf: Fitted PCA instance that can ``transform`` given data set into 2
dimensions.
X (array-like, shape (n_samples, n_features)):
Feature set to project, where n_samples is the number of samples
and n_features is the number of features.
y (array-like, shape (n_samples) or (n_samples, n_features)):
Target relative to X for labeling.
title (string, optional): Title of the generated plot. Defaults to
"PCA 2-D Projection"
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the curve. If None, the plot is drawn on a new set of axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
cmap (string or :class:`matplotlib.colors.Colormap` instance, optional):
Colormap used for plotting the projection. View Matplotlib Colormap
documentation for available options.
https://matplotlib.org/users/colormaps.html
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> import scikitplot.plotters as skplt
>>> pca = PCA(random_state=1)
>>> pca.fit(X)
>>> skplt.plot_pca_2d_projection(pca, X, y)
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_pca_2d_projection.png
:align: center
:alt: PCA 2D Projection
"""
transformed_X = clf.transform(X)
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.set_title(title, fontsize=title_fontsize)
classes = np.unique(np.array(y))
colors = plt.cm.get_cmap(cmap)(np.linspace(0, 1, len(classes)))
for label, color in zip(classes, colors):
ax.scatter(transformed_X[y == label, 0], transformed_X[y == label, 1],
alpha=0.8, lw=2, label=label, color=color)
ax.legend(loc='best', shadow=False, scatterpoints=1,
fontsize=text_fontsize)
ax.set_xlabel('First Principal Component', fontsize=text_fontsize)
ax.set_ylabel('Second Principal Component', fontsize=text_fontsize)
ax.tick_params(labelsize=text_fontsize)
return ax
|
mit
|
zasdfgbnm/tensorflow
|
tensorflow/python/ops/quantized_conv_ops_test.py
|
27
|
7519
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for quantized convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
class Conv2DTest(test.TestCase):
def __init__(self, method_name="runTest"):
super(Conv2DTest, self).__init__(method_name)
def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
expected):
"""Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
stride: Stride.
padding: Padding type.
expected: An array containing the expected operation outputs.
"""
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = np.array([f for f in range(1, total_size_1 + 1)])
x1 = x1.astype(np.uint8).reshape(tensor_in_sizes)
x1_min = 0.0
x1_max = 255.0
x2 = np.array([f for f in range(1, total_size_2 + 1)]).astype(np.uint8)
x2 = x2.astype(np.uint8).reshape(filter_in_sizes)
x2_min = 0.0
x2_max = 255.0
with self.test_session(use_gpu=False) as sess:
t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtypes.quint8)
t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtypes.quint8)
conv = nn_ops.quantized_conv2d(
t1,
t2,
out_type=dtypes.qint32,
strides=[1, stride, stride, 1],
padding=padding,
min_input=x1_min,
max_input=x1_max,
min_filter=x2_min,
max_filter=x2_max)
value = sess.run(conv)
quantized_output = value[0]
output_min = value[1]
output_max = value[2]
float_output = self._QuantizedOutputToFloat(quantized_output, output_min,
output_max)
self.assertArrayNear(expected, float_output.flatten(), 1.0)
self.assertEqual(value[0].shape, conv[0].get_shape())
def _assertQuantizedArrayEquals(self, iarray1, iarray2):
for i1, i2 in zip(iarray1, iarray2):
self.assertTrue(i1 == i2)
def _QuantizedOutputToFloat(self, quantized, quantized_min, quantized_max):
number_of_bits = 32
number_of_steps = 1 << number_of_bits
range_adjust = (number_of_steps / (number_of_steps - 1.0))
quantized_range = ((quantized_max - quantized_min) * range_adjust)
range_scale = (quantized_range / number_of_steps)
lowest_quantized = -(1 << (number_of_bits - 1))
result = np.array([(quantized_min +
((float(x) - lowest_quantized) * range_scale))
for x in quantized.flatten()])
return result
def testConv2D1x1Filter(self):
# Our generated input is [batch, rows, cols, depth], and looks like this:
# (1,2,3) (4,5,6) (7,8,9)
# (10,11,12) (13,14,15) (16,17,18)
# The filter data is:
# (1,4,7) (2,5,8) (3,6,9)
# That means the calculations are:
# 1*1+2*4+3*7=30
# 1*2+2*5+3*8=36
# 1*3+2*6+3*9=42
# 4*1+5*4+6*7=66
# 4*2+5*5+6*8=81
# 4*3+5*6+6*9=96
# 7*1+5*8+6*9=102
# 7*2+8*5+9*8=126
# 7*3+8*6+9*9=150
# 10*1+11*4+12*7=138
# 10*2+11*5+12*8=171
# 10*3+11*6+12*9=204
# 13*1+14*4+15*7=174
# 13*2+14*5+15*8=216
# 13*3+14*6+15*9=258, clamped to 255
# 16*1+17*4+18*7=210
# 16*2+17*5+18*8=261, clamped to 255
# 16*3+17*6+18*9=312, clamped to 255
# Because the output shift is zero, we call the non-optimized reference
# path for the convolution.
expected_output = [
30, 36, 42, 66, 81, 96, 102, 126, 150, 138, 171, 204, 174, 216, 258,
210, 261, 312
]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[1, 1, 3, 3],
stride=1,
padding="VALID",
expected=expected_output)
def testConv2D2x2Filter(self):
# Our generated input is [batch, rows, cols, depth], and looks like this:
# (1,2,3) (4,5,6) (7,8,9)
# (10,11,12) (13,14,15) (16,17,18)
# The filter data is [filter_height, filter_width, depth, filter_count]:
# ( 1, 4, 7) (10, 13, 16)
# (19,22,25) (28, 31, 34)
# -
# ( 2, 5, 8) (11, 14, 17)
# (20,23,26) (29, 32, 35)
# -
# ( 3, 6, 9) (12, 15, 18)
# (21,24,27) (30, 33, 36)
# The raw accumulated totals are:
# 1*1+2*4+3*7+4*10+5*13+6*16+10*19+11*22+12*25+13*28+14*31+15*34=2271
# 1*2+2*5+3*8+4*11+5*14+6*17+10*20+11*23+12*26+13*29+14*32+15*35=2367
# 1*3+2*6+3*9+4*12+5*15+6*18+10*21+11*24+12*27+13*30+14*33+15*36=2463
# 4*1+5*4+6*7+7*10+8*13+9*16+13*19+14*22+15*25+16*28+17*31+18*34=2901
# 4*2+5*5+6*8+7*11+8*14+9*17+13*20+14*23+15*26+16*29+17*32+18*35=3033
# 4*3+5*6+6*9+7*12+8*15+9*18+13*21+14*24+15*27+16*30+17*33+18*36=3165
# The expected values are taken from the raw totals and rescaled to fit into
# eight bits.
expected_output = [2271.0, 2367.0, 2463.0, 2901.0, 3033.0, 3165.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
stride=1,
padding="VALID",
expected=expected_output)
def testConv2D1x2Filter(self):
# The outputs are computed using third_party/py/IPython/notebook.
# With a shift of 21, we should execute the optimized path here.
expected_output = [
231.0, 252.0, 273.0, 384.0, 423.0, 462.0, 690.0, 765.0, 840.0, 843.0,
936.0, 1029.0
]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[1, 2, 3, 3],
stride=1,
padding="VALID",
expected=expected_output)
def testConv2D2x2FilterStride2(self):
# With a shift of 21, we should execute the optimized path here.
expected_output = [2271.0, 2367.0, 2463.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
stride=2,
padding="VALID",
expected=expected_output)
def testConv2D2x2FilterStride2Same(self):
# With a shift of 21, we should execute the optimized path here.
expected_output = [2271.0, 2367.0, 2463.0, 1230.0, 1305.0, 1380.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
stride=2,
padding="SAME",
expected=expected_output)
if __name__ == "__main__":
test.main()
|
apache-2.0
|
tedelhourani/ansible
|
lib/ansible/module_utils/infinibox.py
|
135
|
3673
|
# -*- coding: utf-8 -*-
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Gregory Shulov <[email protected]>,2016
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
HAS_INFINISDK = True
try:
from infinisdk import InfiniBox, core
except ImportError:
HAS_INFINISDK = False
from functools import wraps
from os import environ
from os import path
def api_wrapper(func):
""" Catch API Errors Decorator"""
@wraps(func)
def __wrapper(*args, **kwargs):
module = args[0]
try:
return func(*args, **kwargs)
except core.exceptions.APICommandException as e:
module.fail_json(msg=e.message)
except core.exceptions.SystemNotFoundException as e:
module.fail_json(msg=e.message)
except:
raise
return __wrapper
@api_wrapper
def get_system(module):
"""Return System Object or Fail"""
box = module.params['system']
user = module.params.get('user', None)
password = module.params.get('password', None)
if user and password:
system = InfiniBox(box, auth=(user, password))
elif environ.get('INFINIBOX_USER') and environ.get('INFINIBOX_PASSWORD'):
system = InfiniBox(box, auth=(environ.get('INFINIBOX_USER'), environ.get('INFINIBOX_PASSWORD')))
elif path.isfile(path.expanduser('~') + '/.infinidat/infinisdk.ini'):
system = InfiniBox(box)
else:
module.fail_json(msg="You must set INFINIBOX_USER and INFINIBOX_PASSWORD environment variables or set username/password module arguments")
try:
system.login()
except Exception:
module.fail_json(msg="Infinibox authentication failed. Check your credentials")
return system
def infinibox_argument_spec():
"""Return standard base dictionary used for the argument_spec argument in AnsibleModule"""
return dict(
system=dict(required=True),
user=dict(),
password=dict(no_log=True),
)
def infinibox_required_together():
"""Return the default list used for the required_together argument to AnsibleModule"""
return [['user', 'password']]
|
gpl-3.0
|
xapi-project/ffs
|
volume/org.xen.xapi.storage.ffs/sr.py
|
1
|
3289
|
#!/usr/bin/env python
import json
import os
import os.path
import sys
import urlparse
import xapi.storage.api.volume
from xapi.storage import log
class Implementation(xapi.storage.api.volume.SR_skeleton):
def probe(self, dbg, uri):
raise AssertionError("not implemented")
def attach(self, dbg, uri):
urlparse.urlparse(uri)
# mount the filesystem if necessary
return uri
def create(self, dbg, uri, name, description, configuration):
urlparse.urlparse(uri)
# this would be a good place to run mkfs
return
def destroy(self, dbg, sr):
# no need to destroy anything
return
def detach(self, dbg, sr):
# assume there is no need to unmount the filesystem
return
def ls(self, dbg, sr):
u = urlparse.urlparse(sr)
if not(os.path.isdir(u.path)):
raise xapi.storage.api.volume.Sr_not_attached(sr)
results = []
for filename in os.listdir(u.path):
if filename.endswith(".json"):
continue
path = os.path.join(u.path, filename)
if not(os.path.isfile(os.path.realpath(path))):
continue
uuid_ = None
name = filename
description = filename
keys = {}
if os.path.exists(path + ".json"):
with open(path + ".json", "r") as fd:
js = json.load(fd)
uuid_ = js["uuid"]
name = js["name"]
description = js["description"]
keys = js["keys"]
stat = os.stat(path)
virtual_size = stat.st_size
physical_utilisation = stat.st_blocks * 512
results.append({
"key": filename,
"uuid": uuid_,
"name": name,
"description": description,
"read_write": True,
"virtual_size": virtual_size,
"physical_utilisation": physical_utilisation,
"uri": ["raw+file://" + path],
"keys": keys
})
return results
def stat(self, dbg, sr):
u = urlparse.urlparse(sr)
statvfs = os.statvfs(u.path)
physical_size = statvfs.f_blocks * statvfs.f_frsize
free_size = statvfs.f_bfree * statvfs.f_frsize
return {
"sr": sr,
"name": "This SR has no name",
"description": "This SR has no description",
"total_space": physical_size,
"free_space": free_size,
"datasources": [],
"clustered": False,
"health": ["Healthy", ""]
}
if __name__ == "__main__":
log.log_call_argv()
cmd = xapi.storage.api.volume.SR_commandline(Implementation())
base = os.path.basename(sys.argv[0])
if base == 'SR.probe':
cmd.probe()
elif base == 'SR.attach':
cmd.attach()
elif base == 'SR.create':
cmd.create()
elif base == 'SR.destroy':
cmd.destroy()
elif base == 'SR.detach':
cmd.detach()
elif base == 'SR.ls':
cmd.ls()
elif base == 'SR.stat':
cmd.stat()
else:
raise xapi.storage.api.volume.Unimplemented(base)
|
lgpl-2.1
|
skycucumber/Messaging-Gateway
|
webapp/venv/lib/python2.7/site-packages/twisted/test/test_rebuild.py
|
41
|
7774
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
import sys, os
import types
from twisted.trial import unittest
from twisted.python import rebuild
import crash_test_dummy
f = crash_test_dummy.foo
class Foo: pass
class Bar(Foo): pass
class Baz(object): pass
class Buz(Bar, Baz): pass
class HashRaisesRuntimeError:
"""
Things that don't hash (raise an Exception) should be ignored by the
rebuilder.
@ivar hashCalled: C{bool} set to True when __hash__ is called.
"""
def __init__(self):
self.hashCalled = False
def __hash__(self):
self.hashCalled = True
raise RuntimeError('not a TypeError!')
unhashableObject = None # set in test_hashException
class RebuildTestCase(unittest.TestCase):
"""
Simple testcase for rebuilding, to at least exercise the code.
"""
def setUp(self):
self.libPath = self.mktemp()
os.mkdir(self.libPath)
self.fakelibPath = os.path.join(self.libPath, 'twisted_rebuild_fakelib')
os.mkdir(self.fakelibPath)
file(os.path.join(self.fakelibPath, '__init__.py'), 'w').close()
sys.path.insert(0, self.libPath)
def tearDown(self):
sys.path.remove(self.libPath)
def testFileRebuild(self):
from twisted.python.util import sibpath
import shutil, time
shutil.copyfile(sibpath(__file__, "myrebuilder1.py"),
os.path.join(self.fakelibPath, "myrebuilder.py"))
from twisted_rebuild_fakelib import myrebuilder
a = myrebuilder.A()
try:
object
except NameError:
pass
else:
from twisted.test import test_rebuild
b = myrebuilder.B()
class C(myrebuilder.B):
pass
test_rebuild.C = C
c = C()
i = myrebuilder.Inherit()
self.assertEqual(a.a(), 'a')
# necessary because the file has not "changed" if a second has not gone
# by in unix. This sucks, but it's not often that you'll be doing more
# than one reload per second.
time.sleep(1.1)
shutil.copyfile(sibpath(__file__, "myrebuilder2.py"),
os.path.join(self.fakelibPath, "myrebuilder.py"))
rebuild.rebuild(myrebuilder)
try:
object
except NameError:
pass
else:
b2 = myrebuilder.B()
self.assertEqual(b2.b(), 'c')
self.assertEqual(b.b(), 'c')
self.assertEqual(i.a(), 'd')
self.assertEqual(a.a(), 'b')
# more work to be done on new-style classes
# self.assertEqual(c.b(), 'c')
def testRebuild(self):
"""
Rebuilding an unchanged module.
"""
# This test would actually pass if rebuild was a no-op, but it
# ensures rebuild doesn't break stuff while being a less
# complex test than testFileRebuild.
x = crash_test_dummy.X('a')
rebuild.rebuild(crash_test_dummy, doLog=False)
# Instance rebuilding is triggered by attribute access.
x.do()
self.failUnlessIdentical(x.__class__, crash_test_dummy.X)
self.failUnlessIdentical(f, crash_test_dummy.foo)
def testComponentInteraction(self):
x = crash_test_dummy.XComponent()
x.setAdapter(crash_test_dummy.IX, crash_test_dummy.XA)
oldComponent = x.getComponent(crash_test_dummy.IX)
rebuild.rebuild(crash_test_dummy, 0)
newComponent = x.getComponent(crash_test_dummy.IX)
newComponent.method()
self.assertEqual(newComponent.__class__, crash_test_dummy.XA)
# Test that a duplicate registerAdapter is not allowed
from twisted.python import components
self.failUnlessRaises(ValueError, components.registerAdapter,
crash_test_dummy.XA, crash_test_dummy.X,
crash_test_dummy.IX)
def testUpdateInstance(self):
global Foo, Buz
b = Buz()
class Foo:
def foo(self):
pass
class Buz(Bar, Baz):
x = 10
rebuild.updateInstance(b)
assert hasattr(b, 'foo'), "Missing method on rebuilt instance"
assert hasattr(b, 'x'), "Missing class attribute on rebuilt instance"
def testBananaInteraction(self):
from twisted.python import rebuild
from twisted.spread import banana
rebuild.latestClass(banana.Banana)
def test_hashException(self):
"""
Rebuilding something that has a __hash__ that raises a non-TypeError
shouldn't cause rebuild to die.
"""
global unhashableObject
unhashableObject = HashRaisesRuntimeError()
def _cleanup():
global unhashableObject
unhashableObject = None
self.addCleanup(_cleanup)
rebuild.rebuild(rebuild)
self.assertEqual(unhashableObject.hashCalled, True)
class NewStyleTestCase(unittest.TestCase):
"""
Tests for rebuilding new-style classes of various sorts.
"""
def setUp(self):
self.m = types.ModuleType('whipping')
sys.modules['whipping'] = self.m
def tearDown(self):
del sys.modules['whipping']
del self.m
def test_slots(self):
"""
Try to rebuild a new style class with slots defined.
"""
classDefinition = (
"class SlottedClass(object):\n"
" __slots__ = ['a']\n")
exec classDefinition in self.m.__dict__
inst = self.m.SlottedClass()
inst.a = 7
exec classDefinition in self.m.__dict__
rebuild.updateInstance(inst)
self.assertEqual(inst.a, 7)
self.assertIdentical(type(inst), self.m.SlottedClass)
if sys.version_info < (2, 6):
test_slots.skip = "__class__ assignment for class with slots is only available starting Python 2.6"
def test_errorSlots(self):
"""
Try to rebuild a new style class with slots defined: this should fail.
"""
classDefinition = (
"class SlottedClass(object):\n"
" __slots__ = ['a']\n")
exec classDefinition in self.m.__dict__
inst = self.m.SlottedClass()
inst.a = 7
exec classDefinition in self.m.__dict__
self.assertRaises(rebuild.RebuildError, rebuild.updateInstance, inst)
if sys.version_info >= (2, 6):
test_errorSlots.skip = "__class__ assignment for class with slots should work starting Python 2.6"
def test_typeSubclass(self):
"""
Try to rebuild a base type subclass.
"""
classDefinition = (
"class ListSubclass(list):\n"
" pass\n")
exec classDefinition in self.m.__dict__
inst = self.m.ListSubclass()
inst.append(2)
exec classDefinition in self.m.__dict__
rebuild.updateInstance(inst)
self.assertEqual(inst[0], 2)
self.assertIdentical(type(inst), self.m.ListSubclass)
def test_instanceSlots(self):
"""
Test that when rebuilding an instance with a __slots__ attribute, it
fails accurately instead of giving a L{rebuild.RebuildError}.
"""
classDefinition = (
"class NotSlottedClass(object):\n"
" pass\n")
exec classDefinition in self.m.__dict__
inst = self.m.NotSlottedClass()
inst.__slots__ = ['a']
classDefinition = (
"class NotSlottedClass:\n"
" pass\n")
exec classDefinition in self.m.__dict__
# Moving from new-style class to old-style should fail.
self.assertRaises(TypeError, rebuild.updateInstance, inst)
|
gpl-2.0
|
YGIronMan/python
|
zentst/0023/mysite/wsgi.py
|
102
|
1419
|
"""
WSGI config for mysite project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "mysite.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
mit
|
acsone/account-invoicing
|
account_invoice_shipping_address/sale.py
|
34
|
1453
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Domsense s.r.l. (<http://www.domsense.com>).
# Copyright (C) 2013 Andrea Cometa Perito Informatico (www.andreacometa.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm
class sale_order(orm.Model):
_inherit = 'sale.order'
def _prepare_invoice(self, cr, uid, order, lines, context=None):
res = super(sale_order, self)._prepare_invoice(
cr, uid, order, lines, context=context
)
res.update({
'address_shipping_id': order.partner_shipping_id.id, })
return res
|
agpl-3.0
|
jhawkesworth/ansible
|
lib/ansible/modules/network/fortios/fortios_log_memory_global_setting.py
|
23
|
8183
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# the lib use python logging can get it if the following is set in your
# Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_log_memory_global_setting
short_description: Global settings for memory logging in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by allowing the
user to set and modify log_memory feature and global_setting category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: true
log_memory_global_setting:
description:
- Global settings for memory logging.
default: null
suboptions:
full-final-warning-threshold:
description:
- Log full final warning threshold as a percent (3 - 100, default = 95).
full-first-warning-threshold:
description:
- Log full first warning threshold as a percent (1 - 98, default = 75).
full-second-warning-threshold:
description:
- Log full second warning threshold as a percent (2 - 99, default = 90).
max-size:
description:
- Maximum amount of memory that can be used for memory logging in bytes.
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Global settings for memory logging.
fortios_log_memory_global_setting:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
log_memory_global_setting:
full-final-warning-threshold: "3"
full-first-warning-threshold: "4"
full-second-warning-threshold: "5"
max-size: "6"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_log_memory_global_setting_data(json):
option_list = ['full-final-warning-threshold', 'full-first-warning-threshold', 'full-second-warning-threshold',
'max-size']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def flatten_multilists_attributes(data):
multilist_attrs = []
for attr in multilist_attrs:
try:
path = "data['" + "']['".join(elem for elem in attr) + "']"
current_val = eval(path)
flattened_val = ' '.join(elem for elem in current_val)
exec(path + '= flattened_val')
except BaseException:
pass
return data
def log_memory_global_setting(data, fos):
vdom = data['vdom']
log_memory_global_setting_data = data['log_memory_global_setting']
flattened_data = flatten_multilists_attributes(log_memory_global_setting_data)
filtered_data = filter_log_memory_global_setting_data(flattened_data)
return fos.set('log.memory',
'global-setting',
data=filtered_data,
vdom=vdom)
def fortios_log_memory(data, fos):
login(data)
if data['log_memory_global_setting']:
resp = log_memory_global_setting(data, fos)
fos.logout()
return not resp['status'] == "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"log_memory_global_setting": {
"required": False, "type": "dict",
"options": {
"full-final-warning-threshold": {"required": False, "type": "int"},
"full-first-warning-threshold": {"required": False, "type": "int"},
"full-second-warning-threshold": {"required": False, "type": "int"},
"max-size": {"required": False, "type": "int"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_log_memory(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
karllessard/tensorflow
|
tensorflow/python/framework/kernels_test.py
|
51
|
1543
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for querying registered kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import kernels
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class GetAllRegisteredKernelsTest(test_util.TensorFlowTestCase):
def testFindsAtLeastOneKernel(self):
kernel_list = kernels.get_all_registered_kernels()
self.assertGreater(len(kernel_list.kernel), 0)
class GetRegisteredKernelsForOp(test_util.TensorFlowTestCase):
def testFindsAtLeastOneKernel(self):
kernel_list = kernels.get_registered_kernels_for_op("KernelLabel")
self.assertGreater(len(kernel_list.kernel), 0)
self.assertEqual(kernel_list.kernel[0].op, "KernelLabel")
if __name__ == "__main__":
googletest.main()
|
apache-2.0
|
kubernetes-incubator/kargo
|
library/kube.py
|
2
|
8694
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
DOCUMENTATION = """
---
module: kube
short_description: Manage Kubernetes Cluster
description:
- Create, replace, remove, and stop resources within a Kubernetes Cluster
version_added: "2.0"
options:
name:
required: false
default: null
description:
- The name associated with resource
filename:
required: false
default: null
description:
- The path and filename of the resource(s) definition file(s).
- To operate on several files this can accept a comma separated list of files or a list of files.
aliases: [ 'files', 'file', 'filenames' ]
kubectl:
required: false
default: null
description:
- The path to the kubectl bin
namespace:
required: false
default: null
description:
- The namespace associated with the resource(s)
resource:
required: false
default: null
description:
- The resource to perform an action on. pods (po), replicationControllers (rc), services (svc)
label:
required: false
default: null
description:
- The labels used to filter specific resources.
server:
required: false
default: null
description:
- The url for the API server that commands are executed against.
force:
required: false
default: false
description:
- A flag to indicate to force delete, replace, or stop.
all:
required: false
default: false
description:
- A flag to indicate delete all, stop all, or all namespaces when checking exists.
log_level:
required: false
default: 0
description:
- Indicates the level of verbosity of logging by kubectl.
state:
required: false
choices: ['present', 'absent', 'latest', 'reloaded', 'stopped']
default: present
description:
- present handles checking existence or creating if definition file provided,
absent handles deleting resource(s) based on other options,
latest handles creating or updating based on existence,
reloaded handles updating resource(s) definition using definition file,
stopped handles stopping resource(s) based on other options.
requirements:
- kubectl
author: "Kenny Jones (@kenjones-cisco)"
"""
EXAMPLES = """
- name: test nginx is present
kube: name=nginx resource=rc state=present
- name: test nginx is stopped
kube: name=nginx resource=rc state=stopped
- name: test nginx is absent
kube: name=nginx resource=rc state=absent
- name: test nginx is present
kube: filename=/tmp/nginx.yml
- name: test nginx and postgresql are present
kube: files=/tmp/nginx.yml,/tmp/postgresql.yml
- name: test nginx and postgresql are present
kube:
files:
- /tmp/nginx.yml
- /tmp/postgresql.yml
"""
class KubeManager(object):
def __init__(self, module):
self.module = module
self.kubectl = module.params.get('kubectl')
if self.kubectl is None:
self.kubectl = module.get_bin_path('kubectl', True)
self.base_cmd = [self.kubectl]
if module.params.get('server'):
self.base_cmd.append('--server=' + module.params.get('server'))
if module.params.get('log_level'):
self.base_cmd.append('--v=' + str(module.params.get('log_level')))
if module.params.get('namespace'):
self.base_cmd.append('--namespace=' + module.params.get('namespace'))
self.all = module.params.get('all')
self.force = module.params.get('force')
self.name = module.params.get('name')
self.filename = [f.strip() for f in module.params.get('filename') or []]
self.resource = module.params.get('resource')
self.label = module.params.get('label')
def _execute(self, cmd):
args = self.base_cmd + cmd
try:
rc, out, err = self.module.run_command(args)
if rc != 0:
self.module.fail_json(
msg='error running kubectl (%s) command (rc=%d), out=\'%s\', err=\'%s\'' % (' '.join(args), rc, out, err))
except Exception as exc:
self.module.fail_json(
msg='error running kubectl (%s) command: %s' % (' '.join(args), str(exc)))
return out.splitlines()
def _execute_nofail(self, cmd):
args = self.base_cmd + cmd
rc, out, err = self.module.run_command(args)
if rc != 0:
return None
return out.splitlines()
def create(self, check=True, force=True):
if check and self.exists():
return []
cmd = ['apply']
if force:
cmd.append('--force')
if not self.filename:
self.module.fail_json(msg='filename required to create')
cmd.append('--filename=' + ','.join(self.filename))
return self._execute(cmd)
def replace(self, force=True):
cmd = ['apply']
if force:
cmd.append('--force')
if not self.filename:
self.module.fail_json(msg='filename required to reload')
cmd.append('--filename=' + ','.join(self.filename))
return self._execute(cmd)
def delete(self):
if not self.force and not self.exists():
return []
cmd = ['delete']
if self.filename:
cmd.append('--filename=' + ','.join(self.filename))
else:
if not self.resource:
self.module.fail_json(msg='resource required to delete without filename')
cmd.append(self.resource)
if self.name:
cmd.append(self.name)
if self.label:
cmd.append('--selector=' + self.label)
if self.all:
cmd.append('--all')
if self.force:
cmd.append('--ignore-not-found')
return self._execute(cmd)
def exists(self):
cmd = ['get']
if self.filename:
cmd.append('--filename=' + ','.join(self.filename))
else:
if not self.resource:
self.module.fail_json(msg='resource required without filename')
cmd.append(self.resource)
if self.name:
cmd.append(self.name)
if self.label:
cmd.append('--selector=' + self.label)
if self.all:
cmd.append('--all-namespaces')
cmd.append('--no-headers')
result = self._execute_nofail(cmd)
if not result:
return False
return True
# TODO: This is currently unused, perhaps convert to 'scale' with a replicas param?
def stop(self):
if not self.force and not self.exists():
return []
cmd = ['stop']
if self.filename:
cmd.append('--filename=' + ','.join(self.filename))
else:
if not self.resource:
self.module.fail_json(msg='resource required to stop without filename')
cmd.append(self.resource)
if self.name:
cmd.append(self.name)
if self.label:
cmd.append('--selector=' + self.label)
if self.all:
cmd.append('--all')
if self.force:
cmd.append('--ignore-not-found')
return self._execute(cmd)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(),
filename=dict(type='list', aliases=['files', 'file', 'filenames']),
namespace=dict(),
resource=dict(),
label=dict(),
server=dict(),
kubectl=dict(),
force=dict(default=False, type='bool'),
all=dict(default=False, type='bool'),
log_level=dict(default=0, type='int'),
state=dict(default='present', choices=['present', 'absent', 'latest', 'reloaded', 'stopped']),
),
mutually_exclusive=[['filename', 'list']]
)
changed = False
manager = KubeManager(module)
state = module.params.get('state')
if state == 'present':
result = manager.create(check=False)
elif state == 'absent':
result = manager.delete()
elif state == 'reloaded':
result = manager.replace()
elif state == 'stopped':
result = manager.stop()
elif state == 'latest':
result = manager.replace()
else:
module.fail_json(msg='Unrecognized state %s.' % state)
module.exit_json(changed=changed,
msg='success: %s' % (' '.join(result))
)
from ansible.module_utils.basic import * # noqa
if __name__ == '__main__':
main()
|
apache-2.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.