repo_name (string, 5-100 chars) | path (string, 4-299 chars) | copies (class, 990 values) | size (string, 4-7 chars) | content (string, 666-1.03M chars) | license (class, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
robwebset/script.tvtunes | resources/lib/settings.py | 1 | 19968 | # -*- coding: utf-8 -*-
import os
import unicodedata
import xbmc
import xbmcaddon
import xbmcvfs
import xbmcgui
ADDON = xbmcaddon.Addon(id='script.tvtunes')
ADDON_ID = ADDON.getAddonInfo('id')
# Common logging module
def log(txt, debug_logging_enabled=True, loglevel=xbmc.LOGDEBUG):
if ((ADDON.getSetting("logEnabled") == "true") and debug_logging_enabled) or (loglevel != xbmc.LOGDEBUG):
if isinstance(txt, str):
txt = txt.decode("utf-8")
message = u'%s: %s' % (ADDON_ID, txt)
xbmc.log(msg=message.encode("utf-8"), level=loglevel)
def normalize_string(text):
try:
text = text.replace(":", "")
text = text.replace("/", "-")
text = text.replace("\\", "-")
text = text.replace("<", "")
text = text.replace(">", "")
text = text.replace("*", "")
text = text.replace("?", "")
text = text.replace('|', "")
text = text.strip()
        # Remove any trailing dots, as Windows cannot have directory names
        # that end with a dot
text = text.rstrip('.')
text = unicodedata.normalize('NFKD', unicode(text, 'utf-8')).encode('ascii', 'ignore')
except:
pass
return text
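# Illustrative sketch only (not part of the original addon): how the
# normalisation above behaves on a made-up title.
def _normalize_string_example():
    # The colon and slash are stripped/replaced and the trailing dots removed,
    # so the result is safe to use as a Windows directory name
    return normalize_string("Dr. Who: Series 7/Part 2...")  # -> "Dr. Who Series 7-Part 2"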
# There have been problems calling join with non-ASCII characters,
# so we have this method to try and do the conversion for us
def os_path_join(dir, file):
# Check if it ends in a slash
if dir.endswith("/") or dir.endswith("\\"):
# Remove the slash character
dir = dir[:-1]
# Convert each argument - if an error, then it will use the default value
# that was passed in
try:
dir = dir.decode("utf-8")
except:
pass
try:
file = file.decode("utf-8")
except:
pass
return os.path.join(dir, file)
# There have been problems calling isfile with non-ASCII characters,
# so we have this method to try and do the conversion for us
def os_path_isfile(workingPath):
    # Supporting special paths like smb:// means that we can not just call
    # os.path.isfile, as it will return False even if it is a file
    # (A bit of a shame - but that's the way it is)
if workingPath.startswith("smb://") or workingPath.startswith("nfs://") or workingPath.startswith("afp://"):
# The test for the file existing will not work, so return true
return True
# Convert each argument - if an error, then it will use the default value
# that was passed in
try:
workingPath = workingPath.decode("utf-8")
except:
pass
try:
return os.path.isfile(workingPath)
except:
return False
# Splits a path the same way as os.path.split but supports paths of a different
# OS than that being run on
def os_path_split(fullpath):
# Check if it ends in a slash
if fullpath.endswith("/") or fullpath.endswith("\\"):
# Remove the slash character
fullpath = fullpath[:-1]
try:
slash1 = fullpath.rindex("/")
except:
slash1 = -1
try:
slash2 = fullpath.rindex("\\")
except:
slash2 = -1
# Parse based on the last type of slash in the string
if slash1 > slash2:
return fullpath.rsplit("/", 1)
return fullpath.rsplit("\\", 1)
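# Minimal sketch (illustrative only, sample paths are made up): the helper
# above splits on whichever slash style appears last, so it also copes with
# paths that come from a different OS.
def _os_path_split_example():
    return [
        os_path_split("smb://server/share/theme.mp3"),  # -> ['smb://server/share', 'theme.mp3']
        os_path_split("C:\\Music\\Themes\\theme.mp3")   # -> ['C:\\Music\\Themes', 'theme.mp3']
    ]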
# Get the contents of the directory
def list_dir(dirpath):
    # There is a problem with the afp protocol: if a directory not ending
    # in a / is given, an error occurs because the filename is just appended
    # to the end without actually checking for a directory end character
    # http://forum.xbmc.org/showthread.php?tid=192255&pid=1681373#pid1681373
if dirpath.startswith('afp://') and (not dirpath.endswith('/')):
dirpath = os_path_join(dirpath, '/')
return xbmcvfs.listdir(dirpath)
# Checks if a directory exists (Do not use for files)
def dir_exists(dirpath):
# There is an issue with password protected smb shares, in that they seem to
# always return false for a directory exists call, so if we have a smb with
# a password and user name, then we return true
if Settings.isSmbEnabled() and ('@' in dirpath):
return True
directoryPath = dirpath
    # The xbmcvfs exists interface requires that directories end in a slash
    # It used to be OK not to have the slash in Gotham, but it is now required
if (not directoryPath.endswith("/")) and (not directoryPath.endswith("\\")):
dirSep = "/"
if "\\" in directoryPath:
dirSep = "\\"
directoryPath = "%s%s" % (directoryPath, dirSep)
return xbmcvfs.exists(directoryPath)
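# Rough sketch (not part of the original addon, example path is made up):
# dir_exists only appends the separator, so callers can pass a path with or
# without a trailing slash; xbmcvfs itself is only available inside Kodi/XBMC.
def _dir_exists_example():
    # "special://profile/playlists" becomes "special://profile/playlists/"
    # before xbmcvfs.exists is called
    return dir_exists("special://profile/playlists")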
################################################################
# Class to make it easier to see which screen is being displayed
################################################################
class WindowShowing():
@staticmethod
def isHome():
return xbmc.getCondVisibility("Window.IsVisible(home)")
@staticmethod
def isVideoLibrary():
# For now check for both videolibrary (before v17) and videos (v17 onwards)
return xbmc.getCondVisibility("Window.IsVisible(videos)") or xbmc.getCondVisibility("Window.IsVisible(videolibrary)") or WindowShowing.isTvTunesOverrideTvShows() or WindowShowing.isTvTunesOverrideMovie() or WindowShowing.isTvTunesOverrideContinuePlaying()
@staticmethod
def isMovieInformation():
return xbmc.getCondVisibility("Window.IsVisible(movieinformation)") or WindowShowing.isTvTunesOverrideMovie()
@staticmethod
def isTvShows():
return xbmc.getCondVisibility("Container.Content(tvshows)") or (xbmc.getInfoLabel("ListItem.dbtype") == 'tvshow') or WindowShowing.isTvTunesOverrideTvShows()
@staticmethod
def isSeasons():
return xbmc.getCondVisibility("Container.Content(Seasons)") or (xbmc.getInfoLabel("ListItem.dbtype") == 'season') or WindowShowing.isTvTunesOverrideTvShows()
@staticmethod
def isEpisodes():
return xbmc.getCondVisibility("Container.Content(Episodes)") or (xbmc.getInfoLabel("ListItem.dbtype") == 'episode') or WindowShowing.isTvTunesOverrideTvShows()
@staticmethod
def isMovies():
return xbmc.getCondVisibility("Container.Content(movies)") or (xbmc.getInfoLabel("ListItem.dbtype") == 'movie') or WindowShowing.isTvTunesOverrideMovie()
@staticmethod
def isScreensaver():
return xbmc.getCondVisibility("System.ScreenSaverActive")
@staticmethod
def isShutdownMenu():
return xbmc.getCondVisibility("Window.IsVisible(shutdownmenu)")
@staticmethod
def isMusicSection():
inMusicSection = False
# Only record being in the music section if we have it enabled in the settings
if Settings.isPlayMusicList():
if xbmc.getCondVisibility("Container.Content(albums)"):
inMusicSection = True
elif xbmc.getCondVisibility("Container.Content(artists)"):
inMusicSection = True
return inMusicSection
@staticmethod
def isTvTunesOverrideTvShows():
isOverride = False
try:
# If there is a problem with a skin where there is no current window Id, avoid the exception
win = xbmcgui.Window(xbmcgui.getCurrentWindowId())
if win.getProperty("TvTunesSupported").lower() == "tvshows":
isOverride = True
except:
isOverride = False
return isOverride
@staticmethod
def isTvTunesOverrideMovie():
isOverride = False
try:
# If there is a problem with a skin where there is no current window Id, avoid the exception
win = xbmcgui.Window(xbmcgui.getCurrentWindowId())
if win.getProperty("TvTunesSupported").lower() == "movies":
isOverride = True
except:
isOverride = False
return isOverride
@staticmethod
def isTvTunesOverrideContinuePlaying():
# Check the home screen for the forced continue playing flag
if xbmcgui.Window(12000).getProperty("TvTunesContinuePlaying").lower() == "true":
            # Never allow continued playing on the Home Screen
if WindowShowing.isHome():
                # An addon may have forgotten to unset the flag, or crashed,
                # so force the unsetting of the flag
log("WindowShowing: Removing TvTunesContinuePlaying property when on Home screen")
xbmcgui.Window(12000).clearProperty("TvTunesContinuePlaying")
return False
# Only pay attention to the forced playing if there is actually media playing
if xbmc.Player().isPlaying():
return True
return False
# Works out if the custom window option to play the TV Theme is set
# and we have just opened a dialog over that
@staticmethod
def isTvTunesOverrideContinuePrevious():
# Check the master override that forces the existing playing theme
if WindowShowing.isTvTunesOverrideContinuePlaying():
return True
if WindowShowing.isTvTunesOverrideTvShows() or WindowShowing.isTvTunesOverrideMovie():
# Check if this is a dialog, in which case we just continue playing
try:
dialogid = xbmcgui.getCurrentWindowDialogId()
except:
dialogid = 9999
if dialogid != 9999:
# Is a dialog so return True
return True
return False
@staticmethod
def isRecentEpisodesAdded():
return xbmc.getInfoLabel("container.folderpath") == "videodb://recentlyaddedepisodes/"
@staticmethod
def isTvShowTitles():
showingTvShowTitles = (xbmc.getInfoLabel("container.folderpath") == "videodb://tvshows/titles/")
        # There is a case where the user may have created a smart playlist that
        # groups together all the TV Shows; if they also have the option to play
        # themes while browsing TV Shows enabled, then we need to return True here
if not showingTvShowTitles:
# Check if we are viewing a video playlist
if 'special://profile/playlists/video/' in xbmc.getInfoLabel("container.folderpath"):
            # Check if what is being shown is actually TV Shows
showingTvShowTitles = WindowShowing.isTvShows()
elif (xbmc.getInfoLabel("ListItem.dbtype") == 'tvshow'):
showingTvShowTitles = True
return showingTvShowTitles
@staticmethod
def isMusicVideoTitles():
return xbmc.getInfoLabel("container.folderpath") == "videodb://musicvideos/"
@staticmethod
def isPluginPath():
currentPath = xbmc.getInfoLabel("ListItem.Path")
if "plugin://" in currentPath:
            # There is a special case for Emby.Kodi, which supports TvTunes
            # https://github.com/MediaBrowser/Emby.Kodi
            # So we pretend it isn't a plugin as long as Custom Path is set
if ("plugin.video.emby" in currentPath) and Settings.isCustomPathEnabled():
return False
return True
return False
@staticmethod
def isMovieSet():
folderPathId = "videodb://movies/sets/"
return xbmc.getCondVisibility("!IsEmpty(ListItem.DBID) + SubString(ListItem.Path," + folderPathId + ",left)")
##############################
# Stores Various Settings
##############################
class Settings():
@staticmethod
def reloadSettings():
# Force the reload of the settings to pick up any new values
global ADDON
ADDON = xbmcaddon.Addon(id='script.tvtunes')
    # Checks if the given file is named as a video file
@staticmethod
def isVideoFile(filename):
if filename in [None, ""]:
return False
if filename.lower().endswith('.mp4'):
return True
if filename.lower().endswith('.mkv'):
return True
if filename.lower().endswith('.avi'):
return True
if filename.lower().endswith('.mov'):
return True
if filename.lower().endswith('.m2ts'):
return True
if filename.lower().endswith('.webm'):
return True
return False
@staticmethod
def isThemePlayingEnabled():
return ADDON.getSetting("enableThemePlaying") == 'true'
@staticmethod
def isCustomPathEnabled():
return ADDON.getSetting("custom_path_enable") == 'true'
@staticmethod
def getCustomPath():
return ADDON.getSetting("custom_path").decode("utf-8")
@staticmethod
def getThemeVolume():
return int(float(ADDON.getSetting("volume")))
@staticmethod
def isLoop():
return ADDON.getSetting("loop") == 'true'
@staticmethod
def isFadeOut():
return ADDON.getSetting("fadeOut") == 'true'
@staticmethod
def isFadeIn():
return ADDON.getSetting("fadeIn") == 'true'
@staticmethod
def isSmbEnabled():
return ADDON.getSetting("smb_share") == 'true'
@staticmethod
def getSmbUser():
if ADDON.getSetting("smb_login"):
return ADDON.getSetting("smb_login")
else:
return "guest"
@staticmethod
def getSmbPassword():
if ADDON.getSetting("smb_psw"):
return ADDON.getSetting("smb_psw")
else:
return "guest"
# Calculates the regular expression to use to search for theme files
@staticmethod
def getThemeFileRegEx(searchDir=None, extensionOnly=False, audioOnly=False, videoOnly=False):
fileTypes = ""
if not videoOnly:
fileTypes = "mp3" # mp3 is the default that is always supported
if(ADDON.getSetting("wma") == 'true'):
fileTypes = fileTypes + "|wma"
if(ADDON.getSetting("flac") == 'true'):
fileTypes = fileTypes + "|flac"
if(ADDON.getSetting("m4a") == 'true'):
fileTypes = fileTypes + "|m4a"
if(ADDON.getSetting("wav") == 'true'):
fileTypes = fileTypes + "|wav"
if(ADDON.getSetting("wav") == 'true'):
fileTypes = fileTypes + "|wav"
if not audioOnly:
videoFileTypes = Settings.getVideoThemeFileExtensions()
if videoFileTypes not in [None, ""]:
if len(fileTypes) > 0:
fileTypes = fileTypes + '|'
fileTypes = fileTypes + videoFileTypes
themeRegEx = '(theme[ _A-Za-z0-9.-]*.(' + fileTypes + ')$)'
# If using the directory method then remove the requirement to have "theme" in the name
if (searchDir is not None) and Settings.isThemeDirEnabled():
            # Make sure this is checking the theme directory, not its parent
if searchDir.endswith(Settings.getThemeDirectory()):
extensionOnly = True
# See if we do not want the theme keyword
if extensionOnly:
themeRegEx = '(.(' + fileTypes + ')$)'
return themeRegEx
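    # Illustrative sketch only (not part of the original addon): how the
    # regular expression built above could be used with Python's re module.
    # The file names below are assumptions for demonstration.
    @staticmethod
    def _themeFileRegExExample():
        import re
        regex = Settings.getThemeFileRegEx()  # e.g. '(theme[ _A-Za-z0-9.-]*.(mp3)$)'
        names = ["theme.mp3", "theme 2.mp3", "episode01.mp3"]
        # The first two names match, the plain episode file does not
        return [n for n in names if re.search(regex, n, re.IGNORECASE)]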
# Calculates the regular expression to use to search for trailer video files
@staticmethod
def getTrailerFileRegEx():
fileTypes = ""
videoFileTypes = Settings.getVideoThemeFileExtensions()
if videoFileTypes not in [None, ""]:
if len(fileTypes) > 0:
fileTypes = fileTypes + '|'
fileTypes = fileTypes + videoFileTypes
return '(-trailer[ _A-Za-z0-9.-]*.(' + fileTypes + ')$)'
@staticmethod
def getVideoThemeFileExtensions():
fileTypes = []
if(ADDON.getSetting("mp4") == 'true'):
fileTypes.append("mp4")
if(ADDON.getSetting("mkv") == 'true'):
fileTypes.append("mkv")
if(ADDON.getSetting("avi") == 'true'):
fileTypes.append("avi")
if(ADDON.getSetting("mov") == 'true'):
fileTypes.append("mov")
if(ADDON.getSetting("m2ts") == 'true'):
fileTypes.append("m2ts")
if(ADDON.getSetting("webm") == 'true'):
fileTypes.append("webm")
return '|'.join(fileTypes)
@staticmethod
def isShuffleThemes():
return ADDON.getSetting("shuffle") == 'true'
@staticmethod
def isRandomStart():
return ADDON.getSetting("random") == 'true'
@staticmethod
def getRandomFixedOffset(filename):
if not Settings.isRandomStart() or (filename in [None, ""]):
return -1
fixedOffsetSetting = "randomFixedAudioOffset"
if Settings.isVideoFile(filename):
fixedOffsetSetting = "randomFixedVideoOffset"
return int(float(ADDON.getSetting(fixedOffsetSetting)))
@staticmethod
def isPlayMovieList():
return ADDON.getSetting("movielist") == 'true'
@staticmethod
def isPlayTvShowList():
return ADDON.getSetting("tvlist") == 'true'
@staticmethod
def isPlayMusicVideoList():
return ADDON.getSetting("musicvideolist") == 'true'
@staticmethod
def isPlayVideoInformation():
return ADDON.getSetting("videoInformation") == 'true'
@staticmethod
def isPlayTvShowSeasons():
return ADDON.getSetting("tvShowSeasons") == 'true'
@staticmethod
def isPlayTvShowEpisodes():
return ADDON.getSetting("tvShowEpisodes") == 'true'
@staticmethod
def isPlayMusicList():
return ADDON.getSetting("musiclist") == 'true'
@staticmethod
def getPlayDurationLimit():
return int(float(ADDON.getSetting("endafter")))
@staticmethod
def getTrackLengthLimit():
return int(float(ADDON.getSetting("trackLengthLimit")))
# Check if the video info button should be hidden
@staticmethod
def hideVideoInfoButton():
return ADDON.getSetting("showVideoInfoButton") != 'true'
# Check the delay start value
@staticmethod
def getStartDelaySeconds(themeFile=None):
# check if this is a video file as the delay may be different
if Settings.isVideoFile(themeFile):
return int(float(ADDON.getSetting("delayVideoStart")))
return int(float(ADDON.getSetting("delayStart")))
@staticmethod
def isThemeDirEnabled():
# Theme sub directory only supported when not using a custom path
if Settings.isCustomPathEnabled():
return False
return ADDON.getSetting("searchSubDir") == 'true'
@staticmethod
def getThemeDirectory():
# Load the information about storing themes in sub-directories
# Only use the Theme dir if custom path is not used
return ADDON.getSetting("subDirName")
@staticmethod
def getStartupVolume():
# Check to see if the volume needs to be changed when the system starts
if ADDON.getSetting("resetVolumeOnStartup") == 'true':
return int(float(ADDON.getSetting("resetStartupVolumeValue")))
return -1
@staticmethod
def isVideoThemesOnlyIfOneExists():
index = int(ADDON.getSetting("playVideoThemeRules"))
if index == 2:
return True
return False
@staticmethod
def isVideoThemesFirst():
index = int(ADDON.getSetting("playVideoThemeRules"))
if index == 1:
return True
return False
@staticmethod
def useTrailers():
return ADDON.getSetting("useTrailers") == "true"
@staticmethod
def onlyPlaySingleTheme():
return ADDON.getSetting("singleThemeOnly") == 'true'
@staticmethod
def isRepeatSingleAudioAfterVideo():
if ADDON.getSetting("repeatSingleAudioAfterVideo") == 'true':
if Settings.isVideoThemesFirst():
return True
return False
@staticmethod
def showOnContextMenu():
return ADDON.getSetting("showOnContextMenu") == "true"
@staticmethod
def blockRefreshRateChange():
return ADDON.getSetting("blockChangeInRefreshRate") == "true"
| gpl-2.0 | 4,027,405,000,036,394,500 | 35.173913 | 263 | 0.627304 | false |
kburts/drf-music | Scripts/redditBot.py | 1 | 2545 | """
Script to get data from reddit (specifically /r/listentothis or /r/music). It is
supposed to be independent from the Django stuff.
Creates a new playlist on server with:
title: Listentothis
description: date created and objective of playlist
songs: front page of /r/listentothis
NOTE: Only works with YouTube. No soundcloud... yet
"""
import json
import requests
import sys
## Gather information variables
REDDIT = 'http://reddit.com/r/'
SUBREDDIT = 'listentothis'
## Use information variables
SERVER = 'http://127.0.0.1:8000/'
def CollectRedditData():
toPost = []
data = requests.get(REDDIT + SUBREDDIT + '.json')
data = json.loads(data.text).get('data').get('children')
for post in data:
if "youtube.com/" in post.get('data').get('url'):
toPost.append(
{'title': post.get('data').get('title'),
'url': post.get('data').get('url')}
)
else:
print 'removed non YT link!'
return toPost
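# Minimal sketch (illustrative only): the list returned above holds dicts with
# 'title' and 'url' keys; the values shown here are made up.
EXAMPLE_REDDIT_DATA = [
    {'title': 'Artist -- Song [Genre] (2014)',
     'url': 'https://www.youtube.com/watch?v=XXXXXXXXXXX'}
]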
## Send data to server
def addSong(song, token, playlistID):
songAPIEndpoint = SERVER + 'api/song/'
s = requests.post(
url = songAPIEndpoint,
data = ({
'name': song.get('title'),
'url': song.get('url'),
'playlist': playlistID
}),
headers = ({
'Authorization': 'JWT %s' %token.json().get('token')
})
)
print s
print s.text
def createPlaylist(password, token):
playlistAPIEndpoint = SERVER + 'api/playlist/'
playlist = requests.post(
url = playlistAPIEndpoint,
data = ({
'title': '/r/listentothis',
'description': 'Auto-generated playlist'
}),
headers = ({
'Authorization': 'JWT %s' %token.json().get('token')
})
)
return playlist.json().get('id')
def UploadToServer(songs, password, user='listentothis'):
## Get JWT Token
jwtUrl = SERVER + 'api-token-auth/'
token = requests.post(
url = jwtUrl,
data = ({
'username': user,
'password': password
})
)
## Create new playlist
playlist = createPlaylist(password, token)
## Upload new songs to server
for song in songs:
addSong(song, token, playlist)
if __name__ == "__main__":
try:
password = sys.argv[1]
except:
print "Supply system argument with password"
sys.exit()
data = CollectRedditData()
UploadToServer(data, password)
| mit | 7,201,106,919,070,885,000 | 23.95098 | 91 | 0.571709 | false |
scotthartbti/android_external_chromium_org | build/linux/unbundle/remove_bundled_libraries.py | 43 | 2549 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Removes bundled libraries to make sure they are not used.
See README for more details.
"""
import optparse
import os.path
import sys
def DoMain(argv):
my_dirname = os.path.abspath(os.path.dirname(__file__))
source_tree_root = os.path.abspath(
os.path.join(my_dirname, '..', '..', '..'))
if os.path.join(source_tree_root, 'build', 'linux', 'unbundle') != my_dirname:
print ('Sanity check failed: please run this script from ' +
'build/linux/unbundle directory.')
return 1
parser = optparse.OptionParser()
parser.add_option('--do-remove', action='store_true')
options, args = parser.parse_args(argv)
exclusion_used = {}
for exclusion in args:
exclusion_used[exclusion] = False
for root, dirs, files in os.walk(source_tree_root, topdown=False):
# Only look at paths which contain a "third_party" component
# (note that e.g. third_party.png doesn't count).
root_relpath = os.path.relpath(root, source_tree_root)
if 'third_party' not in root_relpath.split(os.sep):
continue
for f in files:
path = os.path.join(root, f)
relpath = os.path.relpath(path, source_tree_root)
excluded = False
for exclusion in args:
if relpath.startswith(exclusion):
# Multiple exclusions can match the same path. Go through all of them
# and mark each one as used.
exclusion_used[exclusion] = True
excluded = True
if excluded:
continue
# Deleting gyp files almost always leads to gyp failures.
# These files come from Chromium project, and can be replaced if needed.
if f.endswith('.gyp') or f.endswith('.gypi'):
continue
if options.do_remove:
# Delete the file - best way to ensure it's not used during build.
os.remove(path)
else:
# By default just print paths that would be removed.
print path
exit_code = 0
# Fail if exclusion list contains stale entries - this helps keep it
# up to date.
for exclusion, used in exclusion_used.iteritems():
if not used:
print '%s does not exist' % exclusion
exit_code = 1
if not options.do_remove:
print ('To actually remove files printed above, please pass ' +
'--do-remove flag.')
return exit_code
if __name__ == '__main__':
sys.exit(DoMain(sys.argv[1:]))
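# Rough usage sketch (not part of the original script): preview which bundled
# files would be removed while keeping two (assumed) exclusions; pass
# '--do-remove' as well to actually delete them.
def _example_preview_run():
  return DoMain(['third_party/yasm', 'third_party/zlib'])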
| bsd-3-clause | -2,765,794,157,529,262,000 | 28.298851 | 80 | 0.650059 | false |
frreiss/tensorflow-fred | tensorflow/tools/build_info/gen_build_info.py | 4 | 3276 | # Lint as: python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generates a Python module containing information about the build."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import six
# cuda.cuda is only valid in OSS
try:
from cuda.cuda import cuda_config # pylint: disable=g-import-not-at-top
except ImportError:
cuda_config = None
def write_build_info(filename, key_value_list):
"""Writes a Python that describes the build.
Args:
filename: filename to write to.
key_value_list: A list of "key=value" strings that will be added to the
module's "build_info" dictionary as additional entries.
"""
build_info = {}
if cuda_config:
build_info.update(cuda_config.config)
for arg in key_value_list:
key, value = six.ensure_str(arg).split("=")
if value.lower() == "true":
build_info[key] = True
elif value.lower() == "false":
build_info[key] = False
else:
build_info[key] = value.format(**build_info)
# Sort the build info to ensure deterministic output.
sorted_build_info_pairs = sorted(build_info.items())
contents = """
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
\"\"\"Auto-generated module providing information about the build.\"\"\"
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
build_info = collections.OrderedDict(%s)
""" % sorted_build_info_pairs
open(filename, "w").write(contents)
parser = argparse.ArgumentParser(
description="""Build info injection into the PIP package.""")
parser.add_argument("--raw_generate", type=str, help="Generate build_info.py")
parser.add_argument(
"--key_value", type=str, nargs="*", help="List of key=value pairs.")
args = parser.parse_args()
if args.raw_generate:
write_build_info(args.raw_generate, args.key_value)
else:
raise RuntimeError("--raw_generate must be used.")
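# Rough sketch (not part of the original script): how write_build_info might be
# called directly; the output file name and key/value pairs are assumptions.
def _example_generate():
  write_build_info("example_build_info.py",
                   ["is_cuda_build=False", "is_rocm_build=False"])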
| apache-2.0 | -8,610,942,156,774,095,000 | 32.428571 | 80 | 0.688645 | false |
Degustare/degustare | degustare/promoters/models.py | 1 | 3519 | # -*- coding: UTF-8 -*-
# models.py
#
# Copyright (C) 2013 Degustare
#
# Author(s): Cédric Gaspoz <[email protected]>
#
# This file is part of Degustare.
#
# Degustare is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Degustare is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with degustare. If not, see <http://www.gnu.org/licenses/>.
# Stdlib imports
# Core Django imports
from django.db import models
from django.utils.translation import ugettext_lazy as _
# Third-party app imports
from django_extensions.db.models import TimeStampedModel
# Degustare imports
from core.models import RenameFilesModel
class Promoter(RenameFilesModel, TimeStampedModel):
"""
The organizer of a wine tasting event. A promoter can be an individual or a company.
"""
name = models.CharField(_("name"), max_length=100, help_text=_("The name of the promoter."))
display_name = models.CharField(_("display name"), max_length=100, help_text=_("The name of the promoter that will be displayed on the application."))
description = models.TextField(_("description"), blank=True, help_text=_("The description of the promoter. This field could be null."))
public_address = models.TextField(_("address"), blank=True, help_text=_("The promoter's address that will be displayed on the application. This field could be null."))
public_email = models.EmailField(_("e-mail"), blank=True, help_text=_("The promoter's e-mail address that will be displayed on the application. This field could be null."))
public_url = models.URLField(_("website"), blank=True, help_text=_("The promoter's website that will be displayed on the application. This field could be null."))
public_info = models.TextField(_("information"), blank=True, help_text=_("Information that are directed to visitors of the application (Opening hours, policies, etc.). This field could be null."))
logo_web_small = models.ImageField(_("small web logo"), upload_to='tmp', blank=True, help_text="A max X00xX00px logo that will be displayed on the application. If null, the official Degustare logo will be displayed.")
logo_web_large = models.ImageField(_("large web logo"), upload_to='tmp', blank=True, help_text="A max X00xX00px logo that will be displayed on the application. If null, the official Degustare logo will be displayed.")
logo_print = models.ImageField(_("print logo"), upload_to='tmp', blank=True, help_text="A high resolution logo that will be printed on various documents (invoices, evaluations, reports, etc. If null, the official Degustare logo will be used.")
RENAME_FILES = {
'logo_web_small': {'dest': 'promoters/logos', 'suffix': '_web_small', 'keep_ext': True},
'logo_web_large': {'dest': 'promoters/logos', 'suffix': '_web_large', 'keep_ext': True},
'logo_print': {'dest': 'promoters/logos', 'suffix': '_print', 'keep_ext': True},
}
class Meta:
verbose_name = _('promoter')
verbose_name_plural = _('promoters')
ordering = ['display_name']
def __unicode__(self):
return self.display_name | gpl-3.0 | 2,532,337,192,749,681,000 | 54.857143 | 247 | 0.712052 | false |
arunkgupta/gramps | gramps/gen/filters/rules/person/_nodeathdate.py | 1 | 2022 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....ggettext import gettext as _
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .. import Rule
#-------------------------------------------------------------------------
# "People without a death date"
#-------------------------------------------------------------------------
class NoDeathdate(Rule):
"""People without a death date"""
name = _('People without a known death date')
description = _("Matches people without a known deathdate")
category = _('General filters')
def apply(self,db,person):
death_ref = person.get_death_ref()
if not death_ref:
return True
death = db.get_event_from_handle(death_ref.ref)
if death:
death_obj = death.get_date_object()
if not death_obj:
return True
if death_obj.sortval == 0:
return True
return False
| gpl-2.0 | -7,554,091,370,283,989,000 | 33.862069 | 75 | 0.519288 | false |
juhovh/tapcfg | scons-tools/gmcs.py | 7 | 1292 | import os.path
import SCons.Builder
import SCons.Node.FS
import SCons.Util
csccom = "$CSC $CSCFLAGS $_CSCLIBPATH -r:$_CSCLIBS -out:${TARGET.abspath} $SOURCES"
csclibcom = "$CSC -t:library $CSCLIBFLAGS $_CSCLIBPATH $_CSCLIBS -out:${TARGET.abspath} $SOURCES"
McsBuilder = SCons.Builder.Builder(action = '$CSCCOM',
source_factory = SCons.Node.FS.default_fs.Entry,
suffix = '.exe')
McsLibBuilder = SCons.Builder.Builder(action = '$CSCLIBCOM',
source_factory = SCons.Node.FS.default_fs.Entry,
suffix = '.dll')
def generate(env):
env['BUILDERS']['CLIProgram'] = McsBuilder
env['BUILDERS']['CLILibrary'] = McsLibBuilder
env['CSC'] = 'gmcs'
env['_CSCLIBS'] = "${_stripixes('-r:', CILLIBS, '', '-r', '', __env__)}"
env['_CSCLIBPATH'] = "${_stripixes('-lib:', CILLIBPATH, '', '-r', '', __env__)}"
env['CSCFLAGS'] = SCons.Util.CLVar('-platform:anycpu -codepage:utf8')
env['CSCLIBFLAGS'] = SCons.Util.CLVar('-platform:anycpu -codepage:utf8')
env['CSCCOM'] = SCons.Action.Action(csccom)
env['CSCLIBCOM'] = SCons.Action.Action(csclibcom)
def exists(env):
    return env.Detect('gmcs')
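# Illustrative SConstruct sketch (not part of the tool itself): typical use of
# the builders registered above; the file names and CILLIBS value are assumptions.
def _example_usage(env):
    env.CLILibrary('TAPNative.dll', ['TAPNative.cs'])
    env.CLIProgram('demo.exe', ['Demo.cs'], CILLIBS=['TAPNative'])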
| lgpl-2.1 | -2,434,527,621,287,536,000 | 39.375 | 97 | 0.581269 | false |
dbrumley/recfi | llvm-3.3/docs/conf.py | 4 | 8437 | # -*- coding: utf-8 -*-
#
# LLVM documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'LLVM'
copyright = u'2003-2013, LLVM Project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.3'
# The full version, including alpha/beta/rc tags.
release = '3.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%Y-%m-%d'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'llvm-theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = { "nosidebar": True }
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {'index': 'indexsidebar.html'}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'LLVMdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'LLVM.tex', u'LLVM Documentation',
u'LLVM project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = []
# Automatically derive the list of man pages from the contents of the command
# guide subdirectory.
basedir = os.path.dirname(__file__)
man_page_authors = "Maintained by The LLVM Team (http://llvm.org/)."
command_guide_subpath = 'CommandGuide'
command_guide_path = os.path.join(basedir, command_guide_subpath)
for name in os.listdir(command_guide_path):
# Ignore non-ReST files and the index page.
if not name.endswith('.rst') or name in ('index.rst',):
continue
# Otherwise, automatically extract the description.
file_subpath = os.path.join(command_guide_subpath, name)
with open(os.path.join(command_guide_path, name)) as f:
title = f.readline().rstrip('\n')
header = f.readline().rstrip('\n')
if len(header) != len(title):
print >>sys.stderr, (
"error: invalid header in %r (does not match title)" % (
file_subpath,))
if ' - ' not in title:
print >>sys.stderr, (
("error: invalid title in %r "
"(expected '<name> - <description>')") % (
file_subpath,))
# Split the name out of the title.
name,description = title.split(' - ', 1)
man_pages.append((file_subpath.replace('.rst',''), name,
description, man_page_authors, 1))
# If true, show URL addresses after external links.
#man_show_urls = False
# FIXME: Define intersphinx configuration.
intersphinx_mapping = {}
| mit | 2,591,951,988,882,091,500 | 32.480159 | 80 | 0.690648 | false |
PandaWei/tp-qemu | qemu/tests/live_snapshot_transaction.py | 6 | 1981 | import logging
from autotest.client.shared import error
from qemu.tests import live_snapshot_basic
@error.context_aware
def run(test, params, env):
"""
live_snapshot_transaction test:
    1. Boot up guest with a system disk and 2 data disks.
2. Create multiple live snapshots simultaneously for all 3 disks with transaction.
3. Check guest which should boot up and reboot successfully.
:param test: Kvm test object
:param params: Dictionary with the test parameters
:param env: Dictionary with test environment.
"""
arg_list = []
try:
for image in params.objects("images"):
image_params = params.object_params(image)
transaction_test = live_snapshot_basic.LiveSnapshot(test,
image_params,
env, image)
transaction_test.snapshot_args.update({"device": transaction_test.device})
transaction_test.snapshot_file = image + "-snap"
snapshot_file = transaction_test.get_snapshot_file()
transaction_test.snapshot_args.update({"snapshot-file": snapshot_file})
args = {"type": "blockdev-snapshot-sync",
"data": transaction_test.snapshot_args}
arg_list.append(args)
error.context("Create multiple live snapshots simultaneously"
" with transaction", logging.info)
output = transaction_test.vm.monitor.transaction(arg_list)
# return nothing on successful transaction
if bool(output):
raise error.TestFail("Live snapshot transatcion failed,"
" there should be nothing on success.\n"
"More details: %s" % output)
transaction_test.action_after_finished()
finally:
try:
transaction_test.clean()
except Exception:
pass
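# Rough sketch (illustrative only): each entry appended to arg_list above is one
# blockdev-snapshot-sync action; the device name and snapshot file are made up.
EXAMPLE_TRANSACTION_ACTION = {
    "type": "blockdev-snapshot-sync",
    "data": {"device": "drive_image1",
             "snapshot-file": "/tmp/image1-snap"}
}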
| gpl-2.0 | 4,695,742,916,996,725,000 | 40.270833 | 86 | 0.590106 | false |
Williams224/davinci-scripts | kstaretappipig/MC_12_MagDown_Kstar_etap_pipig.py | 1 | 13688 | #-- GAUDI jobOptions generated on Tue Jun 9 10:51:04 2015
#-- Contains event types :
#-- 11104115 - 146 files - 3030432 events - 881.12 GBytes
#-- Extra information about the data processing phases:
#-- Processing Pass Step-124834
#-- StepId : 124834
#-- StepName : Reco14a for MC
#-- ApplicationName : Brunel
#-- ApplicationVersion : v43r2p7
#-- OptionFiles : $APPCONFIGOPTS/Brunel/DataType-2012.py;$APPCONFIGOPTS/Brunel/MC-WithTruth.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-125836
#-- StepId : 125836
#-- StepName : Stripping20-NoPrescalingFlagged for Sim08 - Implicit merging.
#-- ApplicationName : DaVinci
#-- ApplicationVersion : v32r2p1
#-- OptionFiles : $APPCONFIGOPTS/DaVinci/DV-Stripping20-Stripping-MC-NoPrescaling.py;$APPCONFIGOPTS/DaVinci/DataType-2012.py;$APPCONFIGOPTS/DaVinci/InputType-DST.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
from Gaudi.Configuration import *
from GaudiConf import IOHelper
IOHelper('ROOT').inputFiles(['LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000001_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000002_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000003_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000004_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000005_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000006_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000007_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000008_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000009_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000010_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000011_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000012_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000013_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000014_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000015_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000016_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000017_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000018_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000019_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000020_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000021_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000022_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000023_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000024_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000025_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000026_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000027_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000028_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000029_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000030_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000031_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000032_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000033_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000034_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000035_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000036_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000037_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000038_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000039_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000040_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000041_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000042_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000043_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000044_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000045_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000046_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000047_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000048_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000050_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000051_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000052_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000053_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000054_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000055_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000056_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000057_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000058_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000059_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000060_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000061_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000062_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000063_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000064_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000065_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000066_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000067_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000068_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000069_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000070_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000071_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000072_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000073_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000074_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000075_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000076_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000077_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000078_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000079_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000080_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000081_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000082_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000083_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000084_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000085_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000086_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000087_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000088_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000089_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000090_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000091_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000092_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000093_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000094_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000095_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000096_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000097_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000098_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000099_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000100_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000101_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000102_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000103_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000104_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000105_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000106_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000107_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000108_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000109_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000110_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000111_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000112_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000113_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000114_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000115_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000116_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000117_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000119_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000120_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000121_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000122_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000123_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000124_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000125_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000126_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000127_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000128_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000129_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000130_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000131_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000132_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000133_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000134_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000135_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000136_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000137_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000138_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000140_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000141_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000142_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000143_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000144_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000145_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000146_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000147_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000148_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00038847/0000/00038847_00000149_2.AllStreams.dst'
], clear=True)
| mit | 4,996,624,453,627,941,000 | 74.208791 | 215 | 0.79325 | false |
google-research/google-research | etcmodel/models/hotpotqa/eval_utils.py | 1 | 15626 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for making predictions for HotpotQA."""
import collections
from typing import Sequence, Mapping, Tuple, List
import numpy as np
from etcmodel.models import tokenization
from etcmodel.models.hotpotqa import data_utils
from etcmodel.models.hotpotqa import generate_tf_examples_lib as lib
_TITLE_AND_SENTENCE_TYPE_IDS = [
lib.SENTENCE_TOKEN_TYPE_ID,
lib.TITLE_TOKEN_TYPE_ID,
]
_SpanType = Tuple[int, int]
_RawPredictionType = Mapping[str, np.ndarray]
def get_final_text(token_text: str,
unigram_text: str,
do_lower_case: bool = True) -> str:
"""Projects the token-concated text back to the unigram-concated text.
This function is branched from the original BERT `run_squad.py`.
When we created the data, we kept track of the alignment between original
(whitespace tokenized) tokens and our WordPiece tokenized tokens. So
now `unigram_text` contains the span of our original text corresponding to the
span that we predicted.
However, `unigram_text` may contain extra characters that we don't want in
our prediction.
For example, let's say:
token_text = steve smith
unigram_text = Steve Smith's
We don't want to return `unigram_text` because it contains the extra "'s".
We don't want to return `token_text` because it's already been normalized
(the SQuAD eval script also does punctuation stripping/lower casing but
our tokenizer does additional normalization like stripping accent
characters).
What we really want to return is "Steve Smith".
  Therefore, we have to apply a semi-complicated alignment heuristic between
  `token_text` and `unigram_text` to get a character-to-character alignment. This
can fail in certain cases in which case we just return `unigram_text`.
Args:
token_text: The text obtained by concatenating wordpiece tokens and removing
'##' and ' ##' symbols.
unigram_text: The text obtained by concatenating unigrams.
do_lower_case: Whether the tokenizer is doing lower case.
Returns:
The text corresponding to `token_text` in `unigram_text`. If unable to find
such correspondence, `unigram_text` is returned directly.
"""
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `unigram_text`, strip whitespace from the result
# and `token_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(unigram_text))
start_position = tok_text.find(token_text)
if start_position == -1:
return unigram_text
end_position = start_position + len(token_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(unigram_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
return unigram_text
# We then project the characters in `token_text` back to `unigram_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for i, tok_index in tok_ns_to_s_map.items():
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
return unigram_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
return unigram_text
output_text = unigram_text[orig_start_position:(orig_end_position + 1)]
return output_text
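# Illustration (added note, not part of the original module): for the docstring
# example above, a lower-cased WordPiece pipeline would give
#   get_final_text("steve smith", "Steve Smith's")  ->  "Steve Smith"
# because the character-to-character alignment drops the trailing "'s".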
def _get_sentence_text(sentence_id: int, raw_prediction: _RawPredictionType,
data_point) -> str:
"""Gets the sentence (or title) text in the json data point."""
actual_paragraph_id = raw_prediction["global_paragraph_ids"][sentence_id]
if actual_paragraph_id == -1:
return ""
actual_sentence_id = raw_prediction["global_sentence_ids"][sentence_id]
title, sentences = data_point["context"][actual_paragraph_id]
if actual_sentence_id == -1:
return title
return sentences[actual_sentence_id]
def _get_answer_unigram_text(token_span: _SpanType,
raw_prediction: _RawPredictionType,
data_point) -> str:
"""Gets the original answer unigram text corresponding to the token span."""
unigram_span = tuple(
raw_prediction["long_tokens_to_unigrams"][idx] for idx in token_span)
sentence_id = raw_prediction["long_sentence_ids"][token_span[0]]
sentence_text = _get_sentence_text(sentence_id, raw_prediction, data_point)
sentence_unigrams, _, _ = data_utils.whitespace_split_with_indices(
sentence_text)
answer_unigrams = sentence_unigrams[unigram_span[0]:unigram_span[1] + 1]
return " ".join(answer_unigrams)
def _get_wordpiece_detokenized_text(
token_span: _SpanType, raw_prediction: _RawPredictionType,
tokenizer: tokenization.FullTokenizer) -> str:
"""Gets the normalized answer token text given the token span."""
answer_tokens = tokenizer.convert_ids_to_tokens(
raw_prediction["long_token_ids"][token_span[0]:token_span[1] + 1])
return data_utils.wordpiece_tokens_to_normalized_text(answer_tokens)
def _get_wordpiece_final_text(token_span: _SpanType,
raw_prediction: _RawPredictionType, data_point,
tokenizer: tokenization.FullTokenizer):
"""Gets final text using WordPiece tokens."""
answer_unigram_text = _get_answer_unigram_text(token_span, raw_prediction,
data_point)
answer_token_text = _get_wordpiece_detokenized_text(token_span,
raw_prediction, tokenizer)
return get_final_text(answer_token_text, answer_unigram_text, True)
def _get_sentencepiece_detokenized_text(token_span: _SpanType,
raw_prediction: _RawPredictionType,
tokenizer: tokenization.FullTokenizer):
"""Gets final text using SentencePiece tokens."""
long_token_ids = raw_prediction["long_token_ids"]
answer_tokens = tokenizer.convert_ids_to_tokens(
long_token_ids[token_span[0]:token_span[1] + 1].tolist())
return data_utils.sentencepiece_detokenize(answer_tokens)
def get_spans_from_bio_encoding(
raw_prediction: _RawPredictionType, max_answer_length: int,
supporting_facts: Sequence[bool]) -> List[Tuple[float, _SpanType]]:
"""Gets top-1 answer span from BIO encoding."""
answer_bio_probs = raw_prediction["answer_bio_probs"]
answer_bio_ids = raw_prediction["answer_bio_ids"]
long_token_type_ids = raw_prediction["long_token_type_ids"]
long_sentence_ids = raw_prediction["long_sentence_ids"]
answer_spans = []
for begin in np.where(answer_bio_ids == 0)[0]:
if long_token_type_ids[begin] not in _TITLE_AND_SENTENCE_TYPE_IDS:
continue
end = begin
while end + 1 < len(answer_bio_ids) and answer_bio_ids[end + 1] == 1:
end += 1
if long_token_type_ids[end] not in _TITLE_AND_SENTENCE_TYPE_IDS:
continue
# Begin and end must belong to a same sentence.
begin_sentence_id = long_sentence_ids[begin]
end_sentence_id = long_sentence_ids[end]
if begin_sentence_id != end_sentence_id:
continue
    # The sentence containing begin and end must be a supporting fact.
if not supporting_facts[begin_sentence_id]:
continue
if end - begin + 1 > max_answer_length:
continue
answer_spans.append((answer_bio_probs[begin], (begin, end)))
return answer_spans
def get_spans_from_bio_encoding_v2(
raw_prediction: _RawPredictionType, max_answer_length: int,
supporting_facts: Sequence[bool]) -> List[Tuple[float, _SpanType]]:
"""Gets top-1 answer span from BIO encoding."""
answer_bio_probs = raw_prediction["answer_bio_probs"]
answer_bio_ids = raw_prediction["answer_bio_ids"]
long_token_type_ids = raw_prediction["long_token_type_ids"]
long_sentence_ids = raw_prediction["long_sentence_ids"]
span_candidates = []
curr_begin = None
for index, bio_id in enumerate(answer_bio_ids):
if bio_id == 0:
if curr_begin is not None:
span_candidates.append((curr_begin, index - 1))
curr_begin = index
elif bio_id == 1:
      # Even if a span does not start with "B", still consider it a candidate span.
if curr_begin is None:
curr_begin = index
elif curr_begin is not None:
span_candidates.append((curr_begin, index - 1))
curr_begin = None
answer_spans = []
for begin, end in span_candidates:
# Begin and end must be of title and sentence type.
if (long_token_type_ids[begin] not in _TITLE_AND_SENTENCE_TYPE_IDS or
long_token_type_ids[end] not in _TITLE_AND_SENTENCE_TYPE_IDS):
continue
# Begin and end must belong to a same sentence.
begin_sentence_id = long_sentence_ids[begin]
end_sentence_id = long_sentence_ids[end]
if begin_sentence_id != end_sentence_id:
continue
    # The sentence containing begin and end must be a supporting fact.
if not supporting_facts[begin_sentence_id]:
continue
if end - begin + 1 > max_answer_length:
continue
score = sum(answer_bio_probs[begin:end + 1]) / (end - begin + 1)
answer_spans.append((score, (begin, end)))
return answer_spans
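# Worked example (hypothetical values, added for illustration): with
# answer_bio_ids = [2, 0, 1, 1, 2] (i.e. "O B I I O") the single candidate span
# is (1, 3) and its score is the mean of answer_bio_probs[1:4]. Candidates whose
# endpoints are not title/sentence tokens, that cross sentence boundaries, that
# fall outside predicted supporting facts, or that exceed max_answer_length are
# filtered out above.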
def get_spans_from_span_encoding(
raw_prediction: _RawPredictionType, max_answer_length: int,
supporting_facts: Sequence[bool]) -> List[Tuple[float, _SpanType]]:
"""Gets top-1 answer span from SPAN encoding."""
begin_probs = raw_prediction["answer_begin_top_probs"]
begin_indices = raw_prediction["answer_begin_top_indices"]
end_probs = raw_prediction["answer_end_top_probs"]
end_indices = raw_prediction["answer_end_top_indices"]
long_token_type_ids = raw_prediction["long_token_type_ids"]
long_sentence_ids = raw_prediction["long_sentence_ids"]
answer_spans = []
for begin_prob, begin in zip(begin_probs, begin_indices):
if long_token_type_ids[begin] not in _TITLE_AND_SENTENCE_TYPE_IDS:
continue
for end_prob, end in zip(end_probs, end_indices):
if long_token_type_ids[end] not in _TITLE_AND_SENTENCE_TYPE_IDS:
continue
# Begin and end must belong to a same sentence.
begin_sentence_id = long_sentence_ids[begin]
end_sentence_id = long_sentence_ids[end]
if begin_sentence_id != end_sentence_id:
continue
      # The sentence containing begin and end must be a supporting fact.
if not supporting_facts[begin_sentence_id]:
continue
if begin > end or end - begin + 1 > max_answer_length:
continue
answer_spans.append((begin_prob * end_prob, (begin, end)))
return answer_spans
def get_top1_answer(raw_prediction: _RawPredictionType, data_point,
max_answer_length: int, supporting_facts: Sequence[bool],
tokenizer: tokenization.FullTokenizer, use_wordpiece: bool,
answer_encoding_method: str) -> str:
"""Gets top-1 answer text."""
if answer_encoding_method == "span":
answer_spans = get_spans_from_span_encoding(raw_prediction,
max_answer_length,
supporting_facts)
elif answer_encoding_method == "bio":
answer_spans = get_spans_from_bio_encoding_v2(raw_prediction,
max_answer_length,
supporting_facts)
else:
raise ValueError(f"Invalid answer encoding method {answer_encoding_method}")
if not answer_spans:
return ""
token_span = sorted(answer_spans)[-1][1]
if use_wordpiece:
return _get_wordpiece_final_text(token_span, raw_prediction, data_point,
tokenizer)
return _get_sentencepiece_detokenized_text(token_span, raw_prediction,
tokenizer)
def generate_prediction_json(raw_predictions: Sequence[_RawPredictionType],
gold_json_data,
tokenizer: tokenization.FullTokenizer,
sp_threshold: float = 0.5,
max_answer_length: int = 30,
use_wordpiece: bool = False,
answer_encoding_method: str = "span"):
"""Generates HotpotQA official format prediction json object.
Args:
raw_predictions: Raw model predict outputs.
gold_json_data: Gold json eval data.
tokenizer: The BERT tokenizer.
sp_threshold: Probability threshold for prediction supporting facts.
max_answer_length: Max number of wordpiece tokens allowed for answer.
    use_wordpiece: Whether the WordPiece tokenizer is used.
answer_encoding_method: The answer encoding method.
Returns:
The official json format of predictions.
"""
ids_to_raw_predictions = {}
for raw_prediction in raw_predictions:
unique_id = raw_prediction["unique_ids"]
if isinstance(unique_id, bytes):
unique_id = unique_id.decode("utf-8")
ids_to_raw_predictions[unique_id] = raw_prediction
answers = {}
sps = {}
for data_point in gold_json_data:
unique_id = data_point["_id"]
answers[unique_id] = ""
sps[unique_id] = []
raw_prediction = ids_to_raw_predictions.get(unique_id, None)
if raw_prediction is None:
continue
# Predicts supporting facts.
supporting_facts = raw_prediction["supporting_facts_probs"] >= sp_threshold
for sp, para_id, sent_id in zip(supporting_facts,
raw_prediction["global_paragraph_ids"],
raw_prediction["global_sentence_ids"]):
if para_id != -1 and sent_id != -1 and sp:
title = data_point["context"][para_id][0]
sps[unique_id].append([title, int(sent_id)])
# Predicts answer text.
answer_type = raw_prediction["answer_types"]
if answer_type == 0:
answers[unique_id] = get_top1_answer(raw_prediction, data_point,
max_answer_length, supporting_facts,
tokenizer, use_wordpiece,
answer_encoding_method)
elif answer_type == 1:
answers[unique_id] = "yes"
else:
answers[unique_id] = "no"
return {"answer": answers, "sp": sps}
| apache-2.0 | -2,961,833,774,225,874,400 | 39.692708 | 80 | 0.656982 | false |
auready/docker-py | tests/integration/api_swarm_test.py | 2 | 6265 | import copy
import docker
import pytest
from ..helpers import force_leave_swarm, requires_api_version
from .base import BaseAPIIntegrationTest
class SwarmTest(BaseAPIIntegrationTest):
def setUp(self):
super(SwarmTest, self).setUp()
force_leave_swarm(self.client)
def tearDown(self):
super(SwarmTest, self).tearDown()
force_leave_swarm(self.client)
@requires_api_version('1.24')
def test_init_swarm_simple(self):
assert self.init_swarm()
@requires_api_version('1.24')
def test_init_swarm_force_new_cluster(self):
pytest.skip('Test stalls the engine on 1.12.0')
assert self.init_swarm()
version_1 = self.client.inspect_swarm()['Version']['Index']
assert self.client.init_swarm(force_new_cluster=True)
version_2 = self.client.inspect_swarm()['Version']['Index']
assert version_2 != version_1
@requires_api_version('1.24')
def test_init_already_in_cluster(self):
assert self.init_swarm()
with pytest.raises(docker.errors.APIError):
self.init_swarm()
@requires_api_version('1.24')
def test_init_swarm_custom_raft_spec(self):
spec = self.client.create_swarm_spec(
snapshot_interval=5000, log_entries_for_slow_followers=1200
)
assert self.init_swarm(swarm_spec=spec)
swarm_info = self.client.inspect_swarm()
assert swarm_info['Spec']['Raft']['SnapshotInterval'] == 5000
assert swarm_info['Spec']['Raft']['LogEntriesForSlowFollowers'] == 1200
@requires_api_version('1.24')
def test_leave_swarm(self):
assert self.init_swarm()
with pytest.raises(docker.errors.APIError) as exc_info:
self.client.leave_swarm()
        assert exc_info.value.response.status_code == 500
assert self.client.leave_swarm(force=True)
with pytest.raises(docker.errors.APIError) as exc_info:
self.client.inspect_swarm()
        assert exc_info.value.response.status_code == 406
assert self.client.leave_swarm(force=True)
@requires_api_version('1.24')
def test_update_swarm(self):
assert self.init_swarm()
swarm_info_1 = self.client.inspect_swarm()
spec = self.client.create_swarm_spec(
snapshot_interval=5000, log_entries_for_slow_followers=1200,
node_cert_expiry=7776000000000000
)
assert self.client.update_swarm(
version=swarm_info_1['Version']['Index'],
swarm_spec=spec, rotate_worker_token=True
)
swarm_info_2 = self.client.inspect_swarm()
assert (
swarm_info_1['Version']['Index'] !=
swarm_info_2['Version']['Index']
)
assert swarm_info_2['Spec']['Raft']['SnapshotInterval'] == 5000
assert (
swarm_info_2['Spec']['Raft']['LogEntriesForSlowFollowers'] == 1200
)
assert (
swarm_info_1['JoinTokens']['Manager'] ==
swarm_info_2['JoinTokens']['Manager']
)
assert (
swarm_info_1['JoinTokens']['Worker'] !=
swarm_info_2['JoinTokens']['Worker']
)
@requires_api_version('1.24')
def test_update_swarm_name(self):
assert self.init_swarm()
swarm_info_1 = self.client.inspect_swarm()
spec = self.client.create_swarm_spec(
node_cert_expiry=7776000000000000, name='reimuhakurei'
)
assert self.client.update_swarm(
version=swarm_info_1['Version']['Index'], swarm_spec=spec
)
swarm_info_2 = self.client.inspect_swarm()
assert (
swarm_info_1['Version']['Index'] !=
swarm_info_2['Version']['Index']
)
assert swarm_info_2['Spec']['Name'] == 'reimuhakurei'
@requires_api_version('1.24')
def test_list_nodes(self):
assert self.init_swarm()
nodes_list = self.client.nodes()
assert len(nodes_list) == 1
node = nodes_list[0]
assert 'ID' in node
assert 'Spec' in node
assert node['Spec']['Role'] == 'manager'
filtered_list = self.client.nodes(filters={
'id': node['ID']
})
assert len(filtered_list) == 1
filtered_list = self.client.nodes(filters={
'role': 'worker'
})
assert len(filtered_list) == 0
@requires_api_version('1.24')
def test_inspect_node(self):
assert self.init_swarm()
nodes_list = self.client.nodes()
assert len(nodes_list) == 1
node = nodes_list[0]
node_data = self.client.inspect_node(node['ID'])
assert node['ID'] == node_data['ID']
assert node['Version'] == node_data['Version']
@requires_api_version('1.24')
def test_update_node(self):
assert self.init_swarm()
nodes_list = self.client.nodes()
node = nodes_list[0]
orig_spec = node['Spec']
# add a new label
new_spec = copy.deepcopy(orig_spec)
new_spec['Labels'] = {'new.label': 'new value'}
self.client.update_node(node_id=node['ID'],
version=node['Version']['Index'],
node_spec=new_spec)
updated_node = self.client.inspect_node(node['ID'])
assert new_spec == updated_node['Spec']
# Revert the changes
self.client.update_node(node_id=node['ID'],
version=updated_node['Version']['Index'],
node_spec=orig_spec)
reverted_node = self.client.inspect_node(node['ID'])
assert orig_spec == reverted_node['Spec']
@requires_api_version('1.24')
def test_remove_main_node(self):
assert self.init_swarm()
nodes_list = self.client.nodes()
node_id = nodes_list[0]['ID']
with pytest.raises(docker.errors.NotFound):
self.client.remove_node('foobar01')
with pytest.raises(docker.errors.APIError) as e:
self.client.remove_node(node_id)
assert e.value.response.status_code == 500
with pytest.raises(docker.errors.APIError) as e:
self.client.remove_node(node_id, True)
assert e.value.response.status_code == 500
| apache-2.0 | -5,370,201,884,570,877,000 | 34.596591 | 79 | 0.584836 | false |
pytorch/fairseq | examples/roberta/wsc/wsc_criterion.py | 1 | 6037 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.criterions import LegacyFairseqCriterion, register_criterion
from fairseq.data import encoders
@register_criterion("wsc")
class WSCCriterion(LegacyFairseqCriterion):
def __init__(self, args, task):
super().__init__(args, task)
if self.args.save_predictions is not None:
self.prediction_h = open(self.args.save_predictions, "w")
else:
self.prediction_h = None
self.bpe = encoders.build_bpe(args.bpe)
self.tokenizer = encoders.build_tokenizer(args.tokenizer)
def __del__(self):
if self.prediction_h is not None:
self.prediction_h.close()
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
parser.add_argument("--wsc-margin-alpha", type=float, metavar="A", default=1.0)
parser.add_argument("--wsc-margin-beta", type=float, metavar="B", default=0.0)
parser.add_argument(
"--wsc-cross-entropy",
action="store_true",
help="use cross entropy formulation instead of margin loss",
)
parser.add_argument(
"--save-predictions", metavar="FILE", help="file to save predictions to"
)
def get_masked_input(self, tokens, mask):
masked_tokens = tokens.clone()
masked_tokens[mask] = self.task.mask
return masked_tokens
def get_lprobs(self, model, tokens, mask):
logits, _ = model(src_tokens=self.get_masked_input(tokens, mask))
lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float)
scores = lprobs.gather(2, tokens.unsqueeze(-1)).squeeze(-1)
mask = mask.type_as(scores)
scores = (scores * mask).sum(dim=-1) / mask.sum(dim=-1)
return scores
def get_loss(self, query_lprobs, cand_lprobs):
if self.args.wsc_cross_entropy:
return F.cross_entropy(
torch.cat([query_lprobs, cand_lprobs]).unsqueeze(0),
query_lprobs.new([0]).long(),
)
else:
return (
-query_lprobs
+ self.args.wsc_margin_alpha
* (cand_lprobs - query_lprobs + self.args.wsc_margin_beta).clamp(min=0)
).sum()
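    # Note added for clarity (not in the original file): with the default margin
    # formulation the returned loss is
    #   sum_c [ -log p(query) + alpha * max(0, log p(cand_c) - log p(query) + beta) ]
    # so the query span's score is pushed up and any candidate scoring within
    # `beta` of it is penalized; --wsc-cross-entropy instead treats the query as
    # class 0 among [query, cand_1, ..., cand_n].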
def forward(self, model, sample, reduce=True):
# compute loss and accuracy
loss, nloss = 0.0, 0
ncorrect, nqueries = 0, 0
for i, label in enumerate(sample["labels"]):
query_lprobs = self.get_lprobs(
model,
sample["query_tokens"][i].unsqueeze(0),
sample["query_masks"][i].unsqueeze(0),
)
cand_lprobs = self.get_lprobs(
model,
sample["candidate_tokens"][i],
sample["candidate_masks"][i],
)
pred = (query_lprobs >= cand_lprobs).all().item()
if label is not None:
label = 1 if label else 0
ncorrect += 1 if pred == label else 0
nqueries += 1
if label:
# only compute a loss for positive instances
nloss += 1
loss += self.get_loss(query_lprobs, cand_lprobs)
id = sample["id"][i].item()
if self.prediction_h is not None:
print("{}\t{}\t{}".format(id, pred, label), file=self.prediction_h)
if nloss == 0:
loss = torch.tensor(0.0, requires_grad=True)
sample_size = nqueries if nqueries > 0 else 1
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["nsentences"],
"sample_size": sample_size,
"ncorrect": ncorrect,
"nqueries": nqueries,
}
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / sample_size / math.log(2),
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
ncorrect = sum(log.get("ncorrect", 0) for log in logging_outputs)
nqueries = sum(log.get("nqueries", 0) for log in logging_outputs)
if nqueries > 0:
agg_output["accuracy"] = ncorrect / float(nqueries)
return agg_output
@register_criterion("winogrande")
class WinograndeCriterion(WSCCriterion):
def forward(self, model, sample, reduce=True):
# compute loss and accuracy
query_lprobs = self.get_lprobs(
model,
sample["query_tokens"],
sample["query_masks"],
)
cand_lprobs = self.get_lprobs(
model,
sample["candidate_tokens"],
sample["candidate_masks"],
)
pred = query_lprobs >= cand_lprobs
loss = self.get_loss(query_lprobs, cand_lprobs)
sample_size = sample["query_tokens"].size(0)
ncorrect = pred.sum().item()
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["nsentences"],
"sample_size": sample_size,
"ncorrect": ncorrect,
"nqueries": sample_size,
}
return loss, sample_size, logging_output
| mit | -5,281,491,222,413,921,000 | 35.149701 | 87 | 0.568991 | false |
barseghyanartur/django-spillway | spillway/renderers/gdal.py | 2 | 3416 | import os
import tempfile
import zipfile
from rest_framework.renderers import BaseRenderer
class BaseGDALRenderer(BaseRenderer):
"""Abstract renderer which encodes to a GDAL supported raster format."""
media_type = 'application/octet-stream'
format = None
charset = None
render_style = 'binary'
def basename(self, item):
"""Returns the output filename.
Arguments:
item -- dict containing 'path'
"""
fname = os.path.basename(item['path'])
return os.path.splitext(fname)[0] + self.file_ext
@property
def file_ext(self):
return '.%s' % os.path.splitext(self.format)[0]
def render(self, data, accepted_media_type=None, renderer_context=None):
self.set_filename(self.basename(data), renderer_context)
img = data['file']
try:
imgdata = img.read()
except AttributeError:
self.set_response_length(os.path.getsize(img), renderer_context)
imgdata = open(img)
else:
img.close()
return imgdata
def set_filename(self, name, renderer_context):
type_name = 'attachment; filename=%s' % name
try:
renderer_context['response']['Content-Disposition'] = type_name
except (KeyError, TypeError):
pass
def set_response_length(self, length, renderer_context):
try:
renderer_context['response']['Content-Length'] = length
except (KeyError, TypeError):
pass
class GeoTIFFRenderer(BaseGDALRenderer):
"""Renders a raster to GeoTIFF (.tif) format."""
media_type = 'image/tiff'
format = 'tif'
class GeoTIFFZipRenderer(BaseGDALRenderer):
"""Bundles GeoTIFF rasters in a zip archive."""
media_type = 'application/zip'
format = 'tif.zip'
arcdirname = 'data'
def render(self, data, accepted_media_type=None, renderer_context=None):
if isinstance(data, dict):
data = [data]
zipname = '%s.%s' % (self.arcdirname, self.format)
self.set_filename(zipname, renderer_context)
fp = tempfile.TemporaryFile(suffix='.%s' % self.format)
with zipfile.ZipFile(fp, mode='w') as zf:
for item in data:
arcname = os.path.join(self.arcdirname, self.basename(item))
io = item['file']
try:
zf.writestr(arcname, io.read())
except AttributeError:
zf.write(io, arcname=arcname)
else:
io.close()
self.set_response_length(fp.tell(), renderer_context)
fp.seek(0)
return fp
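# Usage sketch (illustrative only; the view class below is hypothetical): these
# renderers plug into Django REST Framework content negotiation, e.g.
#
#   class RasterViewSet(viewsets.ModelViewSet):
#       renderer_classes = (GeoTIFFRenderer, GeoTIFFZipRenderer)
#
# so requests negotiating the "tif.zip" format receive every raster in the
# result set bundled into a single zip archive.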
class HFARenderer(BaseGDALRenderer):
"""Renders a raster to Erdas Imagine (.img) format."""
format = 'img'
class HFAZipRenderer(GeoTIFFZipRenderer):
"""Bundles Erdas Imagine rasters in a zip archive."""
format = 'img.zip'
class JPEGRenderer(BaseGDALRenderer):
"""Renders a raster to JPEG (.jpg) format."""
media_type = 'image/jpeg'
format = 'jpg'
class JPEGZipRenderer(GeoTIFFZipRenderer):
"""Bundles JPEG files in a zip archive."""
format = 'jpg.zip'
class PNGRenderer(BaseGDALRenderer):
"""Renders a raster to PNG (.png) format."""
media_type = 'image/png'
format = 'png'
class PNGZipRenderer(GeoTIFFZipRenderer):
"""Bundles PNG files in a zip archive."""
format = 'png.zip'
| bsd-3-clause | -7,724,765,439,345,521,000 | 28.196581 | 76 | 0.608899 | false |
linmajia/dlbench | tools/torch/torchbm.py | 2 | 6990 | import argparse
import os, sys
import time
import subprocess
# Parse arguments
current_time = time.ctime()
parser = argparse.ArgumentParser(description='Python script benchmarking torch')
parser.add_argument('-log', type=str, default=('torch_' + current_time + '.log').replace(" ", "_"),
help='Name of log file, default= torch_ + current time + .log')
parser.add_argument('-batchSize', type=str, default='64', help='Batch size in each GPU, default = 64')
parser.add_argument('-network', type=str, default='fcn5', help='name of network[fcn5 | alexnet | resnet | lstm32 | lstm64]')
parser.add_argument('-devId', type=str, help='CPU: -1, GPU: 0,1,2,3 (Multiple gpu supported)')
parser.add_argument('-numEpochs', type=str, default='10', help='number of epochs, default=10')
parser.add_argument('-epochSize', type=str, help='number of training data per epoch')
parser.add_argument('-numThreads', type=str, default='8', help='number of Threads, default=8')
parser.add_argument('-hostFile', type=str, help='path to running hosts(config in host file) for multiple machine training.')
parser.add_argument('-gpuCount', type=str, default='1', help='number of gpus in used')
parser.add_argument('-cpuCount', type=str, default='1', help='number of cpus in used for cpu version')
parser.add_argument('-lr', type=str, default='0.01', help='learning rate')
parser.add_argument('-netType', type=str, help='network type')
parser.add_argument('-debug', type=bool, default=False, help='debug mode')
args = parser.parse_args()
if args.debug: print("args: " + str(args))
# Set system variable
os.environ['OMP_NUM_THREADS'] = args.cpuCount
os.environ['OPENBLAS_NUM_THREADS'] = args.cpuCount
os.environ['MKL_NUM_THREADS'] = args.cpuCount
# Build cmd
cmd = "THC_CACHING_ALLOCATOR=1 th Main.lua "
network = args.network
numSamples = 0
if network == "fcn5":
cmd += "-LR " + args.lr +" -dataset MNIST -network ffn5"
numSamples = args.epochSize
elif network == "alexnet" or network == "resnet":
if args.devId == '-1':
cmd += " -LR " + args.lr + " -network " + network
else:
cmd += "-network " + network + " -LR " + args.lr + " "
numSamples = args.epochSize
elif "lstm" in network:
if args.devId is not None:
if "-" not in args.devId:
cmd = "THC_CACHING_ALLOCATOR=1 CUDA_VISIBLE_DEVICES=" + args.devId + " th rnn/recurrent-language-model.lua --cuda "
else:
cmd = "OMP_NUM_THREADS=%s OPENBLAS_NUM_THREADS=%s MKL_NUM_THREADS=%s th rnn/recurrent-language-model.lua --lstm --startlr 1 " % (args.cpuCount, args.cpuCount, args.cpuCount)
else:
print("Device not set, please set device by adding -devId <-1 or 0,1,2,3>. See help for more")
sys.exit(-2)
if "64" in network:
cmd += " --seqlen 64 "
else:
cmd += " --seqlen 32 "
cmd += "--lstm --hiddensize '{256,256}' --startlr " + args.lr + " --minlr " + args.lr + " "
cmd += "--batchsize " + args.batchSize + " --maxepoch " + args.numEpochs
logfile = args.log
if ".log" not in logfile:
logfile += ".log"
cmd += " >& " + logfile
if args.debug: print "cmd: " + cmd
t = time.time()
os.system(cmd)
t = time.time() - t
if args.debug: print "total time: " + str(t)
with open(logfile, "a") as logFile:
logFile.write("Total time: " + str(t) + "\n")
logFile.write(cmd + "\n")
os.system("cp " + logfile + " ../../logs")
catLog = "cat " + logfile
totalEpochBatchTime = subprocess.check_output( catLog + " | grep Speed | cut -d':' -f2 | paste -sd+ - | bc", shell=True).strip()
numEpoch = subprocess.check_output(catLog + " | grep Speed | cut -d':' -f2 | wc -l", shell=True).strip()
avgBatch = float(totalEpochBatchTime)/float(numEpoch)
avgBatch = avgBatch/1000.0
if args.debug: print("Avg Batch: " + str(avgBatch))
trainPPL = subprocess.check_output(catLog + "|grep \"Training PPL\" | cut -d':' -f2", shell=True).replace(" "," ").strip().split("\n")
valPPL = subprocess.check_output(catLog + "|grep \"Validation PPL\" | cut -d':' -f2", shell=True).replace(" "," ").strip().split("\n")
if args.debug: print "trainPPL: " + trainPPL
if args.debug: print "valPPL: " + valPPL
info = " -I "
for i in range(int(numEpoch)):
if i != 0:
info += ","
info += str(i) + ":" + valPPL[i].strip() + ":" + trainPPL[i].strip()
print " -t " + str(t) + " -a " + str(avgBatch) + info
with open(logfile, "a") as logFile:
logFile.write("Total time: " + str(t) + "\n")
os.system("cp " + logfile + " ../../logs")
sys.exit(0)
else:
print("Unknown network type " + network + ", supported ones: fcn5, alexnet, resnet, lstm32, lstm64")
sys.exit(-1)
devId = args.devId
nGPU = int(args.gpuCount)
if devId is not None:
if "-" not in devId:
if nGPU > 1:
cmd += " -nGPU " + str(nGPU)
cmd = "CUDA_VISIBLE_DEVICES=" + devId + " " + cmd
#elif "-1" == devId and (network == "ffn" or network=="fcn5"):
elif "-1" == devId:
cmd += "_cpu -nGPU 0 -type float -threads " + args.cpuCount
else:
print("Only CNN is not supported on CPU in torch")
sys.exit(-2)
else:
print("Device not set, please set device by adding -devId <-1 or 0,1,2,3>. See help for more")
sys.exit(-2)
if args.devId == '-1':
batchSize = args.batchSize
else:
batchSize = int(args.batchSize)*nGPU
numEpochs = args.numEpochs
cmd += " -batchSize " + str(batchSize) + " -epoch " + numEpochs
logfile = args.log
if ".log" not in logfile:
logfile += ".log"
cmd += ' -logfile ' + logfile + ' -save ' + logfile + " >& display.tmp"
cmd = 'THC_CACHING_ALLOCATOR=1 ' + cmd
if args.debug: print("cmd:" + cmd)
t = time.time()
os.system(cmd)
t = time.time() - t
with open(logfile, "a") as logFile:
logFile.write("Total time: " + str(t) + "\n")
logFile.write(cmd + "\n")
os.system("cp " + logfile + " ../../logs")
if args.debug: print("Time diff: " + str(t))
os.system("rm display.tmp")
getInfo = 'cat ' + logfile + ' | grep Info'
totalEpochTime = subprocess.check_output( getInfo + " | grep time | cut -d' ' -f6 | cut -d':' -f2 | paste -sd+ - | bc", shell=True)
numEpoch = subprocess.check_output(getInfo + " | grep time | cut -d' ' -f6 | cut -d':' -f2 | wc -l", shell=True)
if args.debug: print "totalEpochTime: " + totalEpochTime
if args.debug: print "numEpoch: " + numEpoch
avgEpoch = 0
if int(numEpoch) != 0:
avgEpoch = float(totalEpochTime)/float(numEpoch)
avgBatch = (avgEpoch/int(numSamples))*float(batchSize)
if args.debug: print("Avg Batch: " + str(avgBatch))
valAccuracy = subprocess.check_output(getInfo + "| grep accu | cut -d' ' -f7 | cut -d':' -f2", shell=True).strip().split('\n')
if args.debug: print "valAccuracy: " + valAccuracy
trainCE = subprocess.check_output(getInfo + "| grep Loss | cut -d' ' -f7 | cut -d':' -f2", shell=True).strip().split('\n')
if args.debug: print "trainCE: " + trainCE
info = ""
for i in range(len(valAccuracy)):
if i != 0:
info += ","
info += str(i) + ":" + valAccuracy[i] + ":" +trainCE[i]
if args.debug: print "info: " + info
print "-t " + str(t) + " -a " + str(avgBatch) + " -I " + info
| mit | -1,749,751,885,262,729,500 | 42.6875 | 176 | 0.633047 | false |
gspilio/nova | nova/openstack/common/notifier/rpc_notifier.py | 5 | 1685 | # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from nova.openstack.common import context as req_context
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
LOG = logging.getLogger(__name__)
notification_topic_opt = cfg.ListOpt(
'notification_topics', default=['notifications', ],
help='AMQP topic used for openstack notifications')
CONF = cfg.CONF
CONF.register_opt(notification_topic_opt)
def notify(context, message):
"""Sends a notification via RPC"""
if not context:
context = req_context.get_admin_context()
priority = message.get('priority',
CONF.default_notification_level)
priority = priority.lower()
for topic in CONF.notification_topics:
topic = '%s.%s' % (topic, priority)
try:
rpc.notify(context, topic, message)
except Exception:
LOG.exception(_("Could not send notification to %(topic)s. "
"Payload=%(message)s"), locals())
| apache-2.0 | -4,811,352,886,368,260,000 | 35.630435 | 78 | 0.68724 | false |
TimYi/pyresttest | pyresttest/test_mini_framework_benchmarks.py | 7 | 2117 | # Benchmarks parts of program to see what testing overhead is like
import timeit
# Test basic pycurl create/delete, time is ~2.5 microseconds
time = timeit.timeit("mycurl=Curl(); mycurl.close()", setup="from pycurl import Curl", number=1000000)
print('Curl create/destroy runtime for 1M runs (s)'+str(time))
# Test test interpret/build & configuration speeds for resttest
# Runtime is 36.29 sec, so 36 microseconds per run, or 0.036 ms
time = timeit.timeit("mytest=Test.parse_test('', input); mycurl=mytest.configure_curl(); mycurl.close()",
setup='from resttest import Test; input = {"url": "/ping", "method": "DELETE", "NAME":"foo", "group":"bar", "body":"<xml>input</xml>","headers":{"Accept":"Application/json"}}',
number=1000000)
print('Test interpret/configure test config for 1M runs (s)'+str(time))
# Just configuring the curl object from a pre-built test
# 10s/1M runs, or 0.01 ms per
time = timeit.timeit("mycurl=mytest.configure_curl(); mycurl.close()",
setup='from resttest import Test; input = {"url": "/ping", "method": "DELETE", "NAME":"foo", "group":"bar", "body":"<xml>input</xml>","headers":{"Accept":"Application/json"}}; mytest=Test.parse_test("", input);',
number=1000000)
print('Test configure curl for 1M runs (s)'+str(time))
# Time for full curl execution on Django testing rest app
# Time: 41.4s for 10k runs, or about 4.14 ms per
timeit.timeit("mycurl=mytest.configure_curl(); mycurl.setopt(pycurl.WRITEFUNCTION, lambda x: None); mycurl.perform(); mycurl.close()",
setup='import pycurl; from resttest import Test; input = {"url": "/api/person/", "NAME":"foo", "group":"bar"}; mytest=Test.parse_test("http://localhost:8000", input);',
number=10000)
# Github perf test, 27 s for 100 runs = 270 ms per
timeit.timeit("mycurl=mytest.configure_curl(); mycurl.setopt(pycurl.WRITEFUNCTION, lambda x: None); mycurl.perform(); mycurl.close()",
setup='import pycurl; from resttest import Test; input = {"url": "/search/users?q=jewzaam", "NAME":"foo", "group":"bar"}; mytest=Test.parse_test("https://api.github.com", input);',
number=100)
| apache-2.0 | -3,051,182,077,601,934,000 | 63.151515 | 220 | 0.689655 | false |
guillermooo/dart-sublime-bundle-releases | AAA.py | 1 | 3294 | # Copyright (c) 2014, Guillermo López-Anglada. Please see the AUTHORS file for details.
# All rights reserved. Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.)
'''This module is intended to run first when the different plugins load.
It should perform a basic status check so that we can catch severe
configuration errors early on and report them to the user.
'''
import sublime
import sublime_plugin
import os
from Dart.lib.sdk import SDK
from Dart.sublime_plugin_lib.panels import OutputPanel
from Dart.lib.error import FatalConfigError
from Dart.sublime_plugin_lib.io import touch
from Dart.lib import ga
HEADING = '''
Dart Package for Sublime Text
================================================================================
___ _
/ _ \ ___ ___ _ __ ___| |
| | | |/ _ \ / _ \| '_ \/ __| |
| |_| | (_) | (_) | |_) \__ \_|
\___/ \___/ \___/| .__/|___(_)
|_|
Something went wrong... :-[
This is an automatic report from the Dart package for Sublime Text.
Most likely, your settings are off. For help, check out:
* https://github.com/guillermooo/dart-sublime-bundle/wiki/Installation%20and%20Basic%20Configuration
To see a summary of your current settings, open the command palette and select
"Dart: Check Configuration".
---
If you're having trouble running this package, please open an issue in our
issue tracker[1] and paste as much information as possible from the report
below.
[1] https://github.com/dart-lang/dart-sublime-bundle/issues
'''
def check_install():
install_record = os.path.join(sublime.packages_path(),
'User/sublime-dart-plugin-installed.txt')
if os.path.exists(install_record):
return
touch(install_record)
with open(install_record, 'wt') as f:
f.write('autogenerated file. please do not delete.')
ga.Event(category='actions',
action='install',
label='Plugin installed',
value=1,
).send()
def check():
try:
SDK().check_for_critical_configuration_errors()
except FatalConfigError as e:
sublime.active_window().run_command('_dart_report_config_errors', {
'message': str(e)
})
def plugin_loaded():
check()
check_install()
class _dart_report_config_errors(sublime_plugin.WindowCommand):
def run(self, message):
v = OutputPanel('dart.config.check')
text = HEADING + '\n'
text += ('=' * 80) + '\n'
text += 'MESSAGE:\n'
text += message + '\n'
text += '\n'
text += 'CONFIGURATION:\n'
text += ('-' * 80) + '\n'
text += "editor version: {} ({})".format(sublime.version(),
sublime.channel())
text += '\n'
text += ('-' * 80) + '\n'
text += "os: {} ({})".format(sublime.platform(),
sublime.arch())
text += '\n'
text += ('-' * 80) + '\n'
setts = sublime.load_settings('Dart - Plugin Settings.sublime-settings')
text += "dart_sdk_path: {}".format(setts.get('dart_sdk_path'))
text += '\n'
text += '=' * 80
v.write(text)
v.show()
| bsd-3-clause | -760,142,418,297,389,800 | 28.401786 | 100 | 0.565442 | false |
shikhardb/scikit-learn | sklearn/tree/tests/test_export.py | 9 | 2889 | """
Testing for export functions of decision trees (sklearn.tree.export).
"""
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
def test_graphviz_toy():
# Check correctness of export_graphviz
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=1,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
out = StringIO()
export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
contents2 = "digraph Tree {\n" \
"0 [label=\"X[0] <= 0.0000\\ngini = 0.5\\n" \
"samples = 6\", shape=\"box\"] ;\n" \
"1 [label=\"gini = 0.0000\\nsamples = 3\\n" \
"value = [ 3. 0.]\", shape=\"box\"] ;\n" \
"0 -> 1 ;\n" \
"2 [label=\"gini = 0.0000\\nsamples = 3\\n" \
"value = [ 0. 3.]\", shape=\"box\"] ;\n" \
"0 -> 2 ;\n" \
"}"
assert_equal(contents1, contents2)
# Test with feature_names
out = StringIO()
export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
contents1 = out.getvalue()
contents2 = "digraph Tree {\n" \
"0 [label=\"feature0 <= 0.0000\\ngini = 0.5\\n" \
"samples = 6\", shape=\"box\"] ;\n" \
"1 [label=\"gini = 0.0000\\nsamples = 3\\n" \
"value = [ 3. 0.]\", shape=\"box\"] ;\n" \
"0 -> 1 ;\n" \
"2 [label=\"gini = 0.0000\\nsamples = 3\\n" \
"value = [ 0. 3.]\", shape=\"box\"] ;\n" \
"0 -> 2 ;\n" \
"}"
assert_equal(contents1, contents2)
# Test max_depth
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0)
contents1 = out.getvalue()
contents2 = "digraph Tree {\n" \
"0 [label=\"X[0] <= 0.0000\\ngini = 0.5\\n" \
"samples = 6\", shape=\"box\"] ;\n" \
"1 [label=\"(...)\", shape=\"box\"] ;\n" \
"0 -> 1 ;\n" \
"2 [label=\"(...)\", shape=\"box\"] ;\n" \
"0 -> 2 ;\n" \
"}"
assert_equal(contents1, contents2)
def test_graphviz_errors():
# Check for errors of export_graphviz
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1)
clf.fit(X, y)
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
if __name__ == "__main__":
import nose
nose.runmodule()
| bsd-3-clause | 6,269,548,510,660,223,000 | 31.829545 | 78 | 0.472136 | false |
jcrudy/clinvoc | clinvoc/vocabularies/ccs/icd9.py | 2 | 1242 | # from clinvoc.icd9 import ICD9PCS, ICD9CM
from . import resources
import os
import re
from clinvoc.icd9 import ICD9CM, ICD9PCS
import pandas as pd
from toolz.dicttoolz import merge
# '''
# Single category parsing
# '''
icd9cm_vocab = ICD9CM(use_decimals=False)
icd9pcs_vocab = ICD9PCS(use_decimals=False)
def _get_icd9_codes(filename, code_type):
assert code_type in ['dx', 'px']
vocab = icd9cm_vocab if code_type == 'dx' else icd9pcs_vocab
file_path = os.path.join(resources.resources, filename)
df = pd.read_csv(file_path)
code_column = df.columns[0]
result = {}
for _, row in df.iterrows():
key = (re.sub('\[[^\]]*\]', '', row[2]).strip(), re.sub('\[[^\]]*\]', '', row[4]).strip(), re.sub('\[[^\]]*\]', '', row[6]).strip(), vocab.vocab_domain, vocab.vocab_name)
# if not key[2]:
# key = key[:2] + key[1:2] + key[3:]
if key not in result:
result[key] = set()
result[key].add(vocab.standardize(row[code_column].strip('\'')))
return result
dx_code_sets_dict = _get_icd9_codes('ccs_multi_dx_tool_2015.csv', 'dx')
px_code_sets_dict = _get_icd9_codes('ccs_multi_pr_tool_2015.csv', 'px')
code_sets_dict = merge(dx_code_sets_dict, px_code_sets_dict)
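# Shape of the result (illustrative; actual labels and codes come from the CCS
# csv files): code_sets_dict maps
#   (level1_label, level2_label, level3_label, vocab_domain, vocab_name)
# tuples to sets of normalized ICD-9 codes, with dx and px categories merged.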
| mit | -607,442,997,935,483,300 | 35.529412 | 178 | 0.618357 | false |
guileschool/BEAGLEBONE-tutorials | BBB-firmware/u-boot-v2018.05-rc2/tools/dtoc/dtb_platdata.py | 1 | 20180 | #!/usr/bin/python
#
# Copyright (C) 2017 Google, Inc
# Written by Simon Glass <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0+
#
"""Device tree to platform data class
This supports converting device tree data to C structures definitions and
static data.
"""
import collections
import copy
import sys
import fdt
import fdt_util
# When we see these properties we ignore them - i.e. do not create a structure member
PROP_IGNORE_LIST = [
'#address-cells',
'#gpio-cells',
'#size-cells',
'compatible',
'linux,phandle',
"status",
'phandle',
'u-boot,dm-pre-reloc',
'u-boot,dm-tpl',
'u-boot,dm-spl',
]
# C type declarations for the types we support
TYPE_NAMES = {
fdt.TYPE_INT: 'fdt32_t',
fdt.TYPE_BYTE: 'unsigned char',
fdt.TYPE_STRING: 'const char *',
fdt.TYPE_BOOL: 'bool',
fdt.TYPE_INT64: 'fdt64_t',
}
STRUCT_PREFIX = 'dtd_'
VAL_PREFIX = 'dtv_'
# This holds information about a property which includes phandles.
#
# max_args: integer: Maximum number or arguments that any phandle uses (int).
# args: Number of args for each phandle in the property. The total number of
# phandles is len(args). This is a list of integers.
PhandleInfo = collections.namedtuple('PhandleInfo', ['max_args', 'args'])
def conv_name_to_c(name):
"""Convert a device-tree name to a C identifier
This uses multiple replace() calls instead of re.sub() since it is faster
(400ms for 1m calls versus 1000ms for the 're' version).
Args:
name: Name to convert
Return:
String containing the C version of this name
"""
new = name.replace('@', '_at_')
new = new.replace('-', '_')
new = new.replace(',', '_')
new = new.replace('.', '_')
return new
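# For example (added illustration, not in the original source):
#   conv_name_to_c('serial@f00,abc')  ->  'serial_at_f00_abc'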
def tab_to(num_tabs, line):
"""Append tabs to a line of text to reach a tab stop.
Args:
num_tabs: Tab stop to obtain (0 = column 0, 1 = column 8, etc.)
line: Line of text to append to
Returns:
        line with the correct number of tabs appended. If the line already
extends past that tab stop then a single space is appended.
"""
if len(line) >= num_tabs * 8:
return line + ' '
return line + '\t' * (num_tabs - len(line) // 8)
def get_value(ftype, value):
"""Get a value as a C expression
For integers this returns a byte-swapped (little-endian) hex string
For bytes this returns a hex string, e.g. 0x12
For strings this returns a literal string enclosed in quotes
For booleans this return 'true'
Args:
type: Data type (fdt_util)
value: Data value, as a string of bytes
"""
if ftype == fdt.TYPE_INT:
return '%#x' % fdt_util.fdt32_to_cpu(value)
elif ftype == fdt.TYPE_BYTE:
return '%#x' % ord(value[0])
elif ftype == fdt.TYPE_STRING:
return '"%s"' % value
elif ftype == fdt.TYPE_BOOL:
return 'true'
elif ftype == fdt.TYPE_INT64:
return '%#x' % value
def get_compat_name(node):
"""Get a node's first compatible string as a C identifier
Args:
node: Node object to check
Return:
Tuple:
C identifier for the first compatible string
List of C identifiers for all the other compatible strings
(possibly empty)
"""
compat = node.props['compatible'].value
aliases = []
if isinstance(compat, list):
compat, aliases = compat[0], compat[1:]
return conv_name_to_c(compat), [conv_name_to_c(a) for a in aliases]
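# For example (added illustration): a node with
#   compatible = "vendor,chip-uart", "generic-uart";
# yields ('vendor_chip_uart', ['generic_uart']).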
class DtbPlatdata(object):
"""Provide a means to convert device tree binary data to platform data
The output of this process is C structures which can be used in space-
constrained encvironments where the ~3KB code overhead of device tree
code is not affordable.
Properties:
_fdt: Fdt object, referencing the device tree
_dtb_fname: Filename of the input device tree binary file
_valid_nodes: A list of Node object with compatible strings
_include_disabled: true to include nodes marked status = "disabled"
_outfile: The current output file (sys.stdout or a real file)
_lines: Stashed list of output lines for outputting in the future
"""
def __init__(self, dtb_fname, include_disabled):
self._fdt = None
self._dtb_fname = dtb_fname
self._valid_nodes = None
self._include_disabled = include_disabled
self._outfile = None
self._lines = []
self._aliases = {}
def setup_output(self, fname):
"""Set up the output destination
Once this is done, future calls to self.out() will output to this
file.
Args:
fname: Filename to send output to, or '-' for stdout
"""
if fname == '-':
self._outfile = sys.stdout
else:
self._outfile = open(fname, 'w')
def out(self, line):
"""Output a string to the output file
Args:
line: String to output
"""
self._outfile.write(line)
def buf(self, line):
"""Buffer up a string to send later
Args:
line: String to add to our 'buffer' list
"""
self._lines.append(line)
def get_buf(self):
"""Get the contents of the output buffer, and clear it
Returns:
The output buffer, which is then cleared for future use
"""
lines = self._lines
self._lines = []
return lines
def out_header(self):
"""Output a message indicating that this is an auto-generated file"""
self.out('''/*
* DO NOT MODIFY
*
* This file was generated by dtoc from a .dtb (device tree binary) file.
*/
''')
def get_phandle_argc(self, prop, node_name):
"""Check if a node contains phandles
We have no reliable way of detecting whether a node uses a phandle
or not. As an interim measure, use a list of known property names.
Args:
prop: Prop object to check
Return:
Number of argument cells is this is a phandle, else None
"""
if prop.name in ['clocks']:
val = prop.value
if not isinstance(val, list):
val = [val]
i = 0
max_args = 0
args = []
while i < len(val):
phandle = fdt_util.fdt32_to_cpu(val[i])
target = self._fdt.phandle_to_node.get(phandle)
if not target:
raise ValueError("Cannot parse '%s' in node '%s'" %
(prop.name, node_name))
prop_name = '#clock-cells'
cells = target.props.get(prop_name)
if not cells:
raise ValueError("Node '%s' has no '%s' property" %
(target.name, prop_name))
num_args = fdt_util.fdt32_to_cpu(cells.value)
max_args = max(max_args, num_args)
args.append(num_args)
i += 1 + num_args
return PhandleInfo(max_args, args)
return None
def scan_dtb(self):
"""Scan the device tree to obtain a tree of nodes and properties
Once this is done, self._fdt.GetRoot() can be called to obtain the
device tree root node, and progress from there.
"""
self._fdt = fdt.FdtScan(self._dtb_fname)
def scan_node(self, root):
"""Scan a node and subnodes to build a tree of node and phandle info
This adds each node to self._valid_nodes.
Args:
root: Root node for scan
"""
for node in root.subnodes:
if 'compatible' in node.props:
status = node.props.get('status')
if (not self._include_disabled and not status or
status.value != 'disabled'):
self._valid_nodes.append(node)
# recurse to handle any subnodes
self.scan_node(node)
def scan_tree(self):
"""Scan the device tree for useful information
This fills in the following properties:
_valid_nodes: A list of nodes we wish to consider include in the
platform data
"""
self._valid_nodes = []
return self.scan_node(self._fdt.GetRoot())
@staticmethod
def get_num_cells(node):
"""Get the number of cells in addresses and sizes for this node
Args:
node: Node to check
Returns:
Tuple:
Number of address cells for this node
Number of size cells for this node
"""
parent = node.parent
na, ns = 2, 2
if parent:
na_prop = parent.props.get('#address-cells')
ns_prop = parent.props.get('#size-cells')
if na_prop:
na = fdt_util.fdt32_to_cpu(na_prop.value)
if ns_prop:
ns = fdt_util.fdt32_to_cpu(ns_prop.value)
return na, ns
def scan_reg_sizes(self):
"""Scan for 64-bit 'reg' properties and update the values
This finds 'reg' properties with 64-bit data and converts the value to
an array of 64-values. This allows it to be output in a way that the
C code can read.
"""
for node in self._valid_nodes:
reg = node.props.get('reg')
if not reg:
continue
na, ns = self.get_num_cells(node)
total = na + ns
if reg.type != fdt.TYPE_INT:
raise ValueError("Node '%s' reg property is not an int")
if len(reg.value) % total:
raise ValueError("Node '%s' reg property has %d cells "
'which is not a multiple of na + ns = %d + %d)' %
(node.name, len(reg.value), na, ns))
reg.na = na
reg.ns = ns
if na != 1 or ns != 1:
reg.type = fdt.TYPE_INT64
i = 0
new_value = []
val = reg.value
if not isinstance(val, list):
val = [val]
while i < len(val):
addr = fdt_util.fdt_cells_to_cpu(val[i:], reg.na)
i += na
size = fdt_util.fdt_cells_to_cpu(val[i:], reg.ns)
i += ns
new_value += [addr, size]
reg.value = new_value
def scan_structs(self):
"""Scan the device tree building up the C structures we will use.
Build a dict keyed by C struct name containing a dict of Prop
object for each struct field (keyed by property name). Where the
same struct appears multiple times, try to use the 'widest'
property, i.e. the one with a type which can express all others.
Once the widest property is determined, all other properties are
updated to match that width.
"""
structs = {}
for node in self._valid_nodes:
node_name, _ = get_compat_name(node)
fields = {}
# Get a list of all the valid properties in this node.
for name, prop in node.props.items():
if name not in PROP_IGNORE_LIST and name[0] != '#':
fields[name] = copy.deepcopy(prop)
# If we've seen this node_name before, update the existing struct.
if node_name in structs:
struct = structs[node_name]
for name, prop in fields.items():
oldprop = struct.get(name)
if oldprop:
oldprop.Widen(prop)
else:
struct[name] = prop
# Otherwise store this as a new struct.
else:
structs[node_name] = fields
upto = 0
for node in self._valid_nodes:
node_name, _ = get_compat_name(node)
struct = structs[node_name]
for name, prop in node.props.items():
if name not in PROP_IGNORE_LIST and name[0] != '#':
prop.Widen(struct[name])
upto += 1
struct_name, aliases = get_compat_name(node)
for alias in aliases:
self._aliases[alias] = struct_name
return structs
def scan_phandles(self):
"""Figure out what phandles each node uses
        We need to be careful when outputting nodes that use phandles since
they must come after the declaration of the phandles in the C file.
Otherwise we get a compiler error since the phandle struct is not yet
declared.
This function adds to each node a list of phandle nodes that the node
depends on. This allows us to output things in the right order.
"""
for node in self._valid_nodes:
node.phandles = set()
for pname, prop in node.props.items():
if pname in PROP_IGNORE_LIST or pname[0] == '#':
continue
info = self.get_phandle_argc(prop, node.name)
if info:
if not isinstance(prop.value, list):
prop.value = [prop.value]
# Process the list as pairs of (phandle, id)
pos = 0
for args in info.args:
phandle_cell = prop.value[pos]
phandle = fdt_util.fdt32_to_cpu(phandle_cell)
target_node = self._fdt.phandle_to_node[phandle]
node.phandles.add(target_node)
pos += 1 + args
def generate_structs(self, structs):
"""Generate struct defintions for the platform data
This writes out the body of a header file consisting of structure
definitions for node in self._valid_nodes. See the documentation in
README.of-plat for more information.
"""
self.out_header()
self.out('#include <stdbool.h>\n')
self.out('#include <linux/libfdt.h>\n')
# Output the struct definition
for name in sorted(structs):
self.out('struct %s%s {\n' % (STRUCT_PREFIX, name))
for pname in sorted(structs[name]):
prop = structs[name][pname]
info = self.get_phandle_argc(prop, structs[name])
if info:
# For phandles, include a reference to the target
struct_name = 'struct phandle_%d_arg' % info.max_args
self.out('\t%s%s[%d]' % (tab_to(2, struct_name),
conv_name_to_c(prop.name),
len(info.args)))
else:
ptype = TYPE_NAMES[prop.type]
self.out('\t%s%s' % (tab_to(2, ptype),
conv_name_to_c(prop.name)))
if isinstance(prop.value, list):
self.out('[%d]' % len(prop.value))
self.out(';\n')
self.out('};\n')
for alias, struct_name in self._aliases.iteritems():
self.out('#define %s%s %s%s\n'% (STRUCT_PREFIX, alias,
STRUCT_PREFIX, struct_name))
def output_node(self, node):
"""Output the C code for a node
Args:
node: node to output
"""
struct_name, _ = get_compat_name(node)
var_name = conv_name_to_c(node.name)
self.buf('static struct %s%s %s%s = {\n' %
(STRUCT_PREFIX, struct_name, VAL_PREFIX, var_name))
for pname, prop in node.props.items():
if pname in PROP_IGNORE_LIST or pname[0] == '#':
continue
member_name = conv_name_to_c(prop.name)
self.buf('\t%s= ' % tab_to(3, '.' + member_name))
# Special handling for lists
if isinstance(prop.value, list):
self.buf('{')
vals = []
# For phandles, output a reference to the platform data
# of the target node.
info = self.get_phandle_argc(prop, node.name)
if info:
# Process the list as pairs of (phandle, id)
pos = 0
for args in info.args:
phandle_cell = prop.value[pos]
phandle = fdt_util.fdt32_to_cpu(phandle_cell)
target_node = self._fdt.phandle_to_node[phandle]
name = conv_name_to_c(target_node.name)
arg_values = []
for i in range(args):
arg_values.append(str(fdt_util.fdt32_to_cpu(prop.value[pos + 1 + i])))
pos += 1 + args
vals.append('\t{&%s%s, {%s}}' % (VAL_PREFIX, name,
', '.join(arg_values)))
for val in vals:
self.buf('\n\t\t%s,' % val)
else:
for val in prop.value:
vals.append(get_value(prop.type, val))
# Put 8 values per line to avoid very long lines.
for i in xrange(0, len(vals), 8):
if i:
self.buf(',\n\t\t')
self.buf(', '.join(vals[i:i + 8]))
self.buf('}')
else:
self.buf(get_value(prop.type, prop.value))
self.buf(',\n')
self.buf('};\n')
# Add a device declaration
self.buf('U_BOOT_DEVICE(%s) = {\n' % var_name)
self.buf('\t.name\t\t= "%s",\n' % struct_name)
self.buf('\t.platdata\t= &%s%s,\n' % (VAL_PREFIX, var_name))
self.buf('\t.platdata_size\t= sizeof(%s%s),\n' % (VAL_PREFIX, var_name))
self.buf('};\n')
self.buf('\n')
self.out(''.join(self.get_buf()))
def generate_tables(self):
"""Generate device defintions for the platform data
This writes out C platform data initialisation data and
U_BOOT_DEVICE() declarations for each valid node. Where a node has
multiple compatible strings, a #define is used to make them equivalent.
See the documentation in doc/driver-model/of-plat.txt for more
information.
"""
self.out_header()
self.out('#include <common.h>\n')
self.out('#include <dm.h>\n')
self.out('#include <dt-structs.h>\n')
self.out('\n')
nodes_to_output = list(self._valid_nodes)
        # Keep outputting nodes until there are none left
while nodes_to_output:
node = nodes_to_output[0]
# Output all the node's dependencies first
for req_node in node.phandles:
if req_node in nodes_to_output:
self.output_node(req_node)
nodes_to_output.remove(req_node)
self.output_node(node)
nodes_to_output.remove(node)
def run_steps(args, dtb_file, include_disabled, output):
"""Run all the steps of the dtoc tool
Args:
        args: List of non-option arguments provided to the program
dtb_file: Filename of dtb file to process
include_disabled: True to include disabled nodes
output: Name of output file
"""
if not args:
raise ValueError('Please specify a command: struct, platdata')
plat = DtbPlatdata(dtb_file, include_disabled)
plat.scan_dtb()
plat.scan_tree()
plat.scan_reg_sizes()
plat.setup_output(output)
structs = plat.scan_structs()
plat.scan_phandles()
for cmd in args[0].split(','):
if cmd == 'struct':
plat.generate_structs(structs)
elif cmd == 'platdata':
plat.generate_tables()
else:
raise ValueError("Unknown command '%s': (use: struct, platdata)" %
cmd)
| mit | -5,484,930,682,351,137,000 | 34.27972 | 98 | 0.533647 | false |
stratton-oakcoin/oakcoin | test/functional/test_framework/comptool.py | 1 | 18445 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Oakcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Compare two or more oakcoinds to each other.
To use, create a class that implements get_tests(), and pass it in
as the test generator to TestManager. get_tests() should be a python
generator that returns TestInstance objects. See below for definition.
TestNode behaves as follows:
Configure with a BlockStore and TxStore
on_inv: log the message but don't request
on_headers: log the chain tip
on_pong: update ping response map (for synchronization)
on_getheaders: provide headers via BlockStore
on_getdata: provide blocks via BlockStore
"""
from .mininode import *
from .blockstore import BlockStore, TxStore
from .util import p2p_port
import logging
logger=logging.getLogger("TestFramework.comptool")
global mininode_lock
class RejectResult(object):
"""Outcome that expects rejection of a transaction or block."""
def __init__(self, code, reason=b''):
self.code = code
self.reason = reason
def match(self, other):
if self.code != other.code:
return False
return other.reason.startswith(self.reason)
def __repr__(self):
return '%i:%s' % (self.code,self.reason or '*')
class TestNode(NodeConnCB):
def __init__(self, block_store, tx_store):
super().__init__()
self.conn = None
self.bestblockhash = None
self.block_store = block_store
self.block_request_map = {}
self.tx_store = tx_store
self.tx_request_map = {}
self.block_reject_map = {}
self.tx_reject_map = {}
# When the pingmap is non-empty we're waiting for
# a response
self.pingMap = {}
self.lastInv = []
self.closed = False
def on_close(self, conn):
self.closed = True
def add_connection(self, conn):
self.conn = conn
def on_headers(self, conn, message):
if len(message.headers) > 0:
best_header = message.headers[-1]
best_header.calc_sha256()
self.bestblockhash = best_header.sha256
def on_getheaders(self, conn, message):
response = self.block_store.headers_for(message.locator, message.hashstop)
if response is not None:
conn.send_message(response)
def on_getdata(self, conn, message):
[conn.send_message(r) for r in self.block_store.get_blocks(message.inv)]
[conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)]
for i in message.inv:
if i.type == 1:
self.tx_request_map[i.hash] = True
elif i.type == 2:
self.block_request_map[i.hash] = True
def on_inv(self, conn, message):
self.lastInv = [x.hash for x in message.inv]
def on_pong(self, conn, message):
try:
del self.pingMap[message.nonce]
except KeyError:
raise AssertionError("Got pong for unknown ping [%s]" % repr(message))
def on_reject(self, conn, message):
if message.message == b'tx':
self.tx_reject_map[message.data] = RejectResult(message.code, message.reason)
if message.message == b'block':
self.block_reject_map[message.data] = RejectResult(message.code, message.reason)
def send_inv(self, obj):
mtype = 2 if isinstance(obj, CBlock) else 1
self.conn.send_message(msg_inv([CInv(mtype, obj.sha256)]))
def send_getheaders(self):
# We ask for headers from their last tip.
m = msg_getheaders()
m.locator = self.block_store.get_locator(self.bestblockhash)
self.conn.send_message(m)
def send_header(self, header):
m = msg_headers()
m.headers.append(header)
self.conn.send_message(m)
# This assumes BIP31
def send_ping(self, nonce):
self.pingMap[nonce] = True
self.conn.send_message(msg_ping(nonce))
def received_ping_response(self, nonce):
return nonce not in self.pingMap
def send_mempool(self):
self.lastInv = []
self.conn.send_message(msg_mempool())
# TestInstance:
#
# Instances of these are generated by the test generator, and fed into the
# comptool.
#
# "blocks_and_transactions" should be an array of
# [obj, True/False/None, hash/None]:
# - obj is either a CBlock, CBlockHeader, or a CTransaction, and
# - the second value indicates whether the object should be accepted
# into the blockchain or mempool (for tests where we expect a certain
# answer), or "None" if we don't expect a certain answer and are just
# comparing the behavior of the nodes being tested.
# - the third value is the hash to test the tip against (if None or omitted,
# use the hash of the block)
# - NOTE: if a block header, no test is performed; instead the header is
# just added to the block_store. This is to facilitate block delivery
# when communicating with headers-first clients (when withholding an
# intermediate block).
# sync_every_block: if True, then each block will be inv'ed, synced, and
# nodes will be tested based on the outcome for the block. If False,
# then inv's accumulate until all blocks are processed (or max inv size
# is reached) and then sent out in one inv message. Then the final block
# will be synced across all connections, and the outcome of the final
# block will be tested.
# sync_every_tx: analogous to behavior for sync_every_block, except if outcome
# on the final tx is None, then contents of entire mempool are compared
# across all connections. (If outcome of final tx is specified as true
# or false, then only the last tx is tested against outcome.)
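# Illustrative example (not from the original file; b1, tx1 and b2 are
# hypothetical objects): accept b1, expect tx1 to be rejected with reject
# code 16, and only compare the nodes against each other for b2:
#
#   TestInstance([[b1, True], [tx1, RejectResult(16, b'bad-txns')], [b2, None]],
#                sync_every_block=False)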
class TestInstance(object):
def __init__(self, objects=None, sync_every_block=True, sync_every_tx=False):
self.blocks_and_transactions = objects if objects else []
self.sync_every_block = sync_every_block
self.sync_every_tx = sync_every_tx
class TestManager(object):
def __init__(self, testgen, datadir):
self.test_generator = testgen
self.connections = []
self.test_nodes = []
self.block_store = BlockStore(datadir)
self.tx_store = TxStore(datadir)
self.ping_counter = 1
def add_all_connections(self, nodes):
for i in range(len(nodes)):
# Create a p2p connection to each node
test_node = TestNode(self.block_store, self.tx_store)
self.test_nodes.append(test_node)
self.connections.append(NodeConn('127.0.0.1', p2p_port(i), nodes[i], test_node))
# Make sure the TestNode (callback class) has a reference to its
# associated NodeConn
test_node.add_connection(self.connections[-1])
def clear_all_connections(self):
self.connections = []
self.test_nodes = []
def wait_for_disconnections(self):
def disconnected():
return all(node.closed for node in self.test_nodes)
return wait_until(disconnected, timeout=10)
def wait_for_verack(self):
return all(node.wait_for_verack() for node in self.test_nodes)
def wait_for_pings(self, counter):
def received_pongs():
return all(node.received_ping_response(counter) for node in self.test_nodes)
return wait_until(received_pongs)
# sync_blocks: Wait for all connections to request the blockhash given
# then send get_headers to find out the tip of each node, and synchronize
# the response by using a ping (and waiting for pong with same nonce).
def sync_blocks(self, blockhash, num_blocks):
def blocks_requested():
return all(
blockhash in node.block_request_map and node.block_request_map[blockhash]
for node in self.test_nodes
)
# --> error if not requested
if not wait_until(blocks_requested, attempts=20*num_blocks):
raise AssertionError("Not all nodes requested block")
# Send getheaders message
[ c.cb.send_getheaders() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
    # Analogous to sync_blocks (see above)
def sync_transaction(self, txhash, num_events):
# Wait for nodes to request transaction (50ms sleep * 20 tries * num_events)
def transaction_requested():
return all(
txhash in node.tx_request_map and node.tx_request_map[txhash]
for node in self.test_nodes
)
# --> error if not requested
if not wait_until(transaction_requested, attempts=20*num_events):
raise AssertionError("Not all nodes requested transaction")
# Get the mempool
[ c.cb.send_mempool() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Sort inv responses from each node
with mininode_lock:
[ c.cb.lastInv.sort() for c in self.connections ]
# Verify that the tip of each connection all agree with each other, and
# with the expected outcome (if given)
def check_results(self, blockhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
if c.cb.bestblockhash != self.connections[0].cb.bestblockhash:
return False
elif isinstance(outcome, RejectResult): # Check that block was rejected w/ code
if c.cb.bestblockhash == blockhash:
return False
if blockhash not in c.cb.block_reject_map:
logger.error('Block not in reject map: %064x' % (blockhash))
return False
if not outcome.match(c.cb.block_reject_map[blockhash]):
logger.error('Block rejected with %s instead of expected %s: %064x' % (c.cb.block_reject_map[blockhash], outcome, blockhash))
return False
elif ((c.cb.bestblockhash == blockhash) != outcome):
return False
return True
# Either check that the mempools all agree with each other, or that
# txhash's presence in the mempool matches the outcome specified.
# This is somewhat of a strange comparison, in that we're either comparing
# a particular tx to an outcome, or the entire mempools altogether;
# perhaps it would be useful to add the ability to check explicitly that
# a particular tx's existence in the mempool is the same across all nodes.
def check_mempool(self, txhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
# Make sure the mempools agree with each other
if c.cb.lastInv != self.connections[0].cb.lastInv:
return False
elif isinstance(outcome, RejectResult): # Check that tx was rejected w/ code
if txhash in c.cb.lastInv:
return False
if txhash not in c.cb.tx_reject_map:
logger.error('Tx not in reject map: %064x' % (txhash))
return False
if not outcome.match(c.cb.tx_reject_map[txhash]):
logger.error('Tx rejected with %s instead of expected %s: %064x' % (c.cb.tx_reject_map[txhash], outcome, txhash))
return False
elif ((txhash in c.cb.lastInv) != outcome):
return False
return True
def run(self):
# Wait until verack is received
self.wait_for_verack()
test_number = 1
for test_instance in self.test_generator.get_tests():
# We use these variables to keep track of the last block
# and last transaction in the tests, which are used
# if we're not syncing on every block or every tx.
[ block, block_outcome, tip ] = [ None, None, None ]
[ tx, tx_outcome ] = [ None, None ]
invqueue = []
for test_obj in test_instance.blocks_and_transactions:
b_or_t = test_obj[0]
outcome = test_obj[1]
# Determine if we're dealing with a block or tx
if isinstance(b_or_t, CBlock): # Block test runner
block = b_or_t
block_outcome = outcome
tip = block.sha256
# each test_obj can have an optional third argument
# to specify the tip we should compare with
# (default is to use the block being tested)
if len(test_obj) >= 3:
tip = test_obj[2]
# Add to shared block_store, set as current block
# If there was an open getdata request for the block
# previously, and we didn't have an entry in the
# block_store, then immediately deliver, because the
# node wouldn't send another getdata request while
# the earlier one is outstanding.
first_block_with_hash = True
if self.block_store.get(block.sha256) is not None:
first_block_with_hash = False
with mininode_lock:
self.block_store.add_block(block)
for c in self.connections:
if first_block_with_hash and block.sha256 in c.cb.block_request_map and c.cb.block_request_map[block.sha256] == True:
# There was a previous request for this block hash
# Most likely, we delivered a header for this block
# but never had the block to respond to the getdata
c.send_message(msg_block(block))
else:
c.cb.block_request_map[block.sha256] = False
# Either send inv's to each node and sync, or add
# to invqueue for later inv'ing.
if (test_instance.sync_every_block):
# if we expect success, send inv and sync every block
# if we expect failure, just push the block and see what happens.
if outcome == True:
[ c.cb.send_inv(block) for c in self.connections ]
self.sync_blocks(block.sha256, 1)
else:
[ c.send_message(msg_block(block)) for c in self.connections ]
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
if (not self.check_results(tip, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(2, block.sha256))
elif isinstance(b_or_t, CBlockHeader):
block_header = b_or_t
self.block_store.add_header(block_header)
[ c.cb.send_header(block_header) for c in self.connections ]
else: # Tx test runner
assert(isinstance(b_or_t, CTransaction))
tx = b_or_t
tx_outcome = outcome
# Add to shared tx store and clear map entry
with mininode_lock:
self.tx_store.add_transaction(tx)
for c in self.connections:
c.cb.tx_request_map[tx.sha256] = False
# Again, either inv to all nodes or save for later
if (test_instance.sync_every_tx):
[ c.cb.send_inv(tx) for c in self.connections ]
self.sync_transaction(tx.sha256, 1)
if (not self.check_mempool(tx.sha256, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(1, tx.sha256))
# Ensure we're not overflowing the inv queue
if len(invqueue) == MAX_INV_SZ:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
# Do final sync if we weren't syncing on every block or every tx.
if (not test_instance.sync_every_block and block is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_blocks(block.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_results(tip, block_outcome)):
raise AssertionError("Block test failed at test %d" % test_number)
if (not test_instance.sync_every_tx and tx is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_transaction(tx.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_mempool(tx.sha256, tx_outcome)):
raise AssertionError("Mempool test failed at test %d" % test_number)
logger.info("Test %d: PASS" % test_number)
test_number += 1
[ c.disconnect_node() for c in self.connections ]
self.wait_for_disconnections()
self.block_store.close()
self.tx_store.close()
| mit | -5,134,176,956,727,703,000 | 44.208333 | 149 | 0.582434 | false |
marcoitur/Freecad_test | src/Mod/OpenSCAD/prototype.py | 27 | 29821 | #!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (LGPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
import FreeCAD
import re,math
from OpenSCADFeatures import *
from OpenSCAD2Dgeom import *
from OpenSCADUtils import *
if open.__module__ == '__builtin__':
pythonopen = open # to distinguish python built-in open function from the one declared here
def openscadmesh(doc,scadstr,objname):
import Part,Mesh,os,OpenSCADUtils
tmpfilename=OpenSCADUtils.callopenscadstring(scadstr,'stl')
if tmpfilename:
#mesh1 = doc.getObject(objname) #reuse imported object
Mesh.insert(tmpfilename)
os.unlink(tmpfilename)
mesh1=doc.getObject(objname) #blog
mesh1.ViewObject.hide()
sh=Part.Shape()
sh.makeShapeFromMesh(mesh1.Mesh.Topology,0.1)
solid = Part.Solid(sh)
obj=doc.addObject("Part::FeaturePython",objname)
ImportObject(obj,mesh1) #This object is not mutable from the GUI
ViewProviderTree(obj.ViewObject)
solid=solid.removeSplitter()
if solid.Volume < 0:
solid.complement()
obj.Shape=solid#.removeSplitter()
return obj
else:
print scadstr
class Node:
    #fnmin=12 # maximal fn for implicit polygon rendering
fnmin= FreeCAD.ParamGet(\
"User parameter:BaseApp/Preferences/Mod/OpenSCAD").\
GetInt('useMaxFN')
    planedim=1e10 #size of the square used as x-y-plane
def __init__(self,name,arguments=None,children=None,):
pass
self.name=name
self.arguments=arguments or {}
self.children=children or []
def __repr__(self):
str1 ='Node(name=%s' % self.name
if self.arguments:
str1 += ',arguments=%s' % self.arguments
if self.children:
str1 += ',children=%s' % self.children
return str1+')'
def __nonzero__(self):
        '''a Node is not obsolete merely because it has no children; it is only
        considered empty if it has neither a name, arguments, nor children'''
return bool(self.name or self.arguments or self.children)
def __len__(self):
        '''return the number of children'''
return len(self.children)
def __getitem__(self,key):
        '''direct access to the children'''
return self.children.__getitem__(key)
def rlen(self,checkmultmarix=False):
'''Total number of nodes'''
if self.children:
return 1+sum([ch.rlen() for ch in self.children])
else:
return 1
def addtofreecad(self,doc=None,fcpar=None):
def center(obj,x,y,z):
obj.Placement = FreeCAD.Placement(\
FreeCAD.Vector(-x/2.0,-y/2.0,-z/2.0),\
FreeCAD.Rotation(0,0,0,1))
import FreeCAD,Part
if not doc:
doc=FreeCAD.newDocument()
namel=self.name.lower()
multifeature={'union':"Part::MultiFuse",'imp_union':"Part::MultiFuse",
'intersection':"Part::MultiCommon"}
if namel in multifeature:
if len(self.children)>1:
obj=doc.addObject(multifeature[namel],namel)
subobjs = [child.addtofreecad(doc,obj) for child in self.children]
obj.Shapes = subobjs
for subobj in subobjs:
subobj.ViewObject.hide()
elif len(self.children)==1:
obj = self.children[0].addtofreecad(doc,fcpar or True)
else:
obj = fcpar
elif namel == 'difference':
if len(self.children)==1:
obj = self.children[0].addtofreecad(doc,fcpar or True)
else:
obj=doc.addObject("Part::Cut",namel)
base = self.children[0].addtofreecad(doc,obj)
if len(self.children)==2:
tool = self.children[1].addtofreecad(doc,obj)
else:
tool = Node(name='imp_union',\
children=self.children[1:]).addtofreecad(doc,obj)
obj.Base = base
obj.Tool = tool
base.ViewObject.hide()
tool.ViewObject.hide()
elif namel == 'cube':
obj=doc.addObject('Part::Box',namel)
x,y,z=self.arguments['size']
obj.Length=x
obj.Width=y
obj.Height=z
if self.arguments['center']:
center(obj,x,y,z)
elif namel == 'sphere':
obj=doc.addObject("Part::Sphere",namel)
obj.Radius = self.arguments['r']
elif namel == 'cylinder':
h = self.arguments['h']
r1 ,r2 = self.arguments['r1'], self.arguments['r2']
if '$fn' in self.arguments and self.arguments['$fn'] > 2 \
and self.arguments['$fn']<=Node.fnmin: # polygonal
if r1 == r2: # prismatic
obj = doc.addObject("Part::Prism","prism")
obj.Polygon = int(self.arguments['$fn'])
obj.Circumradius = r1
obj.Height = h
if self.arguments['center']:
center(obj,0,0,h)
#base.ViewObject.hide()
elif False: #use Frustum Feature with makeRuledSurface
obj=doc.addObject("Part::FeaturePython",'frustum')
Frustum(obj,r1,r2,int(self.arguments['$fn']),h)
ViewProviderTree(obj.ViewObject)
if self.arguments['center']:
center(obj,0,0,h)
else: #Use Part::Loft and GetWire Feature
obj=doc.addObject('Part::Loft','frustum')
import Draft
p1 = Draft.makePolygon(int(self.arguments['$fn']),r1)
p2 = Draft.makePolygon(int(self.arguments['$fn']),r2)
if self.arguments['center']:
p1.Placement = FreeCAD.Placement(\
FreeCAD.Vector(0.0,0.0,-h/2.0),FreeCAD.Rotation())
p2.Placement = FreeCAD.Placement(\
FreeCAD.Vector(0.0,0.0,h/2.0),FreeCAD.Rotation())
else:
p2.Placement = FreeCAD.Placement(\
FreeCAD.Vector(0.0,0.0,h),FreeCAD.Rotation())
w1=doc.addObject("Part::FeaturePython",'polygonwire1')
w2=doc.addObject("Part::FeaturePython",'polygonwire2')
GetWire(w1,p1)
GetWire(w2,p2)
ViewProviderTree(w1.ViewObject)
ViewProviderTree(w2.ViewObject)
obj.Sections=[w1,w2]
obj.Solid=True
obj.Ruled=True
p1.ViewObject.hide()
p2.ViewObject.hide()
w1.ViewObject.hide()
w2.ViewObject.hide()
else:
if r1 == r2:
obj=doc.addObject("Part::Cylinder",namel)
obj.Height = h
obj.Radius = r1
else:
obj=doc.addObject("Part::Cone",'cone')
obj.Height = h
obj.Radius1, obj.Radius2 = r1, r2
if self.arguments['center']:
center(obj,0,0,h)
elif namel == 'polyhedron':
obj = doc.addObject("Part::Feature",namel)
points=self.arguments['points']
faces=self.arguments['triangles']
shell=Part.Shell([Part.Face(Part.makePolygon(\
[tuple(points[pointindex]) for pointindex in \
(face+face[0:1])])) for face in faces])
# obj.Shape=Part.Solid(shell).removeSplitter()
solid=Part.Solid(shell).removeSplitter()
if solid.Volume < 0:
# solid.complement()
solid.reverse()
obj.Shape=solid#.removeSplitter()
elif namel == 'polygon':
obj = doc.addObject("Part::Feature",namel)
points=self.arguments['points']
paths = self.arguments.get('paths')
if not paths:
faces=[Part.Face(Part.makePolygon([(x,y,0) for x,y in points+points[0:1]]))]
else:
faces= [Part.Face(Part.makePolygon([(points[pointindex][0],points[pointindex][1],0) for \
pointindex in (path+path[0:1])])) for path in paths]
obj.Shape=subtractfaces(faces)
elif namel == 'square':
obj = doc.addObject("Part::Plane",namel)
x,y = self.arguments['size']
obj.Length = x
obj.Width = y
if self.arguments['center']:
center(obj,x,y,0)
elif namel == 'circle':
r = self.arguments['r']
import Draft
if '$fn' in self.arguments and self.arguments['$fn'] != 0 \
and self.arguments['$fn']<=Node.fnmin:
obj=Draft.makePolygon(int(self.arguments['$fn']),r)
else:
obj=Draft.makeCircle(r) # create a Face
#obj = doc.addObject("Part::Circle",namel);obj.Radius = r
elif namel == 'color':
if len(self.children) == 1:
obj = self.children[0].addtofreecad(doc,fcpar or True)
else:
obj = Node(name='imp_union',\
children=self.children).addtofreecad(doc,fcpar or True)
obj.ViewObject.ShapeColor = tuple([float(p) for p in self.arguments[:3]]) #RGB
transp = 100 - int(math.floor(100*self.arguments[3])) #Alpha
obj.ViewObject.Transparency = transp
elif namel == 'multmatrix':
assert(len(self.children)>0)
m1l=[round(f,12) for f in sum(self.arguments,[])] #Thats the original matrix
m1=FreeCAD.Matrix(*tuple(m1l)) #Thats the original matrix
if isspecialorthogonalpython(fcsubmatrix(m1)): #a Placement can represent the transformation
if len(self.children) == 1:
obj = self.children[0].addtofreecad(doc,fcpar or True)
else:
obj = Node(name='imp_union',\
children=self.children).addtofreecad(doc,fcpar or True)
#FreeCAD.Console.PrintMessage('obj %s\nmat %s/n' % (obj.Placement,m1))
obj.Placement=FreeCAD.Placement(m1).multiply(obj.Placement)
else: #we need to apply the matrix transformation to the Shape using a custom PythonFeature
obj=doc.addObject("Part::FeaturePython",namel)
if len(self.children) == 1:
child = self.children[0].addtofreecad(doc,obj)
else:
child = Node(name='imp_union',\
children=self.children).addtofreecad(doc,obj)
MatrixTransform(obj,m1,child) #This object is not mutable from the GUI
ViewProviderTree(obj.ViewObject)
#elif namel == 'import': pass #Custom Feature
elif namel == 'linear_extrude':
height = self.arguments['height']
twist = self.arguments.get('twist')
if not twist:
obj = doc.addObject("Part::Extrusion",namel)
else: #twist
obj=doc.addObject("Part::FeaturePython",'twist_extrude')
if len(self.children)==0:
base= Node('import',self.arguments).addtofreecad(doc,obj)
elif len(self.children)==1:
base = self.children[0].addtofreecad(doc,obj)
else:
base = Node(name='imp_union',\
children=self.children).addtofreecad(doc,obj)
if False and base.isDerivedFrom('Part::MultiFuse'):
#does not solve all the problems
newobj=doc.addObject("Part::FeaturePython",'refine')
RefineShape(newobj,base)
ViewProviderTree(newobj.ViewObject)
base.ViewObject.hide()
base=newobj
if not twist:
obj.Base= base
obj.Dir = (0,0,height)
else: #twist
Twist(obj,base,height,-twist)
ViewProviderTree(obj.ViewObject)
if self.arguments['center']:
center(obj,0,0,height)
base.ViewObject.hide()
elif namel == 'rotate_extrude':
obj = doc.addObject("Part::Revolution",namel)
if len(self.children)==0:
base= Node('import',self.arguments).addtofreecad(doc,obj)
elif len(self.children)==1:
base = self.children[0].addtofreecad(doc,obj)
else:
base = Node(name='imp_union',\
children=self.children).addtofreecad(doc,obj)
if False and base.isDerivedFrom('Part::MultiFuse'):
#creates 'Axe and meridian are confused' Errors
newobj=doc.addObject("Part::FeaturePython",'refine')
RefineShape(newobj,base)
ViewProviderTree(newobj.ViewObject)
base.ViewObject.hide()
base=newobj
obj.Source= base
obj.Axis = (0.00,1.00,0.00)
obj.Base = (0.00,0.00,0.00)
obj.Angle = 360.00
base.ViewObject.hide()
obj.Placement=FreeCAD.Placement(FreeCAD.Vector(),FreeCAD.Rotation(0,0,90))
elif namel == 'projection':
if self.arguments['cut']:
planename='xy_plane_used_for_project_cut'
obj=doc.addObject('Part::MultiCommon','projection_cut')
plane = doc.getObject(planename)
if not plane:
plane=doc.addObject("Part::Plane",planename)
plane.Length=Node.planedim*2
plane.Width=Node.planedim*2
plane.Placement = FreeCAD.Placement(FreeCAD.Vector(\
-Node.planedim,-Node.planedim,0),FreeCAD.Rotation(0,0,0,1))
#plane.ViewObject.hide()
subobjs = [child.addtofreecad(doc,obj) for child in self.children]
subobjs.append(plane)
obj.Shapes = subobjs
for subobj in subobjs:
subobj.ViewObject.hide()
else:
#Do a proper projection
raise(NotImplementedError)
elif namel == 'import':
filename = self.arguments.get('file')
scale = self.arguments.get('scale')
origin = self.arguments.get('origin')
if filename:
import os
docname=os.path.split(filename)[1]
objname,extension = docname.split('.',1)
if not os.path.isabs(filename):
try:
global lastimportpath
filename=os.path.join(lastimportpath,filename)
except: raise #no path given
            # Check for a mesh file format supported by the Mesh module
if extension.lower() in reverseimporttypes()['Mesh']:
import Mesh
mesh1 = doc.getObject(objname) #reuse imported object
if not mesh1:
Mesh.insert(filename)
mesh1=doc.getObject(objname)
mesh1.ViewObject.hide()
sh=Part.Shape()
sh.makeShapeFromMesh(mesh1.Mesh.Topology,0.1)
solid = Part.Solid(sh)
obj=doc.addObject("Part::FeaturePython",'import_%s_%s'%(extension,objname))
#obj=doc.addObject('Part::Feature',)
ImportObject(obj,mesh1) #This object is not mutable from the GUI
ViewProviderTree(obj.ViewObject)
solid=solid.removeSplitter()
if solid.Volume < 0:
#sh.reverse()
#sh = sh.copy()
solid.complement()
obj.Shape=solid#.removeSplitter()
elif extension in ['dxf']:
layera = self.arguments.get('layer')
featname='import_dxf_%s_%s'%(objname,layera)
                # reusing an already imported object does not work if the
                # shape is not yet calculated
import importDXF
global dxfcache
layers=dxfcache.get(id(doc),[])
if layers:
groupobj=[go for go in layers if (not layera) or go.Label == layera]
else:
groupobj= None
if not groupobj:
groupname=objname
layers = importDXF.processdxf(doc,filename) or importDXF.layers
dxfcache[id(doc)] = layers[:]
for l in layers:
for o in l.Group:
o.ViewObject.hide()
l.ViewObject.hide()
groupobj=[go for go in layers if (not layera) or go.Label == layera]
edges=[]
for shapeobj in groupobj[0].Group:
edges.extend(shapeobj.Shape.Edges)
try:
f=edgestofaces(edges)
except Part.OCCError:
FreeCAD.Console.PrintError(\
                        'processing of dxf import failed\nPlease rework \'%s\' manually\n' % layera)
f=Part.Shape() #empty Shape
obj=doc.addObject("Part::FeaturePython",'import_dxf_%s_%s'%(objname,layera))
#obj=doc.addObject('Part::Feature',)
ImportObject(obj,groupobj[0]) #This object is not mutable from the GUI
ViewProviderTree(obj.ViewObject)
obj.Shape=f
else:
                FreeCAD.Console.PrintError(\
                    'Filetype of %s not supported\n' % (filename))
raise(NotImplementedError)
if obj: #handle origin and scale
if scale is not None and scale !=1:
if origin is not None and any([c != 0 for c in origin]):
                        raise(NotImplementedError)# order of transformations unknown
child = obj
m1=FreeCAD.Matrix()
m1.scale(scale,scale,scale)
obj=doc.addObject("Part::FeaturePython",'scale_import')
MatrixTransform(obj,m1,child) #This object is not mutable from the GUI
ViewProviderTree(obj.ViewObject)
elif origin is not None and any([c != 0 for c in origin]):
placement=FreeCAD.Placement(FreeCAD.Vector(*[-c for c in origin]),FreeCAD.Rotation())
obj.Placement=placement.multiply(obj.Placement)
else:
                FreeCAD.Console.PrintError('Import of %s failed\n' % (filename))
elif namel == 'minkowski':
childrennames=[child.name.lower() for child in self.children]
if len(self.children) == 2 and \
childrennames.count('cube')==1 and \
(childrennames.count('sphere') + \
childrennames.count('cylinder')) == 1:
if self.children[0].name.lower() == 'cube':
cube = self.children[0]
roundobj = self.children[1]
elif self.children[1].name.lower() == 'cube':
cube = self.children[1]
roundobj = self.children[0]
roundobjname=roundobj.name.lower()
issphere = roundobjname == 'sphere'
cubeobj=doc.addObject('Part::Box','roundedcube')
x,y,z=cube.arguments['size']
r=roundobj.arguments.get('r') or \
roundobj.arguments.get('r1')
cubeobj.Length=x+2*r
cubeobj.Width=y+2*r
cubeobj.Height=z+2*r*issphere
obj=doc.addObject("Part::Fillet","%s_%s"%(namel,roundobjname))
obj.Base = cubeobj
cubeobj.ViewObject.hide()
if issphere:
obj.Edges = [(i,r,r) for i in range(1,13)]
else:#cylinder
obj.Edges = [(i,r,r) for i in [1,3,5,7]]
if cube.arguments['center']:
center(cubeobj,x+2*r,y+2*r,z+2*r*issphere)
            else: #handle a rotated cylinder
#OffsetShape
raise(NotImplementedError)
elif childrennames.count('sphere')==1:
sphereindex=childrennames.index('sphere')
sphere=self.children[sphereindex]
offset=sphere.arguments['r']
nonsphere=self.children[0:sphereindex]+\
                self.children[sphereindex+1:]
obj=doc.addObject("Part::FeaturePython",'Offset')
if len(nonsphere) == 1:
child = nonsphere[0].addtofreecad(doc,obj)
else:
child = Node(name='imp_union',\
children=nonsphere).addtofreecad(doc,obj)
OffsetShape(obj,child,offset)
ViewProviderTree(obj.ViewObject)
elif False:
raise(NotImplementedError)
pass # handle rotated cylinders and select edges that
#radius = radius0 * m1.multiply(FreeCAD.Vector(0,0,1)).dot(edge.Curve.tangent(0)[0])
else:
raise(NotImplementedError)
elif namel == 'surface':
obj = doc.addObject("Part::Feature",namel) #include filename?
obj.Shape,xoff,yoff=makeSurfaceVolume(self.arguments['file'])
if self.arguments['center']:
center(obj,xoff,yoff,0.0)
return obj
#import os
#scadstr = 'surface(file = "%s", center = %s );' % \
# (self.arguments['file'], 'true' if self.arguments['center'] else 'false')
#docname=os.path.split(self.arguments['file'])[1]
#objname,extension = docname.split('.',1)
#obj = openscadmesh(doc,scadstr,objname)
elif namel in ['glide','hull']:
raise(NotImplementedError)
elif namel in ['render','subdiv'] or True:
lenchld=len(self.children)
if lenchld == 1:
FreeCAD.Console.PrintMessage('Not recognized %s\n' % (self))
obj = self.children[0].addtofreecad(doc,fcpar)
elif lenchld >1:
obj = Node(name='imp_union',\
children=self.children).addtofreecad(doc,fcpar or True)
else:
obj = doc.addObject("Part::Feature",'Not_Impl_%s'%namel)
if fcpar == True: #We are the last real object, our parent is not rendered.
return obj
if fcpar:
try:
obj.ViewObject.hide()
except: raise
        if True: #never refine the Shape, as it introduces crashes
return obj
else: #refine Shape
import Draft
if obj.Type =='Part::Extrusion' and obj.Base.Type == 'Part::Part2DObjectPython' and \
isinstance(obj.Base.Proxy,Draft._Polygon) or \
(not obj.isDerivedFrom('Part::Extrusion') and \
not obj.isDerivedFrom('Part::Boolean') and \
not obj.isDerivedFrom('Part::Cut') and \
not obj.isDerivedFrom('Part::MultiCommon') and \
not obj.isDerivedFrom('Part::MultiFuse') and \
not obj.isDerivedFrom('Part::Revolution') ) \
or (obj.isDerivedFrom('Part::FeaturePython') and isinstance(obj.Proxy,RefineShape)):
return obj
else:
newobj=doc.addObject("Part::FeaturePython",'refine')
RefineShape(newobj,obj)
ViewProviderTree(newobj.ViewObject)
obj.ViewObject.hide()
return newobj
else:
doc.recompute()
def flattengroups(self,name='group'):
"""removes group node with only one child and no arguments and empty groups"""
node=self
while (node.name==name and len(node.children)==1 and len(node.arguments)==0):
node=node.children[0]
node.children=[child for child in node.children if not (len(child.children)==0 and child.name==name)]
if node.children:
node.children = [child.flattengroups() for child in node.children]
return node
def pprint(self,level=0):
"""prints the indented tree"""
if self.arguments:
argstr = ' (%s)' % self.arguments
else:
argstr = ''
print '%s %s%s' %(' '*level,self.name,argstr)
for child in self.children:
child.pprint(level+1)
def pprint2(self,path='root',pathjust=24):
"""prints the tree. Left column contains the the systax to access a child"""
if self.arguments:
argstr = ' (%s)' % self.arguments
else:
argstr = ''
print '%s %s%s' %(path.ljust(pathjust),self.name,argstr)
for i,child in enumerate(self.children):
child.pprint2('%s[%d]'%(path,i),pathjust)
def parseexpression(e):
e=e.strip()
el = e.lower()
if len(el)==0: return None
if el == 'true': return True
elif el == 'false': return False
elif el == 'undef': return None
elif e[0].isdigit() or e[0] == '-' and len(e)>1 and e[1].isdigit():
try:
return float(e)
except ValueError:
import FreeCAD
FreeCAD.Console.PrintMessage('%s\n' % (el))
return 1.0
elif el.startswith('"'): return e.strip('"') #string literal
elif el.startswith('['):
bopen, bclose = e.count('['), e.count(']')
if bopen == bclose:
return eval(el)
else:
import FreeCAD
FreeCAD.Console.PrintMessage('%s\n' % (el))
#return eval(el)
#assert(False) #Malformed
else:
return e #Return the string
def parseargs(argstring):
if '=' in argstring:
level=0
tok=[]
a=[]
for i,char in enumerate(argstring):
if char=='[': level+=1
elif char ==']': level -=1
if level==0 and (char=='=' or char==','):
tok.append(''.join(a).strip())
a=[]
else:
a.append(char)
tok.append(''.join(a).strip())
#print tok
argdict=dict(zip(tok[0::2],[parseexpression(argstring) for argstring in tok[1::2]]))
# argdict={}
# for key, value in re.findall(r"(\$?\w+)\s*=\s*(\[?\w+]?),?\s*",argstring):
# argdict[key] = parseexpression(value)
return argdict
else:
return parseexpression(argstring)
def parsenode(str1):
name,str2=str1.strip().split('(',1)
assert('}' not in name)
name=name.strip('#!%* ')#remove/ignore modifiers
args,str3=str2.split(')',1)
str4=str3.lstrip()
if str4.startswith(';'):
#has no children
nextelement=str4[1:].lstrip()
return Node(name,parseargs(args)),nextelement
elif str4.startswith('{'):
#has children
level=0
for index,char in enumerate(str4):
if char == '{': level += 1
elif char == '}': level -= 1
if level == 0:
break
#end of children
childstr= str4[1:index].strip()
nextelement = str4[index+1:].lstrip()
bopen,bclose=childstr.count('{'),childstr.count('}')
assert(bopen == bclose)
children=[]
while childstr:
try:
childnode,childstr=parsenode(childstr)
children.append(childnode)
except ValueError:
raise
if args:
args=parseargs(args)
return Node(name,args,children),nextelement
def readfile(filename):
import os
global lastimportpath
lastimportpath,relname = os.path.split(filename)
isopenscad = relname.lower().endswith('.scad')
if isopenscad:
tmpfile=callopenscad(filename)
if OpenSCADUtils.workaroundforissue128needed():
lastimportpath = os.getcwd() #https://github.com/openscad/openscad/issues/128
f = pythonopen(tmpfile)
else:
f = pythonopen(filename)
rootnode=parsenode(f.read())[0]
f.close()
if isopenscad and tmpfile:
try:
os.unlink(tmpfile)
except OSError:
pass
return rootnode.flattengroups()
def open(filename):
import os
docname=os.path.split(filename)[1]
doc=FreeCAD.newDocument(docname)
doc.Label = (docname.split('.',1)[0])
readfile(filename).addtofreecad(doc)
#doc.recompute()
return doc
def insert(filename,docname):
try:
doc=FreeCAD.getDocument(docname)
except NameError:
doc=FreeCAD.newDocument(docname)
readfile(filename).addtofreecad(doc)
#doc.recompute()
global dxfcache
dxfcache = {}
| lgpl-2.1 | 6,419,621,455,341,096,000 | 42.093931 | 109 | 0.521411 | false |
jlaurelli/stockify | web/tests/test_env_settings.py | 1 | 1248 | """Test Environmental settings are handled properly."""
import os
import importlib
from unittest.mock import patch
# from django.test import TestCase
# from unittest import skip
# we have to use tools outside of django, because when it's initialized
# it's too late to change environment variables
from unittest import TestCase, main
class DebugSettingTest(TestCase):
"""Test if setting DEBUG is handled properly."""
_variants = {
True: ('Yes', 'YES', 'Y', 'TRUE', 'tRUE', 'true', 'On'),
False: ('No', 'nO', 'N', 'n', 'false', 'False', 'off', 'oFF'),
}
env_var_debug = 'DEBUG'
def test_debug_setting(self):
"""Check if config accepts environment variable DEBUG and sets it."""
from stockify import settings
for result, words in self._variants.items():
for word in words:
# print(word, result)
with patch.dict('os.environ', {self.env_var_debug: word}):
importlib.reload(settings)
assert self.env_var_debug in os.environ
self.assertEqual(settings.DEBUG, result)
assert self.env_var_debug not in os.environ # should be True
if __name__ == '__main__':
main()
| mit | 2,996,270,555,049,597,000 | 32.72973 | 77 | 0.610577 | false |
vgrachev8/youtube-dl | youtube_dl/extractor/nfb.py | 7 | 3584 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
compat_urllib_request,
compat_urllib_parse,
)
class NFBIE(InfoExtractor):
IE_NAME = 'nfb'
IE_DESC = 'National Film Board of Canada'
_VALID_URL = r'https?://(?:www\.)?(nfb|onf)\.ca/film/(?P<id>[\da-z_-]+)'
_TEST = {
'url': 'https://www.nfb.ca/film/qallunaat_why_white_people_are_funny',
'info_dict': {
'id': 'qallunaat_why_white_people_are_funny',
'ext': 'mp4',
'title': 'Qallunaat! Why White People Are Funny ',
'description': 'md5:836d8aff55e087d04d9f6df554d4e038',
'duration': 3128,
'uploader': 'Mark Sandiford',
'uploader_id': 'mark-sandiford',
},
'params': {
# rtmp download
'skip_download': True,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
page = self._download_webpage('https://www.nfb.ca/film/%s' % video_id, video_id, 'Downloading film page')
uploader_id = self._html_search_regex(r'<a class="director-link" href="/explore-all-directors/([^/]+)/"',
page, 'director id', fatal=False)
uploader = self._html_search_regex(r'<em class="director-name" itemprop="name">([^<]+)</em>',
page, 'director name', fatal=False)
request = compat_urllib_request.Request('https://www.nfb.ca/film/%s/player_config' % video_id,
compat_urllib_parse.urlencode({'getConfig': 'true'}).encode('ascii'))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
request.add_header('X-NFB-Referer', 'http://www.nfb.ca/medias/flash/NFBVideoPlayer.swf')
config = self._download_xml(request, video_id, 'Downloading player config XML')
title = None
description = None
thumbnail = None
duration = None
formats = []
def extract_thumbnail(media):
thumbnails = {}
for asset in media.findall('assets/asset'):
thumbnails[asset.get('quality')] = asset.find('default/url').text
if not thumbnails:
return None
if 'high' in thumbnails:
return thumbnails['high']
return list(thumbnails.values())[0]
for media in config.findall('./player/stream/media'):
if media.get('type') == 'posterImage':
thumbnail = extract_thumbnail(media)
elif media.get('type') == 'video':
duration = int(media.get('duration'))
title = media.find('title').text
description = media.find('description').text
# It seems assets always go from lower to better quality, so no need to sort
formats = [{
'url': x.find('default/streamerURI').text,
'app': x.find('default/streamerURI').text.split('/', 3)[3],
'play_path': x.find('default/url').text,
'rtmp_live': False,
'ext': 'mp4',
'format_id': x.get('quality'),
} for x in media.findall('assets/asset')]
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'uploader': uploader,
'uploader_id': uploader_id,
'formats': formats,
} | unlicense | -7,381,620,407,547,731,000 | 37.138298 | 113 | 0.540179 | false |
minlex/django-socialregistration | tests/app/views.py | 8 | 1238 | from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from socialregistration.contrib.facebook.models import FacebookProfile
from socialregistration.contrib.foursquare.models import FoursquareProfile
from socialregistration.contrib.github.models import GithubProfile
from socialregistration.contrib.instagram.models import InstagramProfile
from socialregistration.contrib.linkedin.models import LinkedInProfile
from socialregistration.contrib.openid.models import OpenIDProfile
from socialregistration.contrib.tumblr.models import TumblrProfile
from socialregistration.contrib.twitter.models import TwitterProfile
def index(request):
return render_to_response(
'index.html', dict(
facebook=FacebookProfile.objects.all(),
twitter=TwitterProfile.objects.all(),
openid=OpenIDProfile.objects.all(),
linkedin=LinkedInProfile.objects.all(),
github=GithubProfile.objects.all(),
foursquare=FoursquareProfile.objects.all(),
tumblr=TumblrProfile.objects.all(),
instagram=InstagramProfile.objects.all(),
), context_instance=RequestContext(request))
| mit | 5,831,158,185,763,440,000 | 48.52 | 74 | 0.779483 | false |
jaredkoontz/leetcode | Python/flatten-nested-list-iterator.py | 3 | 1916 | # Time: O(n), n is the number of the integers.
# Space: O(h), h is the depth of the nested lists.
# """
# This is the interface that allows for creating nested lists.
# You should not implement it, or speculate about its implementation
# """
#class NestedInteger(object):
# def isInteger(self):
# """
# @return True if this NestedInteger holds a single integer, rather than a nested list.
# :rtype bool
# """
#
# def getInteger(self):
# """
# @return the single integer that this NestedInteger holds, if it holds a single integer
# Return None if this NestedInteger holds a nested list
# :rtype int
# """
#
# def getList(self):
# """
# @return the nested list that this NestedInteger holds, if it holds a nested list
# Return None if this NestedInteger holds a single integer
# :rtype List[NestedInteger]
# """
class NestedIterator(object):
def __init__(self, nestedList):
"""
Initialize your data structure here.
:type nestedList: List[NestedInteger]
"""
self.__depth = [[nestedList, 0]]
def next(self):
"""
:rtype: int
"""
nestedList, i = self.__depth[-1]
self.__depth[-1][1] += 1
return nestedList[i].getInteger()
def hasNext(self):
"""
:rtype: bool
"""
while self.__depth:
nestedList, i = self.__depth[-1]
if i == len(nestedList):
self.__depth.pop()
elif nestedList[i].isInteger():
return True
else:
self.__depth[-1][1] += 1
self.__depth.append([nestedList[i].getList(), 0])
return False
# Your NestedIterator object will be instantiated and called as such:
# i, v = NestedIterator(nestedList), []
# while i.hasNext(): v.append(i.next())
| mit | -1,797,174,466,296,585,700 | 28.030303 | 95 | 0.561065 | false |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/sklearn/calibration.py | 7 | 20044 | """Calibration of predicted probabilities."""
# Author: Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from math import log
import numpy as np
from scipy.optimize import fmin_bfgs
from sklearn.preprocessing import LabelEncoder
from .base import BaseEstimator, ClassifierMixin, RegressorMixin, clone
from .preprocessing import label_binarize, LabelBinarizer
from .utils import check_X_y, check_array, indexable, column_or_1d
from .utils.validation import check_is_fitted
from .utils.fixes import signature
from .isotonic import IsotonicRegression
from .svm import LinearSVC
from .model_selection import check_cv
from .metrics.classification import _check_binary_probabilistic_predictions
class CalibratedClassifierCV(BaseEstimator, ClassifierMixin):
"""Probability calibration with isotonic regression or sigmoid.
With this class, the base_estimator is fit on the train set of the
cross-validation generator and the test set is used for calibration.
The probabilities for each of the folds are then averaged
for prediction. In case that cv="prefit" is passed to __init__,
it is assumed that base_estimator has been fitted already and all
data is used for calibration. Note that data for fitting the
classifier and for calibrating it must be disjoint.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. If cv=prefit, the
classifier must have been fit already on data.
method : 'sigmoid' or 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
non-parametric approach. It is not advised to use isotonic calibration
with too few calibration samples ``(<<1000)`` since it tends to
overfit.
Use sigmoids (Platt's calibration) in this case.
cv : integer, cross-validation generator, iterable or "prefit", optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. If ``y`` is
neither binary nor multiclass, :class:`sklearn.model_selection.KFold`
is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
If "prefit" is passed, it is assumed that base_estimator has been
fitted already and all data is used for calibration.
Attributes
----------
classes_ : array, shape (n_classes)
The class labels.
calibrated_classifiers_: list (len() equal to cv or 1 if cv == "prefit")
        The list of calibrated classifiers, one for each cross-validation fold,
which has been fitted on all but the validation fold and calibrated
on the validation fold.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator=None, method='sigmoid', cv=3):
self.base_estimator = base_estimator
self.method = method
self.cv = cv
def fit(self, X, y, sample_weight=None):
"""Fit the calibrated model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
X, y = indexable(X, y)
le = LabelBinarizer().fit(y)
self.classes_ = le.classes_
# Check that each cross-validation fold can have at least one
# example per class
n_folds = self.cv if isinstance(self.cv, int) \
else self.cv.n_folds if hasattr(self.cv, "n_folds") else None
if n_folds and \
np.any([np.sum(y == class_) < n_folds for class_ in
self.classes_]):
raise ValueError("Requesting %d-fold cross-validation but provided"
" less than %d examples for at least one class."
% (n_folds, n_folds))
self.calibrated_classifiers_ = []
if self.base_estimator is None:
# we want all classifiers that don't expose a random_state
# to be deterministic (and we don't want to expose this one).
base_estimator = LinearSVC(random_state=0)
else:
base_estimator = self.base_estimator
if self.cv == "prefit":
calibrated_classifier = _CalibratedClassifier(
base_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X, y, sample_weight)
else:
calibrated_classifier.fit(X, y)
self.calibrated_classifiers_.append(calibrated_classifier)
else:
cv = check_cv(self.cv, y, classifier=True)
fit_parameters = signature(base_estimator.fit).parameters
estimator_name = type(base_estimator).__name__
if (sample_weight is not None
and "sample_weight" not in fit_parameters):
warnings.warn("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
base_estimator_sample_weight = None
else:
base_estimator_sample_weight = sample_weight
for train, test in cv.split(X, y):
this_estimator = clone(base_estimator)
if base_estimator_sample_weight is not None:
this_estimator.fit(
X[train], y[train],
sample_weight=base_estimator_sample_weight[train])
else:
this_estimator.fit(X[train], y[train])
calibrated_classifier = _CalibratedClassifier(
this_estimator, method=self.method,
classes=self.classes_)
if sample_weight is not None:
calibrated_classifier.fit(X[test], y[test],
sample_weight[test])
else:
calibrated_classifier.fit(X[test], y[test])
self.calibrated_classifiers_.append(calibrated_classifier)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
# Compute the arithmetic mean of the predictions of the calibrated
# classifiers
mean_proba = np.zeros((X.shape[0], len(self.classes_)))
for calibrated_classifier in self.calibrated_classifiers_:
proba = calibrated_classifier.predict_proba(X)
mean_proba += proba
mean_proba /= len(self.calibrated_classifiers_)
return mean_proba
def predict(self, X):
"""Predict the target of new samples. Can be different from the
prediction of the uncalibrated classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples,)
The predicted class.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
return self.classes_[np.argmax(self.predict_proba(X), axis=1)]
class _CalibratedClassifier(object):
"""Probability calibration with isotonic regression or sigmoid.
It assumes that base_estimator has already been fit, and trains the
calibration on the input set of the fit function. Note that this class
should not be used as an estimator directly. Use CalibratedClassifierCV
with cv="prefit" instead.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. No default value since
it has to be an already fitted estimator.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
non-parametric approach based on isotonic regression.
classes : array-like, shape (n_classes,), optional
Contains unique classes used to fit the base estimator.
if None, then classes is extracted from the given target values
in fit().
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator, method='sigmoid', classes=None):
self.base_estimator = base_estimator
self.method = method
self.classes = classes
def _preproc(self, X):
n_classes = len(self.classes_)
if hasattr(self.base_estimator, "decision_function"):
df = self.base_estimator.decision_function(X)
if df.ndim == 1:
df = df[:, np.newaxis]
elif hasattr(self.base_estimator, "predict_proba"):
df = self.base_estimator.predict_proba(X)
if n_classes == 2:
df = df[:, 1:]
else:
raise RuntimeError('classifier has no decision_function or '
'predict_proba method.')
idx_pos_class = self.label_encoder_.\
transform(self.base_estimator.classes_)
return df, idx_pos_class
def fit(self, X, y, sample_weight=None):
"""Calibrate the fitted model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
self.label_encoder_ = LabelEncoder()
if self.classes is None:
self.label_encoder_.fit(y)
else:
self.label_encoder_.fit(self.classes)
self.classes_ = self.label_encoder_.classes_
Y = label_binarize(y, self.classes_)
df, idx_pos_class = self._preproc(X)
self.calibrators_ = []
for k, this_df in zip(idx_pos_class, df.T):
if self.method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
elif self.method == 'sigmoid':
calibrator = _SigmoidCalibration()
else:
raise ValueError('method should be "sigmoid" or '
'"isotonic". Got %s.' % self.method)
calibrator.fit(this_df, Y[:, k], sample_weight)
self.calibrators_.append(calibrator)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas. Can be exact zeros.
"""
n_classes = len(self.classes_)
proba = np.zeros((X.shape[0], n_classes))
df, idx_pos_class = self._preproc(X)
for k, this_df, calibrator in \
zip(idx_pos_class, df.T, self.calibrators_):
if n_classes == 2:
k += 1
proba[:, k] = calibrator.predict(this_df)
# Normalize the probabilities
if n_classes == 2:
proba[:, 0] = 1. - proba[:, 1]
else:
proba /= np.sum(proba, axis=1)[:, np.newaxis]
# XXX : for some reason all probas can be 0
proba[np.isnan(proba)] = 1. / n_classes
# Deal with cases where the predicted probability minimally exceeds 1.0
proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0
return proba
def _sigmoid_calibration(df, y, sample_weight=None):
"""Probability Calibration with sigmoid method (Platt 2000)
Parameters
----------
df : ndarray, shape (n_samples,)
The decision function or predict proba for the samples.
y : ndarray, shape (n_samples,)
The targets.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
a : float
The slope.
b : float
The intercept.
References
----------
Platt, "Probabilistic Outputs for Support Vector Machines"
"""
df = column_or_1d(df)
y = column_or_1d(y)
F = df # F follows Platt's notations
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
# Bayesian priors (see Platt end of section 2.2)
prior0 = float(np.sum(y <= 0))
prior1 = y.shape[0] - prior0
T = np.zeros(y.shape)
T[y > 0] = (prior1 + 1.) / (prior1 + 2.)
T[y <= 0] = 1. / (prior0 + 2.)
T1 = 1. - T
def objective(AB):
# From Platt (beginning of Section 2.2)
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
l = -(T * np.log(P + tiny) + T1 * np.log(1. - P + tiny))
if sample_weight is not None:
return (sample_weight * l).sum()
else:
return l.sum()
def grad(AB):
# gradient of the objective function
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
TEP_minus_T1P = P * (T * E - T1)
if sample_weight is not None:
TEP_minus_T1P *= sample_weight
dA = np.dot(TEP_minus_T1P, F)
dB = np.sum(TEP_minus_T1P)
return np.array([dA, dB])
AB0 = np.array([0., log((prior0 + 1.) / (prior1 + 1.))])
AB_ = fmin_bfgs(objective, AB0, fprime=grad, disp=False)
return AB_[0], AB_[1]
class _SigmoidCalibration(BaseEstimator, RegressorMixin):
"""Sigmoid regression model.
Attributes
----------
a_ : float
The slope.
b_ : float
The intercept.
"""
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples,)
Training data.
y : array-like, shape (n_samples,)
Training target.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X = column_or_1d(X)
y = column_or_1d(y)
X, y = indexable(X, y)
self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight)
return self
def predict(self, T):
"""Predict new data by linear interpolation.
Parameters
----------
T : array-like, shape (n_samples,)
Data to predict from.
Returns
-------
T_ : array, shape (n_samples,)
The predicted data.
"""
T = column_or_1d(T)
return 1. / (1. + np.exp(self.a_ * T + self.b_))
def calibration_curve(y_true, y_prob, normalize=False, n_bins=5):
"""Compute true and predicted probabilities for a calibration curve.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
normalize : bool, optional, default=False
        Whether y_prob needs to be normalized into the interval [0, 1], i.e.
        is not a proper probability. If True, the smallest value in y_prob
        is mapped onto 0 and the largest one onto 1.
n_bins : int
Number of bins. A bigger number requires more data.
Returns
-------
prob_true : array, shape (n_bins,)
The true probability in each bin (fraction of positives).
prob_pred : array, shape (n_bins,)
The mean predicted probability in each bin.
References
----------
Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
Probabilities With Supervised Learning, in Proceedings of the 22nd
International Conference on Machine Learning (ICML).
See section 4 (Qualitative Analysis of Predictions).
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if normalize: # Normalize predicted values into interval [0, 1]
y_prob = (y_prob - y_prob.min()) / (y_prob.max() - y_prob.min())
elif y_prob.min() < 0 or y_prob.max() > 1:
raise ValueError("y_prob has values outside [0, 1] and normalize is "
"set to False.")
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
bins = np.linspace(0., 1. + 1e-8, n_bins + 1)
binids = np.digitize(y_prob, bins) - 1
bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins))
bin_true = np.bincount(binids, weights=y_true, minlength=len(bins))
bin_total = np.bincount(binids, minlength=len(bins))
nonzero = bin_total != 0
prob_true = (bin_true[nonzero] / bin_total[nonzero])
prob_pred = (bin_sums[nonzero] / bin_total[nonzero])
return prob_true, prob_pred
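# Usage sketch added for illustration (array values invented):
#     >>> import numpy as np
#     >>> y_true = np.array([0, 0, 1, 1])
#     >>> y_prob = np.array([0.1, 0.4, 0.35, 0.8])
#     >>> prob_true, prob_pred = calibration_curve(y_true, y_prob, n_bins=2)
# prob_true holds the fraction of positives in each non-empty bin and
# prob_pred the mean predicted probability in the same bins.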
| mit | 8,433,434,882,342,731,000 | 34.288732 | 79 | 0.593095 | false |
samuelmaudo/yepes | yepes/contrib/datamigrations/importation_plans/update_or_bulk_create.py | 1 | 1986 | # -*- coding:utf-8 -*-
from __future__ import unicode_literals
import collections
from django.utils import six
from yepes.contrib.datamigrations.importation_plans import ModelImportationPlan
class UpdateOrBulkCreatePlan(ModelImportationPlan):
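    """Import plan that updates rows already present and bulk-creates the rest.
    (Docstring added for clarity; behaviour summarised from the code below:
    existing rows, looked up by the migration primary key, are saved only when
    a field actually changed, and all new rows go through bulk_create.)
    """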
def import_batch(self, batch):
model = self.migration.model
manager = model._base_manager
objs = self.get_existing_objects(batch)
if not objs:
manager.bulk_create(
model(**row)
for row
in batch
)
else:
key = self.migration.primary_key
new_objs = []
if not isinstance(key, collections.Iterable):
key_attr = key.attname
for row in batch:
obj = objs.get(row[key_attr])
if obj is not None:
is_modified = False
for k, v in six.iteritems(row):
if v != getattr(obj, k):
setattr(obj, k, v)
is_modified = True
if is_modified:
obj.save(force_update=True)
else:
new_objs.append(model(**row))
else:
key_attrs = [k.attname for k in key]
for row in batch:
obj = objs.get(tuple(row[attr] for attr in key_attrs))
if obj is not None:
is_modified = False
for k, v in six.iteritems(row):
if v != getattr(obj, k):
setattr(obj, k, v)
is_modified = True
if is_modified:
obj.save(force_update=True)
else:
new_objs.append(model(**row))
manager.bulk_create(new_objs)
| bsd-3-clause | 2,911,455,087,934,738,000 | 33.842105 | 79 | 0.441591 | false |
uwescience/myria-web | appengine/networkx/algorithms/shortest_paths/astar.py | 15 | 4913 | # -*- coding: utf-8 -*-
"""Shortest paths and path lengths using A* ("A star") algorithm.
"""
# Copyright (C) 2004-2011 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
from heapq import heappush, heappop
from networkx import NetworkXError
import networkx as nx
__author__ = "\n".join(["Salim Fadhley <[email protected]>",
"Matteo Dell'Amico <[email protected]>"])
__all__ = ['astar_path', 'astar_path_length']
def astar_path(G, source, target, heuristic=None, weight='weight'):
"""Return a list of nodes in a shortest path between source and target
using the A* ("A-star") algorithm.
There may be more than one shortest path. This returns only one.
Parameters
----------
G : NetworkX graph
source : node
Starting node for path
target : node
Ending node for path
heuristic : function
A function to evaluate the estimate of the distance
       from a node to the target.  The function takes
       two node arguments and must return a number.
weight: string, optional (default='weight')
Edge data key corresponding to the edge weight.
Raises
------
NetworkXNoPath
If no path exists between source and target.
Examples
--------
>>> G=nx.path_graph(5)
>>> print(nx.astar_path(G,0,4))
[0, 1, 2, 3, 4]
>>> G=nx.grid_graph(dim=[3,3]) # nodes are two-tuples (x,y)
>>> def dist(a, b):
... (x1, y1) = a
... (x2, y2) = b
... return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
>>> print(nx.astar_path(G,(0,0),(2,2),dist))
[(0, 0), (0, 1), (1, 1), (1, 2), (2, 2)]
See Also
--------
shortest_path, dijkstra_path
"""
if G.is_multigraph():
raise NetworkXError("astar_path() not implemented for Multi(Di)Graphs")
if heuristic is None:
# The default heuristic is h=0 - same as Dijkstra's algorithm
def heuristic(u, v):
return 0
# The queue stores priority, node, cost to reach, and parent.
# Uses Python heapq to keep in priority order.
# Add each node's hash to the queue to prevent the underlying heap from
# attempting to compare the nodes themselves. The hash breaks ties in the
    # priority and is guaranteed unique for all nodes in the graph.
queue = [(0, hash(source), source, 0, None)]
# Maps enqueued nodes to distance of discovered paths and the
# computed heuristics to target. We avoid computing the heuristics
# more than once and inserting the node into the queue too many times.
enqueued = {}
# Maps explored nodes to parent closest to the source.
explored = {}
while queue:
# Pop the smallest item from queue.
_, __, curnode, dist, parent = heappop(queue)
if curnode == target:
path = [curnode]
node = parent
while node is not None:
path.append(node)
node = explored[node]
path.reverse()
return path
if curnode in explored:
continue
explored[curnode] = parent
for neighbor, w in G[curnode].items():
if neighbor in explored:
continue
ncost = dist + w.get(weight, 1)
if neighbor in enqueued:
qcost, h = enqueued[neighbor]
                # If ncost is not an improvement (qcost <= ncost), skip this
                # neighbor.  Otherwise the stale, costlier entry stays in the
                # heap: removing it would mean filtering the whole queue, so
                # it is simply ignored if it is ever popped again.
if qcost <= ncost:
continue
else:
h = heuristic(neighbor, target)
enqueued[neighbor] = ncost, h
heappush(queue, (ncost + h, hash(neighbor), neighbor,
ncost, curnode))
raise nx.NetworkXNoPath("Node %s not reachable from %s" % (source, target))
def astar_path_length(G, source, target, heuristic=None, weight='weight'):
"""Return the length of the shortest path between source and target using
the A* ("A-star") algorithm.
Parameters
----------
G : NetworkX graph
source : node
Starting node for path
target : node
Ending node for path
heuristic : function
A function to evaluate the estimate of the distance
       from a node to the target.  The function takes
       two node arguments and must return a number.
Raises
------
NetworkXNoPath
If no path exists between source and target.
See Also
--------
astar_path
"""
    path = astar_path(G, source, target, heuristic, weight)
return sum(G[u][v].get(weight, 1) for u, v in zip(path[:-1], path[1:]))
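# Usage sketch added for illustration:
#     >>> G = nx.path_graph(5)
#     >>> nx.astar_path_length(G, 0, 4)
#     4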
| bsd-3-clause | -4,030,149,992,754,040,000 | 29.899371 | 79 | 0.587625 | false |
GambitResearch/replisome | lib/replisome/consumers/DataUpdater.py | 1 | 16446 | from operator import itemgetter
import psycopg2.extras
from psycopg2 import extensions as ext
from psycopg2 import sql
from replisome.errors import ReplisomeError
import logging
logger = logging.getLogger('replisome.DataUpdater')
UNCHANGED_TOAST = {}
def tupgetter(*idxs):
"""Like itemgetter, but return a 1-tuple if the input is one index."""
if len(idxs) == 0:
return tuple
if len(idxs) == 1:
def tupgetter_(obj, _idx=idxs[0]):
return (obj[_idx],)
return tupgetter_
else:
return itemgetter(*idxs)
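# Example added for illustration: tupgetter(1)(['a', 'b', 'c']) returns ('b',)
# where operator.itemgetter(1) would return 'b'; with two or more indexes it
# behaves exactly like itemgetter.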
class DataUpdater(object):
def __init__(self, dsn, upsert=False,
skip_missing_columns=False, skip_missing_tables=False):
"""
        Apply changes to a database, receiving messages from a replisome stream.
        :arg upsert: If true, update existing records instead of inserting
            (matching on primary key only, TODO on other fields).
        :arg skip_missing_columns: If true, drop the values in the messages
            for columns not available locally; otherwise fail if such columns
            are found.
        :arg skip_missing_tables: If true, records on non-existing tables are
            dropped.
"""
self.dsn = dsn
self.upsert = upsert
self.skip_missing_columns = skip_missing_columns
self.skip_missing_tables = skip_missing_tables
self._connection = None
# Maps from the key() of the message to the columns and table key names
self._colnames = {}
self._keynames = {}
# Maps from the key() of the message to the query to perform each
# operation (insert, update, delete). The values are in the format
# returned by _get_statement() and are invalidated when new colnames
# or keynames are received in a message (suggesting a schema change
# in the origin database).
self._stmts = {'I': {}, 'U': {}, 'D': {}}
def get_connection(self):
cnn, self._connection = self._connection, None
if cnn is None:
cnn = self.connect()
return cnn
def put_connection(self, cnn):
if cnn.closed:
logger.info("discarding closed connection")
return
status = cnn.get_transaction_status()
if status == ext.TRANSACTION_STATUS_UNKNOWN:
logger.info("closing connection in unknown status")
cnn.close()
return
elif status != ext.TRANSACTION_STATUS_IDLE:
logger.warn("rolling back transaction in status %s", status)
cnn.rollback()
self._connection = cnn
def connect(self):
logger.info('connecting to target database at "%s"', self.dsn)
cnn = psycopg2.connect(self.dsn)
return cnn
def process_message(self, msg):
"""
Process an entire message returned by the source.
Apply the message to the target database in a single transaction.
"""
cnn = self.get_connection()
try:
for ch in msg['tx']:
self.process_change(cnn, ch)
cnn.commit()
finally:
self.put_connection(cnn)
def __call__(self, msg):
self.process_message(msg)
def process_change(self, cnn, msg):
"""
Process one of the changes in a replisome message.
"""
stmt, acc = self._get_statement(cnn, msg)
if stmt is None:
logger.debug("skipping message on %s", msg['table'])
return
cur = cnn.cursor()
try:
cur.execute(stmt, acc(msg))
except psycopg2.DatabaseError:
logger.error("error running the query: %s", cur.query)
raise
logger.debug("query run: %s", cur.query)
def _get_statement(self, cnn, msg):
"""
Return the statement needed to process a change.
The statement is a pair (sql, acc) where sql is a query string and
acc is a function that takes the message as input and returns the
query argument.
"""
k = self.key(msg)
if 'colnames' in msg:
logger.debug("got new columns for table %s", k)
if k in self._colnames:
self._stmts['I'].pop(k, None)
self._stmts['U'].pop(k, None)
self._colnames[k] = msg['colnames']
if 'keynames' in msg:
logger.debug("got new key for table %s", k)
if k in self._keynames:
self._stmts['U'].pop(k, None)
self._stmts['D'].pop(k, None)
self._keynames[k] = msg['keynames']
rv = self._get_special_statement(cnn, msg)
if rv is not None:
return rv
op = msg['op']
stmts = self._stmts[op]
try:
rv = stmts[k]
except KeyError:
if op == 'I':
rv = self.make_insert(cnn, msg)
elif op == 'U':
rv = self.make_update(cnn, msg)
elif op == 'D':
rv = self.make_delete(cnn, msg)
stmts[k] = rv
return rv
def _get_special_statement(self, cnn, msg):
"""Handle one-off the case of insert with unchanged toast values"""
if msg['op'] == 'U':
unchs = [i for (i, v) in enumerate(msg['values'])
if v == UNCHANGED_TOAST]
if unchs:
return self.make_update(cnn, msg, unchanged_idxs=unchs)
def make_insert(self, cnn, msg):
"""
Return the query and message-to-argument function to perform an insert.
"""
s = msg.get('schema')
t = msg['table']
local_cols = self.get_table_columns(cnn, s, t)
if local_cols is None:
if not self.skip_missing_tables:
raise ReplisomeError(
"received insert on table %s.%s not available" % (s, t))
logger.info("received insert on table %s.%s not available", s, t)
return None, None
local_cols = set(local_cols)
msg_cols = self._colnames[self.key(msg)]
if not self.skip_missing_columns:
missing = set(msg_cols) - local_cols
if missing:
raise ReplisomeError(
"insert message on table %s.%s has columns not available "
"locally: %s" % (s, t, ', '.join(sorted(missing))))
idxs = [i for i, c in enumerate(msg_cols) if c in local_cols]
nokeyidxs = []
if self.upsert:
if cnn.server_version < 90500:
raise ReplisomeError(
"upsert is only available from PostgreSQL 9.5")
key_cols = self.get_table_pkey(cnn, s, t)
if key_cols is None:
logger.warning(
"table %s.%s can't have upsert: no primary key", s, t)
else:
nokeyidxs = [i for i, c in enumerate(msg_cols)
if c in local_cols and c not in key_cols]
if not idxs:
logger.info(
"the local table has no field in common with the message")
return None, None
logger.debug(
"the local table has %d field in common with the message",
len(idxs))
colmap = tupgetter(*idxs)
def acc(msg, _map=colmap):
return _map(msg['values'])
cols = colmap(msg_cols)
bits = [sql.SQL('insert into ')]
if 'schema' in msg:
bits.append(sql.Identifier(msg['schema']))
bits.append(sql.SQL('.'))
bits.append(sql.Identifier(msg['table']))
bits.append(sql.SQL(' ('))
bits.append(sql.SQL(',').join(map(sql.Identifier, cols)))
bits.append(sql.SQL(') values ('))
bits.append(sql.SQL(',').join(sql.Placeholder() * len(cols)))
bits.append(sql.SQL(')'))
if self.upsert and key_cols is not None:
bits.append(sql.SQL(' on conflict ('))
bits.append(sql.SQL(',').join(map(sql.Identifier, key_cols)))
if nokeyidxs:
bits.append(sql.SQL(') do update set ('))
bits.append(sql.SQL(',').join(
[sql.Identifier(n)
for n in tupgetter(*nokeyidxs)(msg_cols)]))
bits.append(sql.SQL(') = ('))
bits.append(sql.SQL(',').join(
[sql.SQL('excluded.') + sql.Identifier(n)
for n in tupgetter(*nokeyidxs)(msg_cols)]))
bits.append(sql.SQL(')'))
else:
bits.append(sql.SQL(') do nothing'))
stmt = sql.Composed(bits).as_string(cnn)
logger.debug("generated query: %s", stmt)
return stmt, acc
def make_update(self, cnn, msg, unchanged_idxs=()):
"""
Return the query and message-to-argument function to perform an update.
"""
s = msg.get('schema')
t = msg['table']
local_cols = self.get_table_columns(cnn, s, t)
if local_cols is None:
if not self.skip_missing_tables:
raise ReplisomeError(
"received update on table %s.%s not available" % (s, t))
logger.debug("received update on table %s.%s not available", s, t)
return None, None
local_cols = set(local_cols)
msg_cols = self._colnames[self.key(msg)]
msg_keys = self._keynames[self.key(msg)]
if not self.skip_missing_columns:
missing = set(msg_cols) - local_cols
if missing:
raise ReplisomeError(
"update message on table %s.%s has columns not available "
"locally: %s" % (s, t, ', '.join(sorted(missing))))
# the key must be entirely known
kidxs = [i for i, c in enumerate(msg_keys) if c in local_cols]
        if not kidxs:
raise ReplisomeError("the table %s.%s has no key" % (s, t))
if len(kidxs) != len(msg_keys):
raise ReplisomeError(
"the local table %s.%s is missing some key fields %s" %
(s, t, msg_keys))
idxs = [i for i, c in enumerate(msg_cols)
if c in local_cols and i not in unchanged_idxs]
if not idxs:
logger.info(
"the local table has no field in common with the message")
return None, None
colmap = tupgetter(*idxs)
keymap = tupgetter(*kidxs)
logger.debug(
"the local table has %d field in common with the message",
len(idxs))
def acc(msg, _colmap=colmap, _keymap=keymap):
return _colmap(msg['values']) + _keymap(msg['oldkey'])
cols = colmap(msg_cols)
keycols = keymap(msg_keys)
bits = [sql.SQL('update ')]
if 'schema' in msg:
bits.append(sql.Identifier(msg['schema']))
bits.append(sql.SQL('.'))
bits.append(sql.Identifier(msg['table']))
bits.append(sql.SQL(' set ('))
bits.append(sql.SQL(',').join(map(sql.Identifier, cols)))
bits.append(sql.SQL(') = ('))
bits.append(sql.SQL(',').join(sql.Placeholder() * len(cols)))
bits.append(sql.SQL(') where ('))
bits.append(sql.SQL(',').join(map(sql.Identifier, keycols)))
bits.append(sql.SQL(') = ('))
bits.append(sql.SQL(',').join(sql.Placeholder() * len(keycols)))
bits.append(sql.SQL(')'))
stmt = sql.Composed(bits).as_string(cnn)
logger.debug("generated query: %s", stmt)
return stmt, acc
def make_delete(self, cnn, msg):
"""
Return the query and message-to-argument function to perform a delete.
"""
s = msg.get('schema')
t = msg['table']
local_cols = self.get_table_columns(cnn, s, t)
if local_cols is None:
if not self.skip_missing_tables:
raise ReplisomeError(
"received delete on table %s.%s not available" % (s, t))
logger.debug("received delete on table %s.%s not available", s, t)
return None, None
local_cols = set(local_cols)
msg_keys = self._keynames[self.key(msg)]
# the key must be entirely known
kidxs = [i for i, c in enumerate(msg_keys) if c in local_cols]
        if not kidxs:
raise ReplisomeError("the table %s.%s has no key" % (s, t))
if len(kidxs) != len(msg_keys):
raise ReplisomeError(
"the local table %s.%s is missing some key fields %s" %
(s, t, msg_keys))
keymap = tupgetter(*kidxs)
def acc(msg, _keymap=keymap):
return _keymap(msg['oldkey'])
keycols = keymap(msg_keys)
bits = [sql.SQL('delete from ')]
if 'schema' in msg:
bits.append(sql.Identifier(msg['schema']))
bits.append(sql.SQL('.'))
bits.append(sql.Identifier(msg['table']))
bits.append(sql.SQL(' where ('))
bits.append(sql.SQL(',').join(map(sql.Identifier, keycols)))
bits.append(sql.SQL(') = ('))
bits.append(sql.SQL(',').join(sql.Placeholder() * len(keycols)))
bits.append(sql.SQL(')'))
stmt = sql.Composed(bits).as_string(cnn)
logger.debug("generated query: %s", stmt)
return stmt, acc
def get_table_columns(self, cnn, schema, table):
"""
Return the list of column names in a table, optionally schema-qualified
Return null if the table is not found.
"""
if schema is None:
sql = """
select array_agg(attname)
from (
select attname from pg_attribute
where attrelid = (
select c.oid from pg_class c
where relname = %(table)s
and relkind = 'r' limit 1)
and attnum > 0 and not attisdropped
order by attnum) x
"""
else:
sql = """
select array_agg(attname)
from (
select attname from pg_attribute
where attrelid = (
select c.oid from pg_class c
join pg_namespace s on s.oid = relnamespace
where relname = %(table)s and nspname = %(schema)s
and relkind = 'r' limit 1)
and attnum > 0 and not attisdropped
order by attnum) x
"""
cur = cnn.cursor()
cur.execute(sql, {'table': table, 'schema': schema})
return cur.fetchone()[0]
def get_table_pkey(self, cnn, schema, table):
"""
Return the list of column names in a table's primary key, optionally
schema-qualified
Return null if the table is not found.
"""
if schema is None:
sql = """
select array_agg(attname)
from (
select attname from pg_attribute
where attrelid = (
select c.conindid from pg_class r
join pg_constraint c on conrelid = r.oid
where r.relname = %(table)s
and r.relkind = 'r' and contype = 'p' limit 1)
and attnum > 0 and not attisdropped
order by attnum) x
"""
else:
sql = """
select array_agg(attname)
from (
select attname from pg_attribute
where attrelid = (
select c.conindid from pg_class r
join pg_constraint c on conrelid = r.oid
join pg_namespace s on s.oid = r.relnamespace
where r.relname = %(table)s and nspname = %(schema)s
and r.relkind = 'r' and contype = 'p' limit 1)
and attnum > 0 and not attisdropped
order by attnum) x
"""
cur = cnn.cursor()
cur.execute(sql, {'table': table, 'schema': schema})
return cur.fetchone()[0]
def key(self, msg):
"""Return a key to identify a table from a message."""
return (msg.get('schema'), msg['table'])
| bsd-3-clause | -2,957,178,685,116,214,000 | 34.066098 | 79 | 0.522802 | false |
mdeemer/XlsxWriter | xlsxwriter/test/comparison/test_chart_high_low_lines02.py | 8 | 1817 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_high_low_lines02.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of an XlsxWriter file with high-low lines."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'line'})
chart.axis_ids = [61180928, 63898368]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.set_high_low_lines({
'line': {
'color': 'red',
'dash_type': 'square_dot'
}
})
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$B$1:$B$5',
})
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$C$1:$C$5',
})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| bsd-2-clause | 214,080,986,216,174,050 | 24.236111 | 79 | 0.511282 | false |
SecLion77/i3_config | scripts/workspace_controller.py | 1 | 4434 | #!/usr/bin/python3
import subprocess
import sys
import json
import math
import os
from os.path import expanduser
from tempfile import TemporaryFile
def get_workspace():
handle = subprocess.Popen(
["i3-msg", "-t", "get_workspaces"], stdout=subprocess.PIPE)
output = handle.communicate()[0]
data = json.loads(output.decode())
data = sorted(data, key=lambda k: k['name'])
for i in data:
        if i['focused']:
return i['name']
def get_workspaces():
handle = subprocess.Popen(
["i3-msg", "-t", "get_workspaces"], stdout=subprocess.PIPE)
output = handle.communicate()[0]
data = json.loads(output.decode())
data = sorted(data, key=lambda k: k['name'])
arr = []
for i in data:
arr.append(i['name'])
return arr
def move_to(num):
subprocess.Popen(
["i3-msg", "move container to workspace " + str(num)],
stdout=subprocess.PIPE)
def go_to(num):
subprocess.Popen(["i3-msg", "workspace "+str(num)], stdout=subprocess.PIPE)
def dmenu_fetch(inputstr):
t = TemporaryFile()
t.write(bytes(inputstr, 'UTF-8'))
t.seek(0)
dmenu_run = subprocess.Popen(
["dmenu", "-b"], stdout=subprocess.PIPE, stdin=t)
output = (dmenu_run.communicate()[0]).decode().strip()
return output
def open_app(workspace):
home = expanduser("~")
cache = home+"/.cache/dmenu_run"
check_new_programs(home, cache)
applications = open(cache)
dmenu_run = subprocess.Popen(
["dmenu", "-b"], stdout=subprocess.PIPE, stdin=applications)
output = (dmenu_run.communicate()[0]).decode().strip()
subprocess.Popen(
["i3-msg", "workspace " + workspace + "; exec " + output],
stdout=subprocess.PIPE)
def check_new_programs(home, cachefile):
PATH = os.environ.get('PATH')
check = subprocess.Popen(
[home + "/.i3/scripts/dmenu_update"], stdout=subprocess.PIPE)
check.communicate()
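# Command summary (comment added for clarity):
#   workspace_controller.py <up|down|next|prev|go|move|open|dynamic> [n|go|move]
# 'up'/'down' shift the current workspace number by ten and 'next'/'prev' by
# one (followed by 'go' or 'move' to act on the result), 'go'/'move' jump to or
# move the focused container onto workspace <block + n>, 'open' launches a
# dmenu-chosen app on the current workspace, and 'dynamic go|move' picks the
# target workspace itself through dmenu.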
if len(sys.argv) < 2:
    print("Error: not enough arguments")
else:
command = sys.argv[1]
switch_number = 1 # default switch number
if len(sys.argv) == 3:
# they passed in a number to move to
try:
switch_number = int(sys.argv[2])
except ValueError:
pass
# get the workspace number
workspace_name = get_workspace()
workspace_val = 1 # default value if name parseing fails
workspace_prefix = ''
try:
match_set = '0123456789-'
# only look for digits in the number
workspace_val = int(
''.join(
filter(
lambda x: x in match_set,
workspace_name)))
        # include - in the ignore list in case it is a negative number
workspace_prefix = ''.join(
filter(
lambda x: x not in match_set,
workspace_name))
except ValueError:
pass
print(workspace_prefix)
# handle the commands
if command == 'up':
workspace_val += 10
elif command == 'down':
workspace_val -= 10
elif command == 'next':
workspace_val += 1
elif command == 'prev':
workspace_val -= 1
elif command == 'go':
# go to workspace in block
workspace_rounded = int(math.floor(workspace_val/10))*10
workspace_rounded += switch_number
go_to(workspace_prefix + str(workspace_rounded))
elif command == 'move':
# move the current container to the selected workspace
workspace_rounded = int(math.floor(workspace_val/10))*10
workspace_rounded += switch_number
move_to(workspace_prefix + str(workspace_rounded))
elif command == 'open':
open_app(workspace_name)
elif command == 'dynamic':
# dynamic tagging
command2 = sys.argv[2]
workspaces = get_workspaces()
inputstr = '\n'.join(workspaces)
result = dmenu_fetch(inputstr)
if command2 == 'go':
go_to(result)
elif command2 == 'move':
move_to(result)
if len(sys.argv) == 3:
# not a go or move, command2 is argv2
command2 = sys.argv[2]
if command == 'up' or command == 'down' or command == 'prev' or command == 'next':
if command2 == 'go':
go_to(workspace_prefix + str(workspace_val))
elif command2 == 'move':
move_to(workspace_prefix + str(workspace_val))
| mit | 7,642,548,049,970,373,000 | 30.006993 | 90 | 0.586378 | false |
Distrotech/glib | build/win32/setup.py | 7 | 17387 | #!/usr/bin/python
# vim: encoding=utf-8
#expand *.in files
#this script is only intended for building from git, not for building from the released tarball, which already includes all necessary files
import os
import sys
import re
import string
import subprocess
import optparse
def get_version(srcroot):
ver = {}
RE_VERSION = re.compile(r'^m4_define\(\[(glib_\w+)\],\s*\[(\d+)\]\)')
with open(os.path.join(srcroot, 'configure.ac'), 'r') as ac:
for i in ac:
mo = RE_VERSION.search(i)
if mo:
ver[mo.group(1).upper()] = int(mo.group(2))
ver['GLIB_BINARY_AGE'] = 100 * ver['GLIB_MINOR_VERSION'] + ver['GLIB_MICRO_VERSION']
ver['GLIB_VERSION'] = '%d.%d.%d' % (ver['GLIB_MAJOR_VERSION'],
ver['GLIB_MINOR_VERSION'],
ver['GLIB_MICRO_VERSION'])
ver['LT_RELEASE'] = '%d.%d' % (ver['GLIB_MAJOR_VERSION'], ver['GLIB_MINOR_VERSION'])
ver['LT_CURRENT'] = 100 * ver['GLIB_MINOR_VERSION'] + ver['GLIB_MICRO_VERSION'] - ver['GLIB_INTERFACE_AGE']
ver['LT_REVISION'] = ver['GLIB_INTERFACE_AGE']
ver['LT_AGE'] = ver['GLIB_BINARY_AGE'] - ver['GLIB_INTERFACE_AGE']
ver['LT_CURRENT_MINUS_AGE'] = ver['LT_CURRENT'] - ver['LT_AGE']
return ver
def process_in(src, dest, vars):
RE_VARS = re.compile(r'@(\w+?)@')
with open(src, 'r') as s:
with open(dest, 'w') as d:
for i in s:
i = RE_VARS.sub(lambda x: str(vars[x.group(1)]), i)
d.write(i)
def get_srcroot():
if not os.path.isabs(__file__):
path = os.path.abspath(__file__)
else:
path = __file__
dirname = os.path.dirname(path)
return os.path.abspath(os.path.join(dirname, '..', '..'))
def process_include(src, dest, includes):
RE_INCLUDE = re.compile(r'^\s*#include\s+"(.*)"')
with open(src, 'r') as s:
with open(dest, 'w') as d:
for i in s:
mo = RE_INCLUDE.search(i)
if mo:
target = ''
for j in includes:
#print "searching in ", j
if mo.group(1) in os.listdir(j):
target = os.path.join(j, mo.group(1))
break
if not target:
raise Exception("Couldn't fine include file %s" % mo.group(1))
else:
with open(target, 'r') as t:
for inc in t.readlines():
d.write(inc)
else:
d.write(i)
def generate_libgio_sourcefiles(srcroot, dest, stype):
vars = read_vars_from_AM(os.path.join(srcroot, 'gio', 'Makefile.am'),
vars = {'top_srcdir': srcroot},
conds = {'OS_WIN32': True},
filters = ['libgio_2_0_la_SOURCES', 'win32_more_sources_for_vcproj'])
files = vars['libgio_2_0_la_SOURCES'].split() + \
vars['win32_more_sources_for_vcproj'].split()
sources = [i for i in files \
if i != 'gdesktopappinfo.c' and \
not (i.startswith('gunix') and i.endswith('.c')) \
and i.endswith('.c') ]
if stype == '9':
with open(dest, 'w') as d:
for i in sources:
d.write('\t\t\t<File RelativePath="..\\..\\..\\gio\\' + i.replace('/', '\\') + '"/>\n')
elif stype == '10':
with open(dest, 'w') as d:
for i in sources:
d.write('\t\t\t<ClCompile Include="..\\..\\..\\gio\\' + i.replace('/', '\\') + '"/>\n')
elif stype == '10f':
with open(dest, 'w') as d:
for i in sources:
d.write('\t\t\t<ClCompile Include="..\\..\\..\\gio\\' + i.replace('/', '\\') + '"><Filter>Source Files</Filter></ClCompile>\n')
else:
raise Exception("Must specify project type (9, 10 or 10f)")
def generate_libgio_enumtypes(srcroot, perl):
vars = read_vars_from_AM(os.path.join(srcroot, 'gio', 'Makefile.am'),
vars = {'top_srcdir': srcroot},
conds = {'OS_WIN32': True},
filters = ['gio_headers'])
cwd = os.getcwd()
os.chdir(os.path.join(srcroot, 'gio'))
for suffix in ['.c', '.h']:
cmd = [perl, os.path.join(srcroot, 'gobject', 'glib-mkenums'),
'--template', 'gioenumtypes' + suffix + '.template'] + vars['gio_headers'].split()
with open('gioenumtypes' + suffix, 'w') as d:
subprocess.Popen(cmd, stdout = d).communicate()
os.chdir(cwd)
def generate_libglib_sourcefiles(srcroot, dest, stype):
vars = read_vars_from_AM(os.path.join(srcroot, 'glib', 'Makefile.am'),
vars = {'top_srcdir': srcroot},
conds = {'OS_WIN32': True,
'ENABLE_REGEX': True},
filters = ['libglib_2_0_la_SOURCES'])
files = vars['libglib_2_0_la_SOURCES'].split()
sources = [i for i in files \
if not (i.endswith('-gcc.c') or i.endswith('-unix.c')) \
and i.endswith('.c') ]
if stype == '9':
with open(dest, 'w') as d:
for i in sources:
d.write('\t\t\t<File RelativePath="..\\..\\..\\glib\\' + i.replace('/', '\\') + '"/>\n')
elif stype == '10':
with open(dest, 'w') as d:
for i in sources:
d.write('\t\t\t<ClCompile Include="..\\..\\..\\glib\\' + i.replace('/', '\\') + '"/>\n')
elif stype == '10f':
with open(dest, 'w') as d:
for i in sources:
d.write('\t\t\t<ClCompile Include="..\\..\\..\\glib\\' + i.replace('/', '\\') + '"><Filter>Source Files</Filter></ClCompile>\n')
else:
raise Exception("Must specify project type (9, 10 or 10f)")
def generate_libgobject_sourcefiles(srcroot, dest, stype):
vars = read_vars_from_AM(os.path.join(srcroot, 'gobject', 'Makefile.am'),
vars = {'top_srcdir': srcroot},
conds = {'OS_WIN32': True},
filters = ['libgobject_2_0_la_SOURCES'])
files = vars['libgobject_2_0_la_SOURCES'].split()
sources = [i for i in files if i.endswith('.c') ]
if stype == '9':
with open(dest, 'w') as d:
for i in sources:
d.write('\t\t\t<File RelativePath="..\\..\\..\\gobject\\' + i.replace('/', '\\') + '"/>\n')
elif stype == '10':
with open(dest, 'w') as d:
for i in sources:
d.write('\t\t\t<ClCompile Include="..\\..\\..\\gobject\\' + i.replace('/', '\\') + '"/>\n')
elif stype == '10f':
with open(dest, 'w') as d:
for i in sources:
d.write('\t\t\t<ClCompile Include="..\\..\\..\\gobject\\' + i.replace('/', '\\') + '"><Filter>Source Files</Filter></ClCompile>\n')
else:
raise Exception("Must specify project type (9, 10 or 10f)")
def read_vars_from_AM(path, vars = {}, conds = {}, filters = None):
'''
path: path to the Makefile.am
vars: predefined variables
conds: condition variables for Makefile
filters: if None, all variables defined are returned,
otherwise, it is a list contains that variables should be returned
'''
cur_vars = vars.copy()
RE_AM_VAR_REF = re.compile(r'\$\((\w+?)\)')
RE_AM_VAR = re.compile(r'^\s*(\w+)\s*=(.*)$')
RE_AM_INCLUDE = re.compile(r'^\s*include\s+(\w+)')
RE_AM_CONTINUING = re.compile(r'\\\s*$')
RE_AM_IF = re.compile(r'^\s*if\s+(\w+)')
RE_AM_ELSE = re.compile(r'^\s*else')
RE_AM_ENDIF = re.compile(r'^\s*endif')
def am_eval(cont):
return RE_AM_VAR_REF.sub(lambda x: cur_vars.get(x.group(1), ''), cont)
with open(path, 'r') as f:
contents = f.readlines()
#combine continuing lines
i = 0
ncont = []
while i < len(contents):
line = contents[i]
if RE_AM_CONTINUING.search(line):
line = RE_AM_CONTINUING.sub('', line)
j = i + 1
while j < len(contents) and RE_AM_CONTINUING.search(contents[j]):
line += RE_AM_CONTINUING.sub('', contents[j])
j += 1
else:
if j < len(contents):
line += contents[j]
i = j
else:
i += 1
ncont.append(line)
#include, var define, var evaluation
i = -1
skip = False
oldskip = []
while i < len(ncont) - 1:
i += 1
line = ncont[i]
mo = RE_AM_IF.search(line)
if mo:
oldskip.append(skip)
skip = False if mo.group(1) in conds and conds[mo.group(1)] \
else True
continue
mo = RE_AM_ELSE.search(line)
if mo:
skip = not skip
continue
mo = RE_AM_ENDIF.search(line)
if mo:
skip = oldskip.pop()
continue
if not skip:
mo = RE_AM_INCLUDE.search(line)
if mo:
cur_vars.update(read_vars_from_AM(am_eval(mo.group(1)), cur_vars, conds, None))
continue
mo = RE_AM_VAR.search(line)
if mo:
cur_vars[mo.group(1)] = am_eval(mo.group(2).strip())
continue
#filter:
if filters != None:
ret = {}
for i in filters:
ret[i] = cur_vars.get(i, '')
return ret
else:
return cur_vars
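# Example call, added for illustration (paths invented):
#   read_vars_from_AM('glib/Makefile.am', vars={'top_srcdir': '.'},
#                     conds={'OS_WIN32': True},
#                     filters=['libglib_2_0_la_SOURCES'])
# returns a dict holding just that variable, with $(...) references expanded
# and only the OS_WIN32 conditional blocks included.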
def main(argv):
parser = optparse.OptionParser()
    parser.add_option('-p', '--perl', dest='perl', metavar='PATH', default='C:\\Perl\\bin\\perl.exe', action='store', help='path to the perl interpreter (default: C:\\Perl\\bin\\perl.exe)')
opt, args = parser.parse_args(argv)
srcroot = get_srcroot()
#print 'srcroot', srcroot
ver = get_version(srcroot)
#print 'ver', ver
config_vars = ver.copy()
config_vars['GETTEXT_PACKAGE'] = 'Glib'
process_in(os.path.join(srcroot, 'config.h.win32.in'),
os.path.join(srcroot, 'config.h'),
config_vars)
glibconfig_vars = ver.copy()
glibconfig_vars['GLIB_WIN32_STATIC_COMPILATION_DEFINE'] = ''
process_in(os.path.join(srcroot, 'glib', 'glibconfig.h.win32.in'),
os.path.join(srcroot, 'glib', 'glibconfig.h'),
glibconfig_vars)
for submodule in ['glib', 'gobject', 'gthread', 'gmodule', 'gio']:
process_in(os.path.join(srcroot, submodule, submodule + '.rc.in'),
os.path.join(srcroot, submodule, submodule + '.rc'),
ver)
#------------ submodule gobject -------------------
generate_libglib_sourcefiles(srcroot,
os.path.join(srcroot, 'build', 'win32', 'libglib.sourcefiles'), '9')
generate_libglib_sourcefiles(srcroot,
os.path.join(srcroot, 'build', 'win32', 'libglib.vs10.sourcefiles'), '10')
generate_libglib_sourcefiles(srcroot,
os.path.join(srcroot, 'build', 'win32', 'libglib.vs10.sourcefiles.filters'), '10f')
process_include(os.path.join(srcroot, 'build', 'win32', 'vs9', 'glib.vcprojin'),
os.path.join(srcroot, 'build', 'win32', 'vs9', 'glib.vcproj'),
includes = [os.path.join(srcroot, 'build', 'win32')])
process_include(os.path.join(srcroot, 'build', 'win32', 'vs10', 'glib.vcxprojin'),
os.path.join(srcroot, 'build', 'win32', 'vs10', 'glib.vcxproj'),
includes = [os.path.join(srcroot, 'build', 'win32')])
process_include(os.path.join(srcroot, 'build', 'win32', 'vs10', 'glib.vcxproj.filtersin'),
os.path.join(srcroot, 'build', 'win32', 'vs10', 'glib.vcxproj.filters'),
includes = [os.path.join(srcroot, 'build', 'win32')])
os.unlink(os.path.join(srcroot, 'build', 'win32', 'libglib.sourcefiles'))
os.unlink(os.path.join(srcroot, 'build', 'win32', 'libglib.vs10.sourcefiles'))
os.unlink(os.path.join(srcroot, 'build', 'win32', 'libglib.vs10.sourcefiles.filters'))
with open(os.path.join(srcroot, 'glib', 'gspawn-win32-helper-console.c'), 'w') as c:
c.write('#define HELPER_CONSOLE\n')
c.write('#include "gspawn-win32-helper.c"\n')
with open(os.path.join(srcroot, 'glib', 'gspawn-win64-helper-console.c'), 'w') as c:
c.write('#define HELPER_CONSOLE\n')
c.write('#include "gspawn-win32-helper.c"\n')
with open(os.path.join(srcroot, 'glib', 'gspawn-win64-helper.c'), 'w') as c:
c.write('#include "gspawn-win32-helper.c"\n')
#------------ end of submodule glib -------------------
#------------ submodule gobject -------------------
mkenums_vars = ver.copy()
mkenums_vars.update({'PERL_PATH': opt.perl})
process_in(os.path.join(srcroot, 'gobject', 'glib-mkenums.in'),
os.path.join(srcroot, 'gobject', 'glib-mkenums'),
mkenums_vars)
#gmarshal.strings
cwd = os.getcwd()
os.chdir(os.path.join(srcroot, 'gobject'))
with open(os.path.join(srcroot, 'gobject', 'gmarshal.strings'), 'w') as d:
with open(os.path.join(srcroot, 'gobject', 'gmarshal.list'), 'r') as s:
for i in s:
if i[0] not in string.ascii_uppercase: #^[A-Z]
continue
line = '"g_cclosure_marshal_' # s/^/"g_cclosure_marshal_/
for c in i:
if c == ':':
line += '__' # s/:/__
elif c == ',':
line += '_' # s/,/_
elif c not in '\r\n':
line += c
d.write(line + '",\n')
#subprocess.Popen([opt.perl, 'marshal-genstrings.pl'], stdout=d).communicate()
os.chdir(cwd)
generate_libgobject_sourcefiles(srcroot,
os.path.join(srcroot, 'build', 'win32', 'libgobject.sourcefiles'), '9')
generate_libgobject_sourcefiles(srcroot,
os.path.join(srcroot, 'build', 'win32', 'libgobject.vs10.sourcefiles'), '10')
generate_libgobject_sourcefiles(srcroot,
os.path.join(srcroot, 'build', 'win32', 'libgobject.vs10.sourcefiles.filters'), '10f')
process_include(os.path.join(srcroot, 'build', 'win32', 'vs9', 'gobject.vcprojin'),
os.path.join(srcroot, 'build', 'win32', 'vs9', 'gobject.vcproj'),
includes = [os.path.join(srcroot, 'build', 'win32')])
process_include(os.path.join(srcroot, 'build', 'win32', 'vs10', 'gobject.vcxprojin'),
os.path.join(srcroot, 'build', 'win32', 'vs10', 'gobject.vcxproj'),
includes = [os.path.join(srcroot, 'build', 'win32')])
process_include(os.path.join(srcroot, 'build', 'win32', 'vs10', 'gobject.vcxproj.filtersin'),
os.path.join(srcroot, 'build', 'win32', 'vs10', 'gobject.vcxproj.filters'),
includes = [os.path.join(srcroot, 'build', 'win32')])
os.unlink(os.path.join(srcroot, 'build', 'win32', 'libgobject.sourcefiles'))
os.unlink(os.path.join(srcroot, 'build', 'win32', 'libgobject.vs10.sourcefiles'))
os.unlink(os.path.join(srcroot, 'build', 'win32', 'libgobject.vs10.sourcefiles.filters'))
#------------ end of submodule gobject -------------------
#------------ submodule gio -------------------
#depends on glib-mkenums
generate_libgio_sourcefiles(srcroot,
os.path.join(srcroot, 'build', 'win32', 'libgio.sourcefiles'), '9')
generate_libgio_sourcefiles(srcroot,
os.path.join(srcroot, 'build', 'win32', 'libgio.vs10.sourcefiles'), '10')
generate_libgio_sourcefiles(srcroot,
os.path.join(srcroot, 'build', 'win32', 'libgio.vs10.sourcefiles.filters'), '10f')
process_include(os.path.join(srcroot, 'build', 'win32', 'vs9', 'gio.vcprojin'),
os.path.join(srcroot, 'build', 'win32', 'vs9', 'gio.vcproj'),
includes = [os.path.join(srcroot, 'build', 'win32')])
process_include(os.path.join(srcroot, 'build', 'win32', 'vs10', 'gio.vcxprojin'),
os.path.join(srcroot, 'build', 'win32', 'vs10', 'gio.vcxproj'),
includes = [os.path.join(srcroot, 'build', 'win32')])
process_include(os.path.join(srcroot, 'build', 'win32', 'vs10', 'gio.vcxproj.filtersin'),
os.path.join(srcroot, 'build', 'win32', 'vs10', 'gio.vcxproj.filters'),
includes = [os.path.join(srcroot, 'build', 'win32')])
os.unlink(os.path.join(srcroot, 'build', 'win32', 'libgio.sourcefiles'))
os.unlink(os.path.join(srcroot, 'build', 'win32', 'libgio.vs10.sourcefiles'))
os.unlink(os.path.join(srcroot, 'build', 'win32', 'libgio.vs10.sourcefiles.filters'))
generate_libgio_enumtypes(srcroot, opt.perl)
#------------ end of submodule gio -------------------
#------------ submodule gmodule -------------------
#------------ end of submodule gmodule -------------------
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| lgpl-2.1 | -6,020,025,983,272,109,000 | 46.247283 | 189 | 0.515788 | false |
trolldbois/ctypeslib | test/test_fast_clang.py | 1 | 3051 | import unittest
import ctypes
from test.util import ClangTest
class CompareSizes(ClangTest):
"""Compare python sizes with the clang framework.
"""
#@unittest.skip('')
def test_basic_types_size(self):
"""Test sizes of pod."""
targets = ['_char', '_short', '_int', '_uint', '_long', '_ulong',
'_double', '_longdouble', '_float', '_ptr']
for flags in [['-target', 'i386-linux'], ['-target', 'x86_64-linux']]:
self.gen('test/data/test-basic-types.c', flags)
for name in targets:
self.assertSizes(name)
#@unittest.skip('')
#@unittest.expectedFailure # packed attribute
def test_records_size(self):
"""Test sizes of records."""
targets = ['struct_Name', 'struct_Name2', 'struct_Node', 'struct_Node2', 'myEnum',
'struct_Node3', 'struct_Node4', 'my__quad_t', 'my_bitfield',
'mystruct']
for flags in [['-target', 'i386-linux'], ['-target', 'x86_64-linux']]:
self.gen('test/data/test-records.c', flags)
for name in targets:
self.assertSizes(name)
def test_records_fields_offset(self):
"""Test offset of records fields."""
targets = ['struct_Name', 'struct_Name2', 'struct_Node', 'struct_Node2',
'struct_Node3', 'struct_Node4', 'my__quad_t', 'my_bitfield',
'mystruct']
for flags in [['-target', 'i386-linux'], ['-target', 'x86_64-linux']]:
self.gen('test/data/test-records.c', flags)
for name in targets:
self.assertOffsets(name)
@unittest.expectedFailure
def test_includes_x32(self):
"""Test sizes of pod with std include."""
targets = ['int8_t', 'intptr_t', 'intmax_t']
# no size here ['a','b','c','d','e','f','g','h']
# will fail with IncorrectWordSizeError
self.gen('test/data/test-stdint.cpp', ['-target', 'i386-linux'])
for name in targets:
self.assertSizes(name)
def test_includes(self):
"""Test sizes of pod with std include."""
targets = ['int8_t', 'intptr_t', 'intmax_t']
# no size here ['a','b','c','d','e','f','g','h']
# Todo: struct__IO_FILE is used in gen in POINTER before typedef
self.gen('test/data/test-stdint.cpp', ['-target', 'x86_64-linux'])
for name in targets:
self.assertSizes(name)
def test_record_complex(self):
"""Test sizes of complex record fields."""
targets = ['complex1', 'complex2', 'complex3', 'complex4', 'complex5',
'complex6']
for flags in [['-target', 'i386-linux'], ['-target', 'x86_64-linux']]:
self.gen('test/data/test-records-complex.c', flags)
for name in targets:
self.assertSizes(name)
self.assertOffsets(name)
import logging
import sys
if __name__ == "__main__":
#logging.basicConfig( stream=sys.stderr, level=logging.DEBUG )
unittest.main()
| mit | -8,898,688,422,079,667,000 | 38.115385 | 90 | 0.55195 | false |
nacl-webkit/chrome_deps | chrome/test/install_test/theme_updater.py | 6 | 2887 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Update tests for themes."""
import os
from common import util
import chrome_options
import install_test
class ThemeUpdater(install_test.InstallTest):
"""Theme update tests."""
_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
_EXTENSIONS_DIR = os.path.join(_DIRECTORY, os.path.pardir, 'data',
'extensions')
camo_theme = os.path.join(_EXTENSIONS_DIR, 'theme.crx')
camo_img = ('chrome://theme/IDR_THEME_NTP_BACKGROUND?'
'iamefpfkojoapidjnbafmgkgncegbkad')
def setUp(self):
super(ThemeUpdater, self).setUp()
self._user_data_dir = util.MakeTempDir()
def _CheckThemeApplied(self):
"""Loads the New Tab Page and asserts that the theme is applied."""
self._driver.get('chrome://newtab')
html = self._driver.find_element_by_xpath('html')
html_background = html.value_of_css_property('background-image')
self.assertTrue(self.camo_img in html_background,
msg='Did not find expected theme background-image')
def _StartChromeProfile(self, incognito=False):
"""Start Chrome with a temp profile.
Args:
incognito: Boolean flag for starting Chrome in incognito.
"""
options = chrome_options.ChromeOptions()
options.SetUserDataDir(self._user_data_dir)
if incognito:
options.AddSwitch('incognito')
self.StartChrome(options.GetCapabilities())
def _StartChromeProfileExtension(self, extension):
"""Start Chrome with a temp profile and with specified extension.
Args:
extension: Paths to extension to be installed.
"""
options = chrome_options.ChromeOptions()
options.AddExtension(extension)
options.SetUserDataDir(self._user_data_dir)
self.StartChrome(options.GetCapabilities())
def testInstallTheme(self):
"""Install a theme and check it is still applied after update."""
self.Install(self.GetUpdateBuilds()[0])
self._StartChromeProfileExtension(self.camo_theme)
self._CheckThemeApplied()
# Update and relaunch without extension.
self.Install(self.GetUpdateBuilds()[1])
self._StartChromeProfile()
self._CheckThemeApplied()
def testInstallThemeIncognito(self):
"""Install a theme and check it still applies to incognito after update."""
self.Install(self.GetUpdateBuilds()[0])
self._StartChromeProfileExtension(self.camo_theme)
self._CheckThemeApplied()
# Relaunch without extension in incognito.
self._driver.quit()
self._StartChromeProfile(incognito=True)
self._CheckThemeApplied()
# Update and relaunch without extension in incognito.
self.Install(self.GetUpdateBuilds()[1])
self._StartChromeProfile(incognito=True)
self._CheckThemeApplied()
| bsd-3-clause | -2,779,664,602,906,177,000 | 33.783133 | 79 | 0.704538 | false |
MostafaGazar/tensorflow | tensorflow/examples/tutorials/input_fn/boston.py | 19 | 2448 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DNNRegressor with custom input_fn for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas as pd
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
COLUMNS = ["crim", "zn", "indus", "nox", "rm", "age",
"dis", "tax", "ptratio", "medv"]
FEATURES = ["crim", "zn", "indus", "nox", "rm",
"age", "dis", "tax", "ptratio"]
LABEL = "medv"
def input_fn(data_set):
feature_cols = {k: tf.constant(data_set[k].values) for k in FEATURES}
labels = tf.constant(data_set[LABEL].values)
return feature_cols, labels
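# Note added for clarity: the dict maps each feature name to a constant tensor
# of that column's values; the estimator calls input_fn to obtain a
# (features, labels) pair for fit, evaluate and predict.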
def main(unused_argv):
# Load datasets
training_set = pd.read_csv("boston_train.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
test_set = pd.read_csv("boston_test.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Set of 6 examples for which to predict median house values
prediction_set = pd.read_csv("boston_predict.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Feature cols
feature_cols = [tf.contrib.layers.real_valued_column(k)
for k in FEATURES]
# Build 2 layer fully connected DNN with 10, 10 units respectively.
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=feature_cols, hidden_units=[10, 10])
# Fit
regressor.fit(input_fn=lambda: input_fn(training_set), steps=5000)
# Score accuracy
ev = regressor.evaluate(input_fn=lambda: input_fn(test_set), steps=1)
loss_score = ev["loss"]
print("Loss: {0:f}".format(loss_score))
# Print out predictions
y = regressor.predict(input_fn=lambda: input_fn(prediction_set))
print("Predictions: {}".format(str(y)))
if __name__ == "__main__":
tf.app.run()
| apache-2.0 | -849,208,337,605,358,200 | 33.971429 | 75 | 0.674837 | false |
kvar/ansible | lib/ansible/modules/cloud/cloudstack/cs_securitygroup_rule.py | 38 | 12923 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_securitygroup_rule
short_description: Manages security group rules on Apache CloudStack based clouds.
description:
- Add and remove security group rules.
version_added: '2.0'
author: René Moser (@resmo)
options:
security_group:
description:
      - Name of the security group the rule is related to. The security group must exist.
type: str
required: true
state:
description:
- State of the security group rule.
type: str
default: present
choices: [ present, absent ]
protocol:
description:
- Protocol of the security group rule.
type: str
default: tcp
choices: [ tcp, udp, icmp, ah, esp, gre ]
type:
description:
- Ingress or egress security group rule.
type: str
default: ingress
choices: [ ingress, egress ]
cidr:
description:
- CIDR (full notation) to be used for security group rule.
type: str
default: 0.0.0.0/0
user_security_group:
description:
- Security group this rule is based of.
type: str
start_port:
description:
- Start port for this rule. Required if I(protocol=tcp) or I(protocol=udp).
type: int
aliases: [ port ]
end_port:
description:
- End port for this rule. Required if I(protocol=tcp) or I(protocol=udp), but I(start_port) will be used if not set.
type: int
icmp_type:
description:
- Type of the icmp message being sent. Required if I(protocol=icmp).
type: int
icmp_code:
description:
- Error code for this icmp message. Required if I(protocol=icmp).
type: int
project:
description:
      - Name of the project the security group is created in.
type: str
poll_async:
description:
- Poll async jobs until job has finished.
default: yes
type: bool
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
---
- name: allow inbound port 80/tcp from 1.2.3.4 added to security group 'default'
cs_securitygroup_rule:
security_group: default
port: 80
cidr: 1.2.3.4/32
delegate_to: localhost
- name: allow tcp/udp outbound added to security group 'default'
cs_securitygroup_rule:
security_group: default
type: egress
start_port: 1
end_port: 65535
protocol: '{{ item }}'
with_items:
- tcp
- udp
delegate_to: localhost
- name: allow inbound icmp from 0.0.0.0/0 added to security group 'default'
cs_securitygroup_rule:
security_group: default
protocol: icmp
icmp_code: -1
icmp_type: -1
delegate_to: localhost
- name: remove rule inbound port 80/tcp from 0.0.0.0/0 from security group 'default'
cs_securitygroup_rule:
security_group: default
port: 80
state: absent
delegate_to: localhost
- name: allow inbound port 80/tcp from security group web added to security group 'default'
cs_securitygroup_rule:
security_group: default
port: 80
user_security_group: web
delegate_to: localhost
'''
RETURN = '''
---
id:
description: UUID of the of the rule.
returned: success
type: str
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
security_group:
description: security group of the rule.
returned: success
type: str
sample: default
type:
description: type of the rule.
returned: success
type: str
sample: ingress
cidr:
description: CIDR of the rule.
returned: success and cidr is defined
type: str
sample: 0.0.0.0/0
user_security_group:
description: user security group of the rule.
returned: success and user_security_group is defined
type: str
sample: default
protocol:
description: protocol of the rule.
returned: success
type: str
sample: tcp
start_port:
description: start port of the rule.
returned: success
type: int
sample: 80
end_port:
description: end port of the rule.
returned: success
type: int
sample: 80
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import AnsibleCloudStack, cs_argument_spec, cs_required_together
class AnsibleCloudStackSecurityGroupRule(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackSecurityGroupRule, self).__init__(module)
self.returns = {
'icmptype': 'icmp_type',
'icmpcode': 'icmp_code',
'endport': 'end_port',
'startport': 'start_port',
'protocol': 'protocol',
'cidr': 'cidr',
'securitygroupname': 'user_security_group',
}
def _tcp_udp_match(self, rule, protocol, start_port, end_port):
return (protocol in ['tcp', 'udp'] and
protocol == rule['protocol'] and
start_port == int(rule['startport']) and
end_port == int(rule['endport']))
def _icmp_match(self, rule, protocol, icmp_code, icmp_type):
return (protocol == 'icmp' and
protocol == rule['protocol'] and
icmp_code == int(rule['icmpcode']) and
icmp_type == int(rule['icmptype']))
def _ah_esp_gre_match(self, rule, protocol):
return (protocol in ['ah', 'esp', 'gre'] and
protocol == rule['protocol'])
def _type_security_group_match(self, rule, security_group_name):
return (security_group_name and
'securitygroupname' in rule and
security_group_name == rule['securitygroupname'])
def _type_cidr_match(self, rule, cidr):
return ('cidr' in rule and
cidr == rule['cidr'])
def _get_rule(self, rules):
user_security_group_name = self.module.params.get('user_security_group')
cidr = self.module.params.get('cidr')
protocol = self.module.params.get('protocol')
start_port = self.module.params.get('start_port')
end_port = self.get_or_fallback('end_port', 'start_port')
icmp_code = self.module.params.get('icmp_code')
icmp_type = self.module.params.get('icmp_type')
if protocol in ['tcp', 'udp'] and (start_port is None or end_port is None):
self.module.fail_json(msg="no start_port or end_port set for protocol '%s'" % protocol)
if protocol == 'icmp' and (icmp_type is None or icmp_code is None):
self.module.fail_json(msg="no icmp_type or icmp_code set for protocol '%s'" % protocol)
for rule in rules:
if user_security_group_name:
type_match = self._type_security_group_match(rule, user_security_group_name)
else:
type_match = self._type_cidr_match(rule, cidr)
protocol_match = (self._tcp_udp_match(rule, protocol, start_port, end_port) or
self._icmp_match(rule, protocol, icmp_code, icmp_type) or
self._ah_esp_gre_match(rule, protocol))
if type_match and protocol_match:
return rule
return None
def get_security_group(self, security_group_name=None):
if not security_group_name:
security_group_name = self.module.params.get('security_group')
args = {
'securitygroupname': security_group_name,
'projectid': self.get_project('id'),
}
sgs = self.query_api('listSecurityGroups', **args)
if not sgs or 'securitygroup' not in sgs:
self.module.fail_json(msg="security group '%s' not found" % security_group_name)
return sgs['securitygroup'][0]
def add_rule(self):
security_group = self.get_security_group()
args = {}
user_security_group_name = self.module.params.get('user_security_group')
# the user_security_group and cidr are mutually_exclusive, but cidr is defaulted to 0.0.0.0/0.
# that is why we ignore if we have a user_security_group.
if user_security_group_name:
args['usersecuritygrouplist'] = []
user_security_group = self.get_security_group(user_security_group_name)
args['usersecuritygrouplist'].append({
'group': user_security_group['name'],
'account': user_security_group['account'],
})
else:
args['cidrlist'] = self.module.params.get('cidr')
args['protocol'] = self.module.params.get('protocol')
args['startport'] = self.module.params.get('start_port')
args['endport'] = self.get_or_fallback('end_port', 'start_port')
args['icmptype'] = self.module.params.get('icmp_type')
args['icmpcode'] = self.module.params.get('icmp_code')
args['projectid'] = self.get_project('id')
args['securitygroupid'] = security_group['id']
rule = None
res = None
sg_type = self.module.params.get('type')
if sg_type == 'ingress':
if 'ingressrule' in security_group:
rule = self._get_rule(security_group['ingressrule'])
if not rule:
self.result['changed'] = True
if not self.module.check_mode:
res = self.query_api('authorizeSecurityGroupIngress', **args)
elif sg_type == 'egress':
if 'egressrule' in security_group:
rule = self._get_rule(security_group['egressrule'])
if not rule:
self.result['changed'] = True
if not self.module.check_mode:
res = self.query_api('authorizeSecurityGroupEgress', **args)
poll_async = self.module.params.get('poll_async')
if res and poll_async:
security_group = self.poll_job(res, 'securitygroup')
key = sg_type + "rule" # ingressrule / egressrule
if key in security_group:
rule = security_group[key][0]
return rule
def remove_rule(self):
security_group = self.get_security_group()
rule = None
res = None
sg_type = self.module.params.get('type')
        if sg_type == 'ingress':
            # the rule list key is absent when the group has no rules at all
            rule = self._get_rule(security_group.get('ingressrule', []))
            if rule:
                self.result['changed'] = True
                if not self.module.check_mode:
                    res = self.query_api('revokeSecurityGroupIngress', id=rule['ruleid'])
        elif sg_type == 'egress':
            rule = self._get_rule(security_group.get('egressrule', []))
            if rule:
                self.result['changed'] = True
                if not self.module.check_mode:
                    res = self.query_api('revokeSecurityGroupEgress', id=rule['ruleid'])
poll_async = self.module.params.get('poll_async')
if res and poll_async:
res = self.poll_job(res, 'securitygroup')
return rule
def get_result(self, security_group_rule):
super(AnsibleCloudStackSecurityGroupRule, self).get_result(security_group_rule)
self.result['type'] = self.module.params.get('type')
self.result['security_group'] = self.module.params.get('security_group')
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
security_group=dict(required=True),
type=dict(choices=['ingress', 'egress'], default='ingress'),
cidr=dict(default='0.0.0.0/0'),
user_security_group=dict(),
protocol=dict(choices=['tcp', 'udp', 'icmp', 'ah', 'esp', 'gre'], default='tcp'),
icmp_type=dict(type='int'),
icmp_code=dict(type='int'),
start_port=dict(type='int', aliases=['port']),
end_port=dict(type='int'),
state=dict(choices=['present', 'absent'], default='present'),
project=dict(),
poll_async=dict(type='bool', default=True),
))
required_together = cs_required_together()
required_together.extend([
['icmp_type', 'icmp_code'],
])
module = AnsibleModule(
argument_spec=argument_spec,
required_together=required_together,
mutually_exclusive=(
['icmp_type', 'start_port'],
['icmp_type', 'end_port'],
['icmp_code', 'start_port'],
['icmp_code', 'end_port'],
),
supports_check_mode=True
)
acs_sg_rule = AnsibleCloudStackSecurityGroupRule(module)
state = module.params.get('state')
if state in ['absent']:
sg_rule = acs_sg_rule.remove_rule()
else:
sg_rule = acs_sg_rule.add_rule()
result = acs_sg_rule.get_result(sg_rule)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 8,876,380,384,042,106,000 | 32.301546 | 122 | 0.607461 | false |
staticlibs/android-ndk-r9d-arm-linux-androideabi-4.8 | lib/python2.7/test/test_multiprocessing.py | 27 | 76391 | #!/usr/bin/env python
#
# Unit tests for the multiprocessing package
#
import unittest
import Queue
import time
import sys
import os
import gc
import signal
import array
import socket
import random
import logging
import errno
import test.script_helper
from test import test_support
from StringIO import StringIO
_multiprocessing = test_support.import_module('_multiprocessing')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
import threading
# Work around broken sem_open implementations
test_support.import_module('multiprocessing.synchronize')
import multiprocessing.dummy
import multiprocessing.connection
import multiprocessing.managers
import multiprocessing.heap
import multiprocessing.pool
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = True
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
import msvcrt
except ImportError:
msvcrt = None
#
#
#
latin = str
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False # making true makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double
except ImportError:
Structure = object
c_int = c_double = None
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.time()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.time() - t
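# Illustrative sketch (not executed by the tests): a TimingWrapper behaves like
# the wrapped callable and records how long the call blocked, e.g.
#
#   get = TimingWrapper(q.get)
#   get(True, 0.5)        # forwards to q.get(True, 0.5)
#   get.elapsed           # roughly 0.5 if the call timed out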
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
    # For the sanity of Windows users: fail loudly if a test case ends up
    # being pickled, rather than crashing or freezing in multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
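# The chain of fallbacks above is needed because the different semaphore
# implementations expose their counter differently: multiprocessing semaphores
# provide get_value(), while threading.Semaphore keeps it in a private
# attribute (_Semaphore__value on Python 2, _value on later versions).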
#
# Testcases
#
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
return
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
@classmethod
def _test_terminate(cls):
time.sleep(1000)
def test_terminate(self):
if self.TYPE == 'threads':
return
p = self.Process(target=self._test_terminate)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
p.terminate()
join = TimingWrapper(p.join)
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
# XXX sometimes get p.exitcode == 0 on Windows ...
#self.assertEqual(p.exitcode, -signal.SIGTERM)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
from multiprocessing import forking
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
@classmethod
def _test_sys_exit(cls, reason, testfn):
sys.stderr = open(testfn, 'w')
sys.exit(reason)
def test_sys_exit(self):
# See Issue 13854
if self.TYPE == 'threads':
return
testfn = test_support.TESTFN
self.addCleanup(test_support.unlink, testfn)
for reason, code in (([1, 2, 3], 1), ('ignore this', 0)):
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
p.daemon = True
p.start()
p.join(5)
self.assertEqual(p.exitcode, code)
with open(testfn, 'r') as f:
self.assertEqual(f.read().rstrip(), str(reason))
for reason in (True, False, 8):
p = self.Process(target=sys.exit, args=(reason,))
p.daemon = True
p.start()
p.join(5)
self.assertEqual(p.exitcode, reason)
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.daemon = True
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
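# queue_empty()/queue_full() exist because not every queue type exercised by
# these tests implements empty()/full(); when those methods are missing the
# helpers fall back to comparing qsize() against 0 or the maximum size.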
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in buffer but not yet in pipe so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(Queue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(Queue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(Queue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(Queue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(Queue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
        # note that at this point the items may only be buffered, so the
        # process cannot shut down until the feeder thread has finished
        # pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.daemon = True
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(Queue.Empty, queue.get, False)
p.join()
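    # Background for the feeder-thread comments above: multiprocessing.Queue.put()
    # only appends to an internal buffer; a separate feeder thread drains that
    # buffer into the underlying pipe, which is why the tests sleep briefly
    # before expecting queued items to be visible to another process.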
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
return
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
@classmethod
def _test_task_done(cls, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
if sys.version_info < (2, 5) and not hasattr(queue, 'task_done'):
self.skipTest("requires 'queue.task_done()' method")
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in xrange(4)]
for p in workers:
p.daemon = True
p.start()
for i in xrange(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on OS/X
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
return
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
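    # f() coordinates with the parent through two semaphores: releasing
    # 'sleeping' signals that the worker holds the condition and is about to
    # wait on it, and releasing 'woken' signals that the wait has returned
    # (either because it was notified or because it timed out).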
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
# wait for them all to sleep
for i in xrange(6):
sleeping.acquire()
# check they have all timed out
for i in xrange(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
# wait for them to all sleep
for i in xrange(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
time.sleep(DELTA)
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, None)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
class _TestEvent(BaseTestCase):
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
# Removed temporarily, due to API shear, this does not
# work with threading._Event objects. is_set == isSet
self.assertEqual(event.is_set(), False)
# Removed, threading.Event.wait() will return the value of the __flag
# instead of None. API Shear with the semaphore backed mp.Event
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
p = self.Process(target=self._test_event, args=(event,))
p.daemon = True
p.start()
self.assertEqual(wait(), True)
#
#
#
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.daemon = True
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_from_size(self):
size = 10
# Test for zeroing (see issue #11675).
# The repetition below strengthens the test by increasing the chances
# of previously allocated non-zero memory being used for the new array
# on the 2nd and 3rd loops.
for _ in range(3):
arr = self.Array('i', size)
self.assertEqual(len(arr), size)
self.assertEqual(list(arr), [0] * size)
arr[:] = range(10)
self.assertEqual(list(arr), range(10))
del arr
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_accepts_long(self):
arr = self.Array('i', 10L)
self.assertEqual(len(arr), 10)
raw_arr = self.RawArray('i', 10L)
self.assertEqual(len(raw_arr), 10)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', range(10))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', range(10), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', range(10), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(range(10))
self.assertEqual(a[:], range(10))
b = self.list()
self.assertEqual(b[:], [])
b.extend(range(5))
self.assertEqual(b[:], range(5))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], range(10))
d = [a, b]
e = self.list(d)
self.assertEqual(
e[:],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']])
def test_dict(self):
d = self.dict()
indices = range(65, 70)
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
class _TestPool(BaseTestCase):
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, range(10)), map(sqr, range(10)))
self.assertEqual(pmap(sqr, range(100), chunksize=20),
map(sqr, range(100)))
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 0.2))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, range(10))
self.assertEqual(list(it), map(sqr, range(10)))
it = self.pool.imap(sqr, range(10))
for i in range(10):
self.assertEqual(it.next(), i*i)
self.assertRaises(StopIteration, it.next)
it = self.pool.imap(sqr, range(1000), chunksize=100)
for i in range(1000):
self.assertEqual(it.next(), i*i)
self.assertRaises(StopIteration, it.next)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, range(1000))
self.assertEqual(sorted(it), map(sqr, range(1000)))
it = self.pool.imap_unordered(sqr, range(1000), chunksize=53)
self.assertEqual(sorted(it), map(sqr, range(1000)))
def test_make_pool(self):
self.assertRaises(ValueError, multiprocessing.Pool, -1)
self.assertRaises(ValueError, multiprocessing.Pool, 0)
p = multiprocessing.Pool(3)
self.assertEqual(3, len(p._pool))
p.close()
p.join()
def test_terminate(self):
if self.TYPE == 'manager':
# On Unix a forked process increfs each shared object to
# which its parent process held a reference. If the
# forked process gets terminated then there is likely to
# be a reference leak. So to prevent
# _TestZZZNumberOfObjects from failing we skip this test
# when using a manager.
return
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
self.assertTrue(join.elapsed < 0.2)
def test_empty_iterable(self):
# See Issue 12157
p = self.Pool(1)
self.assertEqual(p.map(sqr, []), [])
self.assertEqual(list(p.imap(sqr, [])), [])
self.assertEqual(list(p.imap_unordered(sqr, [])), [])
self.assertEqual(p.map_async(sqr, []).get(), [])
p.close()
p.join()
def unpickleable_result():
return lambda: 42
class _TestPoolWorkerErrors(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_unpickleable_result(self):
from multiprocessing.pool import MaybeEncodingError
p = multiprocessing.Pool(2)
# Make sure we don't lose pool processes because of encoding errors.
for iteration in range(20):
res = p.apply_async(unpickleable_result)
self.assertRaises(MaybeEncodingError, res.get)
p.close()
p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
# (countdown * DELTA = 5 seconds max startup process time)
countdown = 50
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
def test_pool_worker_lifetime_early_close(self):
# Issue #10332: closing a pool whose workers have limited lifetimes
# before all the tasks completed would make join() hang.
p = multiprocessing.Pool(3, maxtasksperchild=1)
results = []
for i in range(6):
results.append(p.apply_async(sqr, (i, 0.3)))
p.close()
p.join()
# check the results
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
#
# Test that manager has expected number of shared objects left
#
class _TestZZZNumberOfObjects(BaseTestCase):
# Because test cases are sorted alphabetically, this one will get
# run after all the other tests for the manager. It tests that
# there have been no "reference leaks" for the manager's shared
# objects. Note the comment in _TestPool.test_terminate().
ALLOWED_TYPES = ('manager',)
def test_number_of_objects(self):
EXPECTED_NUMBER = 1 # the pool object is still alive
multiprocessing.active_children() # discard dead process objs
gc.collect() # do garbage collection
refs = self.manager._number_of_objects()
debug_info = self.manager._debug_info()
if refs != EXPECTED_NUMBER:
print self.manager._debug_info()
print debug_info
self.assertEqual(refs, EXPECTED_NUMBER)
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in xrange(10):
yield i*i
class IteratorProxy(BaseProxy):
_exposed_ = ('next', '__next__')
def __iter__(self):
return self
def next(self):
return self._callmethod('next')
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
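# Each register() call above turns a typeid into a factory method on manager
# instances: MyManager().Foo() returns a proxy for a FooBar created in the
# manager process. The 'exposed' tuple limits which referent methods the proxy
# may call (Bar proxies expose only f and _h), and 'proxytype' substitutes a
# custom proxy class (baz proxies iterate the remote generator step by step).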
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
manager.shutdown()
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = Queue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
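# QueueManager owns the shared queue; QueueManager2 registers the same typeid
# without a callable, so it is only suitable for connecting to an existing
# server and obtaining proxies, which is exactly how _putter uses it below.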
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
queue.put(('hello world', None, True, 2.25))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=('localhost', 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
# Note that xmlrpclib will deserialize object as a list not a tuple
self.assertEqual(queue.get(), ['hello world', None, True, 2.25])
# Because we are using xmlrpclib for serialization instead of
# pickle this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
manager.shutdown()
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=('localhost', 0), authkey=authkey, serializer=SERIALIZER)
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as a part
# of manager.get_server(). It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
manager.start()
manager.shutdown()
#
#
#
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', range(4))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort, e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
time.sleep(.1)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
        really_big_msg = latin('X') * (1024 * 1024 * 16)   # 16 MiB
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(IOError, reader.send, 2)
self.assertRaises(IOError, writer.recv)
self.assertRaises(IOError, writer.poll)
def test_spawn_close(self):
# We test that a pipe connection can be closed by parent
# process immediately after child is spawned. On Windows this
# would have sometimes failed on old versions because
# child_conn would be closed before the child got a chance to
# duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
return
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
@classmethod
def _writefd(cls, conn, data, create_dummy_fds=False):
if create_dummy_fds:
for i in range(0, 256):
if not cls._is_fd_assigned(i):
os.dup2(conn.fileno(), i)
fd = reduction.recv_handle(conn)
if msvcrt:
fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
os.write(fd, data)
os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
with open(test_support.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
with open(test_support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
"largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
"test needs os.dup2()")
def test_large_fd_transfer(self):
# With fd > 256 (issue #11657)
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
with open(test_support.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
break
else:
self.fail("could not find an unassigned large file descriptor")
os.dup2(fd, newfd)
try:
reduction.send_handle(conn, newfd, p.pid)
finally:
os.close(newfd)
p.join()
with open(test_support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
@classmethod
def _send_data_without_fd(self, conn):
os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
# Check that exception is raised when received data is not
# accompanied by a file descriptor in ancillary data.
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
p.daemon = True
p.start()
self.assertRaises(RuntimeError, reduction.recv_handle, conn)
p.join()
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
def test_issue14725(self):
l = self.connection.Listener()
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
time.sleep(1)
        # On Windows the client process should by now have connected,
        # written its data and closed the pipe handle. This causes
        # ConnectNamedPipe() to fail with ERROR_NO_DATA. See Issue
        # 14725.
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
conn.close()
p.join()
l.close()
#
# Test of sending connection and socket objects between processes
#
"""
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def _listener(self, conn, families):
for fam in families:
l = self.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
if self.TYPE == 'processes':
l = socket.socket()
l.bind(('localhost', 0))
conn.send(l.getsockname())
l.listen(1)
new_conn, addr = l.accept()
conn.send(new_conn)
conn.recv()
def _remote(self, conn):
for (address, msg) in iter(conn.recv, None):
client = self.connection.Client(address)
client.send(msg.upper())
client.close()
if self.TYPE == 'processes':
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
try:
multiprocessing.allow_connection_pickling()
except ImportError:
return
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
if self.TYPE == 'processes':
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
if hasattr(socket, 'fromfd'):
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(100), msg.upper())
else:
# XXX On Windows with Py2.6 need to backport fromfd()
discard = lconn.recv_bytes()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
"""
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# create and destroy lots of blocks of different sizes
for i in xrange(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
# verify the state of the heap
all = []
occupied = 0
heap._lock.acquire()
self.addCleanup(heap._lock.release)
for L in heap._len_to_seq.values():
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
for arena, start, stop in heap._allocated_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
self.assertTrue((arena != narena and nstart == 0) or
(stop == nstart))
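    # The assertion above checks the heap invariant: consecutive blocks in the
    # same arena must be contiguous (each starts exactly where the previous one
    # ends), and whenever the walk crosses into a different arena the first
    # block of that arena must start at offset 0.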
def test_free_from_gc(self):
# Check that freeing of blocks by the garbage collector doesn't deadlock
# (issue #12352).
# Make sure the GC is enabled, and set lower collection thresholds to
# make collections more frequent (and increase the probability of
# deadlock).
if not gc.isenabled():
gc.enable()
self.addCleanup(gc.disable)
thresholds = gc.get_threshold()
self.addCleanup(gc.set_threshold, *thresholds)
gc.set_threshold(10)
# perform numerous block allocations, with cyclic references to make
# sure objects are collected asynchronously by the gc
for i in range(5000):
a = multiprocessing.heap.BufferWrapper(1)
b = multiprocessing.heap.BufferWrapper(1)
# circular references
a.buddy = b
b.buddy = a
#
#
#
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double)
]
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _double(cls, x, y, foo, arr, string):
x.value *= 2
y.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', range(10), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, foo, arr, string))
p.daemon = True
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
foo = _Foo(2, 5.0)
bar = copy(foo)
foo.x = 0
foo.y = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
#
#
#
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
# call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.daemon = True
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
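        # Expected ordering: 'a' and 'b' were emitted when their finalizers
        # were triggered explicitly; util._exit_function() then runs the
        # finalizers registered with an exitpriority from highest to lowest
        # ('d10', then the priority-0 group in reverse registration order,
        # then 'e' and finally the 'STOP' sentinel), while 'c', which has no
        # exitpriority, is skipped by the exit-time pass.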
#
# Test that from ... import * works for each module
#
class _TestImportStar(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_import(self):
modules = [
'multiprocessing', 'multiprocessing.connection',
'multiprocessing.heap', 'multiprocessing.managers',
'multiprocessing.pool', 'multiprocessing.process',
'multiprocessing.synchronize', 'multiprocessing.util'
]
if HAS_REDUCTION:
modules.append('multiprocessing.reduction')
if c_int is not None:
# This module requires _ctypes
modules.append('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
for attr in getattr(mod, '__all__', ()):
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
p = self.Process(target=self._test_level, args=(writer,))
p.daemon = True
p.start()
self.assertEqual(LEVEL1, reader.recv())
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
p = self.Process(target=self._test_level, args=(writer,))
p.daemon = True
p.start()
self.assertEqual(LEVEL2, reader.recv())
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _killer(cls, pid):
time.sleep(0.5)
os.kill(pid, signal.SIGUSR1)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_poll_eintr(self):
got_signal = [False]
def record(*args):
got_signal[0] = True
pid = os.getpid()
oldhandler = signal.signal(signal.SIGUSR1, record)
try:
killer = self.Process(target=self._killer, args=(pid,))
killer.start()
p = self.Process(target=time.sleep, args=(1,))
p.start()
p.join()
self.assertTrue(got_signal[0])
self.assertEqual(p.exitcode, 0)
killer.join()
finally:
signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = _multiprocessing.Connection(44977608)
self.assertRaises(IOError, conn.poll)
self.assertRaises(IOError, _multiprocessing.Connection, -1)
#
# Functions used to create test cases from the base ones in this module
#
def get_attributes(Source, names):
d = {}
for name in names:
obj = getattr(Source, name)
if type(obj) == type(get_attributes):
obj = staticmethod(obj)
d[name] = obj
return d
def create_test_cases(Mixin, type):
result = {}
glob = globals()
Type = type.capitalize()
for name in glob.keys():
if name.startswith('_Test'):
base = glob[name]
if type in base.ALLOWED_TYPES:
newname = 'With' + Type + name[1:]
class Temp(base, unittest.TestCase, Mixin):
pass
result[newname] = Temp
Temp.__name__ = newname
Temp.__module__ = Mixin.__module__
return result
#
# Create test cases
#
class ProcessesMixin(object):
TYPE = 'processes'
Process = multiprocessing.Process
locals().update(get_attributes(multiprocessing, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'RawValue',
'RawArray', 'current_process', 'active_children', 'Pipe',
'connection', 'JoinableQueue', 'Pool'
)))
testcases_processes = create_test_cases(ProcessesMixin, type='processes')
globals().update(testcases_processes)
class ManagerMixin(object):
TYPE = 'manager'
Process = multiprocessing.Process
manager = object.__new__(multiprocessing.managers.SyncManager)
locals().update(get_attributes(manager, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'list', 'dict',
'Namespace', 'JoinableQueue', 'Pool'
)))
testcases_manager = create_test_cases(ManagerMixin, type='manager')
globals().update(testcases_manager)
class ThreadsMixin(object):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
locals().update(get_attributes(multiprocessing.dummy, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'current_process',
'active_children', 'Pipe', 'connection', 'dict', 'list',
'Namespace', 'JoinableQueue', 'Pool'
)))
testcases_threads = create_test_cases(ThreadsMixin, type='threads')
globals().update(testcases_threads)
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
ns.test += 1
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _ThisSubProcess(q):
try:
item = q.get(block=False)
except Queue.Empty:
pass
def _TestProcess(q):
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_ThisSubProcess, args=(queue,))
subProc.daemon = True
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork keeps only the running thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
queue = multiprocessing.Queue()
proc = multiprocessing.Process(target=_TestProcess, args=(queue,))
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
assert sio.getvalue() == 'foo'
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
@classmethod
def _test_timeout(cls, child, address):
time.sleep(1)
child.send(123)
child.close()
conn = multiprocessing.connection.Client(address)
conn.send(456)
conn.close()
def test_timeout(self):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(0.1)
parent, child = multiprocessing.Pipe(duplex=True)
l = multiprocessing.connection.Listener(family='AF_INET')
p = multiprocessing.Process(target=self._test_timeout,
args=(child, l.address))
p.start()
child.close()
self.assertEqual(parent.recv(), 123)
parent.close()
conn = l.accept()
self.assertEqual(conn.recv(), 456)
conn.close()
l.close()
p.join(10)
finally:
socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
def test_noforkbomb(self):
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
if WIN32:
rc, out, err = test.script_helper.assert_python_failure(name)
self.assertEqual('', out.decode('ascii'))
self.assertIn('RuntimeError', err.decode('ascii'))
else:
rc, out, err = test.script_helper.assert_python_ok(name)
self.assertEqual('123', out.decode('ascii').rstrip())
self.assertEqual('', err.decode('ascii'))
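# For context, the script exercised by TestNoForkBomb creates a child process
# at import time with no ``if __name__ == '__main__'`` guard.  A minimal
# sketch of such a script (illustrative only -- not necessarily the exact
# contents of mp_fork_bomb.py):
#
#   import multiprocessing
#
#   def foo():
#       print('123')
#
#   # On Windows the import-time spawn must raise RuntimeError rather than
#   # re-importing the module over and over (a "fork bomb"); on Unix the
#   # child simply runs foo() and prints 123.
#   p = multiprocessing.Process(target=foo)
#   p.start()
#   p.join()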
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
@classmethod
def run_in_grandchild(cls, conn):
conn.send(tuple(sys.flags))
@classmethod
def run_in_child(cls):
import json
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
p.start()
grandchild_flags = r.recv()
p.join()
r.close()
w.close()
flags = (tuple(sys.flags), grandchild_flags)
print(json.dumps(flags))
def test_flags(self):
import json, subprocess
# start child process using unusual flags
prog = ('from test.test_multiprocessing import TestFlags; ' +
'TestFlags.run_in_child()')
data = subprocess.check_output(
[sys.executable, '-E', '-B', '-O', '-c', prog])
child_flags, grandchild_flags = json.loads(data.decode('ascii'))
self.assertEqual(child_flags, grandchild_flags)
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
    # We recursively start processes. Issue #17555 meant that the
# after fork registry would get duplicate entries for the same
# lock. The size of the registry at generation n was ~2**n.
@classmethod
def child(cls, n, conn):
if n > 1:
p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
p.start()
p.join()
else:
conn.send(len(util._afterfork_registry))
conn.close()
def test_lock(self):
r, w = multiprocessing.Pipe(False)
l = util.ForkAwareThreadLock()
old_size = len(util._afterfork_registry)
p = multiprocessing.Process(target=self.child, args=(5, w))
p.start()
new_size = r.recv()
p.join()
self.assertLessEqual(new_size, old_size)
#
#
#
testcases_other = [OtherTest, TestInvalidHandle, TestInitializers,
TestStdinBadfiledescriptor, TestTimeouts, TestNoForkBomb,
TestFlags, TestForkAwareThreadLock]
#
#
#
def test_main(run=None):
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, see issue 3111!")
check_enough_semaphores()
if run is None:
from test.test_support import run_unittest as run
util.get_temp_dir() # creates temp directory for use by all processes
multiprocessing.get_logger().setLevel(LOG_LEVEL)
ProcessesMixin.pool = multiprocessing.Pool(4)
ThreadsMixin.pool = multiprocessing.dummy.Pool(4)
ManagerMixin.manager.__init__()
ManagerMixin.manager.start()
ManagerMixin.pool = ManagerMixin.manager.Pool(4)
testcases = (
sorted(testcases_processes.values(), key=lambda tc:tc.__name__) +
sorted(testcases_threads.values(), key=lambda tc:tc.__name__) +
sorted(testcases_manager.values(), key=lambda tc:tc.__name__) +
testcases_other
)
loadTestsFromTestCase = unittest.defaultTestLoader.loadTestsFromTestCase
suite = unittest.TestSuite(loadTestsFromTestCase(tc) for tc in testcases)
# (ncoghlan): Whether or not sys.exc_clear is executed by the threading
# module during these tests is at least platform dependent and possibly
# non-deterministic on any given platform. So we don't mind if the listed
# warnings aren't actually raised.
with test_support.check_py3k_warnings(
(".+__(get|set)slice__ has been removed", DeprecationWarning),
(r"sys.exc_clear\(\) not supported", DeprecationWarning),
quiet=True):
run(suite)
ThreadsMixin.pool.terminate()
ProcessesMixin.pool.terminate()
ManagerMixin.pool.terminate()
ManagerMixin.manager.shutdown()
del ProcessesMixin.pool, ThreadsMixin.pool, ManagerMixin.pool
def main():
test_main(unittest.TextTestRunner(verbosity=2).run)
if __name__ == '__main__':
main()
| gpl-2.0 | -2,977,560,226,234,707,500 | 29.229917 | 88 | 0.585658 | false |
praveen-pal/edx-platform | common/djangoapps/terrain/steps.py | 5 | 6146 | #pylint: disable=C0111
#pylint: disable=W0621
# Disable the "wildcard import" warning so we can bring in all methods from
# course helpers and ui helpers
#pylint: disable=W0401
# Disable the "Unused import %s from wildcard import" warning
#pylint: disable=W0614
# Disable the "unused argument" warning because lettuce uses "step"
#pylint: disable=W0613
from lettuce import world, step
from .course_helpers import *
from .ui_helpers import *
from lettuce.django import django_url
from nose.tools import assert_equals # pylint: disable=E0611
from logging import getLogger
logger = getLogger(__name__)
@step(r'I wait (?:for )?"(\d+)" seconds?$')
def wait(step, seconds):
world.wait(seconds)
@step('I reload the page$')
def reload_the_page(step):
world.browser.reload()
@step('I press the browser back button$')
def browser_back(step):
world.browser.driver.back()
@step('I (?:visit|access|open) the homepage$')
def i_visit_the_homepage(step):
world.visit('/')
assert world.is_css_present('header.global')
@step(u'I (?:visit|access|open) the dashboard$')
def i_visit_the_dashboard(step):
world.visit('/dashboard')
assert world.is_css_present('section.container.dashboard')
@step('I should be on the dashboard page$')
def i_should_be_on_the_dashboard(step):
assert world.is_css_present('section.container.dashboard')
assert world.browser.title == 'Dashboard'
@step(u'I (?:visit|access|open) the courses page$')
def i_am_on_the_courses_page(step):
world.visit('/courses')
assert world.is_css_present('section.courses')
@step(u'I press the "([^"]*)" button$')
def and_i_press_the_button(step, value):
button_css = 'input[value="%s"]' % value
world.css_click(button_css)
@step(u'I click the link with the text "([^"]*)"$')
def click_the_link_with_the_text_group1(step, linktext):
world.click_link(linktext)
@step('I should see that the path is "([^"]*)"$')
def i_should_see_that_the_path_is(step, path):
assert world.url_equals(path)
@step(u'the page title should be "([^"]*)"$')
def the_page_title_should_be(step, title):
assert_equals(world.browser.title, title)
@step(u'the page title should contain "([^"]*)"$')
def the_page_title_should_contain(step, title):
assert(title in world.browser.title)
@step('I log in$')
def i_log_in(step):
world.log_in(username='robot', password='test')
@step('I am a logged in user$')
def i_am_logged_in_user(step):
world.create_user('robot', 'test')
world.log_in(username='robot', password='test')
@step('I am not logged in$')
def i_am_not_logged_in(step):
world.visit('logout')
@step('I am staff for course "([^"]*)"$')
def i_am_staff_for_course_by_id(step, course_id):
world.register_by_course_id(course_id, True)
@step(r'click (?:the|a) link (?:called|with the text) "([^"]*)"$')
def click_the_link_called(step, text):
world.click_link(text)
@step(r'should see that the url is "([^"]*)"$')
def should_have_the_url(step, url):
assert_equals(world.browser.url, url)
@step(r'should see (?:the|a) link (?:called|with the text) "([^"]*)"$')
def should_see_a_link_called(step, text):
assert len(world.browser.find_link_by_text(text)) > 0
@step(r'should see (?:the|a) link with the id "([^"]*)" called "([^"]*)"$')
def should_have_link_with_id_and_text(step, link_id, text):
link = world.browser.find_by_id(link_id)
assert len(link) > 0
assert_equals(link.text, text)
@step(r'should see a link to "([^"]*)" with the text "([^"]*)"$')
def should_have_link_with_path_and_text(step, path, text):
link = world.browser.find_link_by_text(text)
assert len(link) > 0
assert_equals(link.first["href"], django_url(path))
@step(r'should( not)? see "(.*)" (?:somewhere|anywhere) (?:in|on) (?:the|this) page')
def should_see_in_the_page(step, doesnt_appear, text):
multiplier = 1
if world.SAUCE_ENABLED:
multiplier = 2
if doesnt_appear:
assert world.browser.is_text_not_present(text, wait_time=5*multiplier)
else:
assert world.browser.is_text_present(text, wait_time=5*multiplier)
@step('I am logged in$')
def i_am_logged_in(step):
world.create_user('robot', 'test')
world.log_in(username='robot', password='test')
world.browser.visit(django_url('/'))
# You should not see the login link
assert world.is_css_not_present('a#login')
@step(u'I am an edX user$')
def i_am_an_edx_user(step):
world.create_user('robot', 'test')
@step(u'User "([^"]*)" is an edX user$')
def registered_edx_user(step, uname):
world.create_user(uname, 'test')
@step(u'All dialogs should be closed$')
def dialogs_are_closed(step):
assert world.dialogs_closed()
@step(u'visit the url "([^"]*)"')
def visit_url(step, url):
world.browser.visit(django_url(url))
@step('I will confirm all alerts')
def i_confirm_all_alerts(step):
"""
Please note: This method must be called RIGHT BEFORE an expected alert
Window variables are page local and thus all changes are removed upon navigating to a new page
In addition, this method changes the functionality of ONLY future alerts
"""
world.browser.execute_script('window.confirm = function(){return true;} ; window.alert = function(){return;}')
@step('I will cancel all alerts')
def i_cancel_all_alerts(step):
"""
Please note: This method must be called RIGHT BEFORE an expected alert
Window variables are page local and thus all changes are removed upon navigating to a new page
In addition, this method changes the functionality of ONLY future alerts
"""
world.browser.execute_script('window.confirm = function(){return false;} ; window.alert = function(){return;}')
@step('I will answer all prompts with "([^"]*)"')
def i_answer_prompts_with(step, prompt):
"""
Please note: This method must be called RIGHT BEFORE an expected alert
Window variables are page local and thus all changes are removed upon navigating to a new page
In addition, this method changes the functionality of ONLY future alerts
"""
    # Interpolate the prompt text into the injected JS before executing it
    world.browser.execute_script('window.prompt = function(){return "%s";}' % prompt)
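# A short usage sketch (not part of the original module): the alert/prompt
# steps above are meant to run immediately before the action that triggers
# the dialog.  In a lettuce feature file this would look roughly like
# (scenario wording is hypothetical):
#
#   Scenario: Delete something that asks for confirmation
#       Given I will confirm all alerts
#       And I will answer all prompts with "robot"
#       When I press the "Delete" button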
| agpl-3.0 | 3,205,520,309,022,815,000 | 28.834951 | 115 | 0.677514 | false |
roadmapper/ansible | lib/ansible/modules/cloud/ovirt/_ovirt_affinity_label_facts.py | 2 | 6836 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_affinity_label_facts
short_description: Retrieve information about one or more oVirt/RHV affinity labels
author: "Ondra Machacek (@machacekondra)"
deprecated:
removed_in: "2.10"
why: When migrating to collection we decided to use only _info modules.
alternative: Use M(ovirt_affinity_label_info) instead
version_added: "2.3"
description:
- "Retrieve information about one or more oVirt/RHV affinity labels."
- This module was called C(ovirt_affinity_label_facts) before Ansible 2.9, returning C(ansible_facts).
Note that the M(ovirt_affinity_label_info) module no longer returns C(ansible_facts)!
notes:
- "This module returns a variable C(ovirt_affinity_labels), which
contains a list of affinity labels. You need to register the result with
the I(register) keyword to use it."
options:
name:
description:
- "Name of the affinity labels which should be listed."
vm:
description:
- "Name of the VM, which affinity labels should be listed."
host:
description:
- "Name of the host, which affinity labels should be listed."
extends_documentation_fragment: ovirt_info
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather information about all affinity labels, which names start with C(label):
- ovirt_affinity_label_info:
name: label*
register: result
- debug:
msg: "{{ result.ovirt_affinity_labels }}"
# Gather information about all affinity labels, which are assigned to VMs
# which names start with C(postgres):
- ovirt_affinity_label_info:
vm: postgres*
register: result
- debug:
msg: "{{ result.ovirt_affinity_labels }}"
# Gather information about all affinity labels, which are assigned to hosts
# which names start with C(west):
- ovirt_affinity_label_info:
host: west*
register: result
- debug:
msg: "{{ result.ovirt_affinity_labels }}"
# Gather information about all affinity labels, which are assigned to hosts
# which names start with C(west) or VMs which names start with C(postgres):
- ovirt_affinity_label_info:
host: west*
vm: postgres*
register: result
- debug:
msg: "{{ result.ovirt_affinity_labels }}"
'''
RETURN = '''
ovirt_affinity_labels:
description: "List of dictionaries describing the affinity labels. Affinity labels attributes are mapped to dictionary keys,
all affinity labels attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/affinity_label."
returned: On success.
type: list
'''
import fnmatch
import traceback
from ansible.module_utils.common.removed import removed_module
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_info_full_argument_spec,
search_by_name,
)
def main():
argument_spec = ovirt_info_full_argument_spec(
name=dict(default=None),
host=dict(default=None),
vm=dict(default=None),
)
module = AnsibleModule(argument_spec)
is_old_facts = module._name == 'ovirt_affinity_label_facts'
if is_old_facts:
module.deprecate("The 'ovirt_affinity_label_facts' module has been renamed to 'ovirt_affinity_label_info', "
"and the renamed one no longer returns ansible_facts", version='2.13')
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
affinity_labels_service = connection.system_service().affinity_labels_service()
labels = []
all_labels = affinity_labels_service.list()
if module.params['name']:
labels.extend([
l for l in all_labels
if fnmatch.fnmatch(l.name, module.params['name'])
])
if module.params['host']:
hosts_service = connection.system_service().hosts_service()
if search_by_name(hosts_service, module.params['host']) is None:
raise Exception("Host '%s' was not found." % module.params['host'])
labels.extend([
label
for label in all_labels
for host in connection.follow_link(label.hosts)
if fnmatch.fnmatch(hosts_service.service(host.id).get().name, module.params['host'])
])
if module.params['vm']:
vms_service = connection.system_service().vms_service()
if search_by_name(vms_service, module.params['vm']) is None:
raise Exception("Vm '%s' was not found." % module.params['vm'])
labels.extend([
label
for label in all_labels
for vm in connection.follow_link(label.vms)
if fnmatch.fnmatch(vms_service.service(vm.id).get().name, module.params['vm'])
])
if not (module.params['vm'] or module.params['host'] or module.params['name']):
labels = all_labels
result = dict(
ovirt_affinity_labels=[
get_dict_of_struct(
struct=l,
connection=connection,
fetch_nested=module.params.get('fetch_nested'),
attributes=module.params.get('nested_attributes'),
) for l in labels
],
)
if is_old_facts:
module.exit_json(changed=False, ansible_facts=result)
else:
module.exit_json(changed=False, **result)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == '__main__':
removed_module("2.10")
| gpl-3.0 | -5,892,429,747,630,375,000 | 34.978947 | 156 | 0.647747 | false |
sayanchowdhury/fedmsg_meta_fedora_infrastructure | fedmsg_meta_fedora_infrastructure/tests/pkgdb.py | 4 | 85348 | # This file is part of fedmsg.
# Copyright (C) 2012 Red Hat, Inc.
#
# fedmsg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# fedmsg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with fedmsg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Ralph Bean <[email protected]>
#
""" Tests for pkgdb messages """
import unittest
from fedmsg.tests.test_meta import Base
from .common import add_doc
class TestPkgdbACLUpdate(Base):
""" The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
publishes these messages when an ACL changes on a package.
"""
expected_title = "pkgdb.acl.update"
expected_subti = ("ralph changed ralph's 'watchbugzilla' permission on "
"python-sh (EL-6) to 'Awaiting Review'")
expected_link = "https://admin.fedoraproject.org/pkgdb/package/python-sh/"
expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
"package_128x128.png")
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c?s=64&d=retro")
expected_packages = set(['python-sh'])
expected_usernames = set(['ralph'])
expected_objects = set(['python-sh/acls/EL-6/watchbugzilla/ralph'])
msg = {
"username": "apache",
"i": 2,
"timestamp": 1357576703.125622,
"topic": "org.fedoraproject.stg.pkgdb.acl.update",
"msg": {
"status": "Awaiting Review",
"username": "ralph",
"package_listing": {
"point_of_contact": "grover",
"package": {
"upstreamurl": None,
"name": "python-sh",
"description": None,
"reviewurl": None,
"summary": "Python module to simplify calling "
"shell commands"
},
"qacontact": None,
"collection": {
"pendingurltemplate": None,
"name": "Fedora EPEL",
"publishurltemplate": None,
"version": "6",
"disttag": ".el6",
"branchname": "EL-6"
},
"specfile": None
},
"agent": "ralph",
"acl": "watchbugzilla"
}
}
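# A rough sketch (not part of the original test module) of how fixtures such
# as the class above are consumed: the inherited Base class feeds each ``msg``
# dict through the fedmsg.meta processors and compares the results against the
# ``expected_*`` attributes, roughly equivalent to:
#
#   import fedmsg.config
#   import fedmsg.meta
#
#   config = fedmsg.config.load_config([], None)
#   fedmsg.meta.make_processors(**config)
#   assert fedmsg.meta.msg2subtitle(
#       TestPkgdbACLUpdate.msg, **config) == TestPkgdbACLUpdate.expected_subti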
class TestPkgdbPackageNew(Base):
""" The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
publishes this message when a new package is added to the DB. This
typically happens near the end of the Package Review Process as a
result of a `SCM Admin Request
<http://fedoraproject.org/wiki/Package_SCM_admin_requests>`_.
"""
expected_title = "pkgdb.package.new"
expected_subti = "ralph added a new package 'php-zmq' (devel)"
expected_link = "https://admin.fedoraproject.org/pkgdb/package/php-zmq/"
expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
"package_128x128.png")
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c?s=64&d=retro")
expected_packages = set(['php-zmq'])
expected_usernames = set(['ralph'])
expected_objects = set(['php-zmq/create'])
msg = {
"username": "apache",
"i": 3,
"timestamp": 1357580533.5999,
"topic": "org.fedoraproject.stg.pkgdb.package.new",
"msg": {
"package_listing": {
"point_of_contact": "lmacken",
"package": {
"upstreamurl": None,
"name": "php-zmq",
"description": None,
"reviewurl": None,
"summary": "PHP 0MQ/zmq/zeromq extension"
},
"qacontact": None,
"collection": {
"pendingurltemplate": None,
"name": "Fedora",
"publishurltemplate": None,
"version": "19",
"disttag": ".f19",
"branchname": "devel"
},
"specfile": None
},
"agent": "ralph"
}
}
class TestPkgdbOwnerUpdate(Base):
""" The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes this message when a package gets a new owner. (It is
also published when a package is orphaned; the 'owner' field will have
the string 'orphan' as its value.)
"""
expected_title = "pkgdb.owner.update"
expected_subti = "ralph changed owner of php-zmq (EL-6) to 'orphan'"
expected_link = "https://admin.fedoraproject.org/pkgdb/package/php-zmq/"
expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
"package_128x128.png")
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c?s=64&d=retro")
expected_packages = set(['php-zmq'])
expected_usernames = set(['ralph'])
expected_objects = set(['php-zmq/owner/EL-6'])
msg = {
"username": "apache",
"i": 3,
"timestamp": 1357580533.5999,
"topic": "org.fedoraproject.stg.pkgdb.owner.update",
"msg": {
"package_listing": {
"point_of_contact": "orphan",
"package": {
"upstreamurl": None,
"name": "php-zmq",
"description": None,
"reviewurl": None,
"summary": "PHP 0MQ/zmq/zeromq extension"
},
"qacontact": None,
"collection": {
"pendingurltemplate": None,
"name": "Fedora EPEL",
"publishurltemplate": None,
"version": "6",
"disttag": ".el6",
"branchname": "EL-6"
},
"specfile": None
},
"agent": "ralph"
}
}
class TestLegacyPkgdbACLRequestToggle(Base):
""" The old Fedora Package DB1 published this message when an ACL request
was toggled on a package.
"""
expected_title = "pkgdb.acl.request.toggle"
expected_subti = "ralph has requested 'commit' on php-zmq (EL-6)"
expected_link = "https://admin.fedoraproject.org/pkgdb/package/php-zmq/"
expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
"package_128x128.png")
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c?s=64&d=retro")
expected_packages = set(['php-zmq'])
expected_usernames = set(['ralph'])
expected_objects = set(['php-zmq/acls/EL-6/commit/ralph'])
msg = {
"username": "apache",
"i": 2,
"timestamp": 1357581512.006664,
"topic": "org.fedoraproject.stg.pkgdb.acl.request.toggle",
"msg": {
"acl_action": "requested",
"package_listing": {
"owner": "orphan",
"package": {
"upstreamurl": None,
"name": "php-zmq",
"description": None,
"reviewurl": None,
"summary": "PHP 0MQ/zmq/zeromq extension"
},
"qacontact": None,
"collection": {
"pendingurltemplate": None,
"name": "Fedora EPEL",
"publishurltemplate": None,
"version": "6",
"disttag": ".el6",
"branchname": "EL-6"
},
"specfile": None
},
"acl_status": "Awaiting Review",
"agent": "ralph",
"acl": "commit"
}
}
class TestLegacyPkgdbPackageUpdate(Base):
""" Test old school messages. """
expected_title = "pkgdb.package.update"
expected_subti = "ralph made some updates to php-zmq"
expected_link = "https://admin.fedoraproject.org/pkgdb/package/php-zmq/"
expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
"package_128x128.png")
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c?s=64&d=retro")
expected_packages = set(['php-zmq'])
expected_usernames = set(['ralph'])
expected_objects = set(['php-zmq/update'])
msg = {
"username": "apache",
"i": 2,
"timestamp": 1357581512.006664,
"topic": "org.fedoraproject.stg.pkgdb.package.update",
"msg": {
"acl_action": "requested",
"package": "php-zmq",
"agent": "ralph",
},
}
class TestPkgdbPackageUpdateStatus(Base):
""" The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
publishes this message when the status of a package is updated.
"""
expected_title = "pkgdb.package.update.status"
expected_subti = "ralph unretired guake in F-18"
expected_link = "https://admin.fedoraproject.org/pkgdb/package/guake/"
expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
"package_128x128.png")
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c?s=64&d=retro")
expected_packages = set(['guake'])
expected_usernames = set(['ralph'])
expected_objects = set(['guake/update'])
msg = {
"username": "apache",
"i": 144,
"timestamp": 1379605523.496933,
"msg_id": "2013-c131fb95-0a2e-4426-95c3-09766e017d29",
"topic": "org.fedoraproject.dev.pkgdb.package.update.status",
"msg": {
"status": "Approved",
"package_listing": {
"package": {
"status": "Approved",
"upstream_url": "http://guake.org",
"name": "guake",
"creation_date": 1379619917.0,
"summary": "Top down terminal for GNOME",
"review_url": "https://bugzilla.redhat.com/450189"
},
"collection": {
"pendingurltemplate": None,
"publishurltemplate": None,
"branchname": "F-18",
"name": "Fedora",
"version": "18"
},
"point_of_contact": "pingou"
},
"prev_status": "Retired",
"agent": "ralph",
"package_name": "guake"
}
}
class TestPkgdbPackageUpdate(Base):
""" The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
publishes this message when metadata for a package is updated.
"""
expected_title = "pkgdb.package.update"
expected_subti = "pkgdb_updater updated: summary, description of guake"
expected_link = "https://admin.fedoraproject.org/pkgdb/package/guake/"
expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
"package_128x128.png")
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"1ff483b03adb34142ac55a5efecfa71b0149d57566f86d969905005b0ab98def"
"?s=64&d=retro")
expected_packages = set(['guake'])
expected_usernames = set(['pkgdb_updater'])
expected_objects = set(['guake/update'])
msg = {
"username": "apache",
"i": 144,
"timestamp": 1379605523.496933,
"msg_id": "2013-c131fb95-0a2e-4426-95c3-09766e017d29",
"topic": "org.fedoraproject.dev.pkgdb.package.update",
"msg": {
"package": {
"status": "Approved",
"upstream_url": "http://guake.org",
"name": "guake",
"creation_date": 1379619917.0,
"summary": "Top down terminal for GNOME",
"review_url": "https://bugzilla.redhat.com/450189"
},
"agent": "pkgdb_updater",
"fields": ["summary", "description"],
}
}
class LegacyTestPkgdbBranchClone(Base):
""" The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
publishes messages on this topic when a new branch is cloned for a
package.
"""
expected_title = "pkgdb.branch.clone"
expected_subti = "ralph branched php-zmq f18 from devel"
expected_link = "https://admin.fedoraproject.org/pkgdb/package/php-zmq/"
expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
"package_128x128.png")
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c?s=64&d=retro")
expected_packages = set(['php-zmq'])
expected_usernames = set(['ralph'])
expected_objects = set(['php-zmq/branch'])
msg = {
"username": "apache",
"i": 2,
"timestamp": 1357581512.006664,
"topic": "org.fedoraproject.stg.pkgdb.branch.clone",
"msg": {
"package": "php-zmq",
"branch": "f18",
"master": "devel",
"agent": "ralph",
},
}
class TestLegacyPkgdbCritpathUpdate(Base):
""" The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
publishes messages on this topic when the critical path status of a
    package changes (when it is either added to, or removed from, the critical
path). For example:
"""
expected_title = "pkgdb.critpath.update"
expected_subti = "ralph altered the critpath status for some packages"
expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
"package_128x128.png")
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c?s=64&d=retro")
expected_packages = set([])
expected_usernames = set(['ralph'])
expected_objects = set([])
msg = {
"username": "apache",
"i": 2,
"timestamp": 1357581512.006664,
"topic": "org.fedoraproject.stg.pkgdb.critpath.update",
"msg": {
"package_listing_ids": [],
"agent": "ralph",
"critpath": True,
},
}
class TestPkgdbPackageUpdateStatus2(Base):
""" The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
publishes this message when the status of a package is updated.
Here's an example of a package being retired:
"""
expected_title = "pkgdb.package.update.status"
expected_subti = "till retired libvmime07 in master"
expected_link = "https://admin.fedoraproject.org/pkgdb/package/libvmime07/"
expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
"package_128x128.png")
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"272bbf32f26ca494a78673f873bb62e8f3deb9f9b53213ceac3c2a144de4784a"
"?s=64&d=retro")
expected_packages = set(['libvmime07'])
expected_usernames = set(['till'])
expected_objects = set(['libvmime07/update'])
msg = {
"source_name": "datanommer",
"i": 7,
"timestamp": 1412710605.0,
"msg_id": "2014-78aa26ee-d2e5-4446-b4a4-73948704d73e",
"topic": "org.fedoraproject.prod.pkgdb.package.update.status",
"source_version": "0.6.4",
"msg": {
"status": "Retired",
"package_listing": {
"status": "Retired",
"point_of_contact": "orphan",
"package": {
"status": "Approved",
"upstream_url": "http://www.zarafa.com/wiki/index.php/Libvmime_patches",
"description": "VMime is a powerful C++ class ...",
"creation_date": 1400070978.0,
"acls": [],
"summary": "A powerful C++ class ...",
"review_url": None,
"name": "libvmime07"
},
"collection": {
"status": "Under Development",
"dist_tag": ".fc22",
"koji_name": "rawhide",
"name": "Fedora",
"version": "devel",
"branchname": "master"
},
"acls": [
{
"fas_name": "robert",
"status": "Approved",
"acl": "watchcommits"
},
{
"fas_name": "robert",
"status": "Approved",
"acl": "watchbugzilla"
},
{
"fas_name": "robert",
"status": "Obsolete",
"acl": "commit"
},
{
"fas_name": "robert",
"status": "Obsolete",
"acl": "approveacls"
}
],
"critpath": False,
"status_change": 1412710603.0
},
"prev_status": "Orphaned",
"package_name": "libvmime07",
"agent": "till"
}
}
class TestLegacyPkgdbPackageRetire(Base):
""" The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
publishes messages on this topic when a package is retired. For example:
"""
expected_title = "pkgdb.package.retire"
expected_subti = "ralph retired php-zmq (EL-6)!"
expected_link = "https://admin.fedoraproject.org/pkgdb/package/php-zmq/"
expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
"package_128x128.png")
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c?s=64&d=retro")
expected_packages = set(['php-zmq'])
expected_usernames = set(['ralph'])
expected_objects = set(['php-zmq/retire'])
msg = {
"username": "apache",
"i": 2,
"timestamp": 1357583297.886945,
"topic": "org.fedoraproject.stg.pkgdb.package.retire",
"msg": {
"package_listing": {
"owner": "orphan",
"package": {
"upstreamurl": None,
"name": "php-zmq",
"description": None,
"reviewurl": None,
"summary": "PHP 0MQ/zmq/zeromq extension"
},
"qacontact": None,
"collection": {
"pendingurltemplate": None,
"name": "Fedora EPEL",
"publishurltemplate": None,
"version": "6",
"disttag": ".el6",
"branchname": "EL-6"
},
"specfile": None
},
"retirement": "retired",
"agent": "ralph"
}
}
class LegacyTestPkgdbUserRemove(Base):
""" The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes messages like these when a user is removed from a package ACL.
"""
expected_title = "pkgdb.acl.user.remove"
expected_subti = "ralph removed ralph from php-zmq (EL-6, F18)"
expected_link = "https://admin.fedoraproject.org/pkgdb/package/php-zmq/"
expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
"package_128x128.png")
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c?s=64&d=retro")
expected_packages = set(['php-zmq'])
expected_usernames = set(['ralph'])
expected_objects = set(['php-zmq/remove/ralph'])
msg = {
"username": "apache",
"i": 2,
"timestamp": 1357583297.886945,
"topic": "org.fedoraproject.stg.pkgdb.acl.user.remove",
"msg": {
"package_listings": [{
"owner": "orphan",
"package": {
"upstreamurl": None,
"name": "php-zmq",
"description": None,
"reviewurl": None,
"summary": "PHP 0MQ/zmq/zeromq extension"
},
"qacontact": None,
"collection": {
"pendingurltemplate": None,
"name": "Fedora EPEL",
"publishurltemplate": None,
"version": "6",
"disttag": ".el6",
"branchname": "EL-6"
},
"specfile": None
}, {
"owner": "orphan",
"package": {
"upstreamurl": None,
"name": "php-zmq",
"description": None,
"reviewurl": None,
"summary": "PHP 0MQ/zmq/zeromq extension"
},
"qacontact": None,
"collection": {
"pendingurltemplate": None,
"name": "Fedora",
"publishurltemplate": None,
"version": "18",
"disttag": ".f18",
"branchname": "F18"
},
"specfile": None
}],
"collections": [
# This actually has stuff in it in prod.
],
"username": "ralph",
"agent": "ralph",
}
}
class TestPkgdbBranchStart(Base):
""" The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
publishes messages like these when branching starts.
"""
expected_title = "pkgdb.branch.start"
expected_subti = "ralph started a branch of F-19 from devel"
expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
"package_128x128.png")
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c?s=64&d=retro")
expected_packages = set()
expected_usernames = set(['ralph'])
expected_objects = set()
msg = {
u'username': u'threebean',
u'i': 1,
u'timestamp': 1379606342.105066,
u'msg_id': u'2013-0eaf6d98-6259-4e1c-a113-e2c9284a6082',
u'topic':
u'org.fedoraproject.dev.pkgdb.branch.start',
u'msg': {
u'collection_from': {
u'pendingurltemplate': None,
u'publishurltemplate': None,
u'branchname': u'devel',
u'name': u'Fedora',
u'version': u'devel'
},
u'collection_to': {
u'pendingurltemplate': None,
u'publishurltemplate': None,
u'branchname': u'F-19',
u'name': u'Fedora',
u'version': u'19'
},
u'agent': u'ralph',
},
}
class TestLegacyPkgdbBranchStart(Base):
""" This just tests a funny case where 'agent' is a list.. """
expected_title = "pkgdb.branch.start"
expected_subti = "ralph started a branch of F-19 from devel"
expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
"package_128x128.png")
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c?s=64&d=retro")
expected_packages = set()
expected_usernames = set(['ralph'])
expected_objects = set()
msg = {
u'username': u'threebean',
u'i': 1,
u'timestamp': 1379606342.105066,
u'msg_id': u'2013-0eaf6d98-6259-4e1c-a113-e2c9284a6082',
u'topic':
u'org.fedoraproject.dev.pkgdb.branch.start',
u'msg': {
u'collection_from': {
u'pendingurltemplate': None,
u'publishurltemplate': None,
u'branchname': u'devel',
u'name': u'Fedora',
u'version': u'devel'
},
u'collection_to': {
u'pendingurltemplate': None,
u'publishurltemplate': None,
u'branchname': u'F-19',
u'name': u'Fedora',
u'version': u'19'
},
u'agent': [u'ralph'],
},
}
class TestPkgdbBranchComplete(Base):
""" The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
publishes messages like these when branching completes.
"""
expected_title = "pkgdb.branch.complete"
expected_subti = "ralph's branch of F-19 from devel completed"
expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
"package_128x128.png")
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c?s=64&d=retro")
expected_packages = set()
expected_usernames = set(['ralph'])
expected_objects = set()
msg = {
u'username': u'threebean',
u'i': 1,
u'timestamp': 1379606342.105066,
u'msg_id': u'2013-0eaf6d98-6259-4e1c-a113-e2c9284a6082',
u'topic':
u'org.fedoraproject.dev.pkgdb.branch.complete',
u'msg': {
u'collection_from': {
u'pendingurltemplate': None,
u'publishurltemplate': None,
u'branchname': u'devel',
u'name': u'Fedora',
u'version': u'devel'
},
u'collection_to': {
u'pendingurltemplate': None,
u'publishurltemplate': None,
u'branchname': u'F-19',
u'name': u'Fedora',
u'version': u'19'
},
u'agent': u'ralph',
},
}
class TestPkgdbCollectionNew(Base):
""" The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
publishes messages like these when an admin creates a new collection.
"""
expected_title = "pkgdb.collection.new"
expected_subti = "ralph created a new collection for Fedora 19"
expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
"package_128x128.png")
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c?s=64&d=retro")
expected_packages = set()
expected_usernames = set(['ralph'])
expected_objects = set()
msg = {
u'username': u'threebean',
u'i': 3,
u'timestamp': 1379607327.474346,
u'msg_id': u'2013-68fd388e-60ca-4cf6-888d-b51161798496',
u'topic': u'org.fedoraproject.dev.pkgdb.collection.new',
u'msg': {
u'collection': {
u'pendingurltemplate': None,
u'publishurltemplate': None,
u'branchname': u'F-19',
u'name': u'Fedora',
u'version': u'19',
},
u'agent': u'ralph',
}
}
class TestPkgdbCollectionUpdate(Base):
""" The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes messages like these when an admin updates an existing collection.
"""
expected_title = "pkgdb.collection.update"
expected_subti = ("ralph updated the following fields of the Fedora 18 "
"collection: name, version")
expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
"package_128x128.png")
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c?s=64&d=retro")
expected_packages = set()
expected_usernames = set(['ralph'])
expected_objects = set()
msg = {
u'username': u'threebean',
u'i': 27,
u'timestamp': 1379607692.198447,
u'msg_id': u'2013-478a321f-ddfc-4d4c-adeb-c777619da15a',
u'topic': u'org.fedoraproject.dev.pkgdb.collection.update',
u'msg': {
u'fields': [
u'name',
u'version',
],
u'collection': {
u'pendingurltemplate': u'http://.....',
u'publishurltemplate': u'http://.....',
u'branchname': u'f18_b',
u'name': u'Fedora',
u'version': u'18'
},
u'agent': u'ralph',
}
}
class TestPkgdbDeletePackage(Base):
""" The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes messages like these when an admin **deletes a package**
    altogether.
"""
expected_title = "pkgdb.package.delete"
expected_subti = ("ausil deleted the 'pipelight' package from the pkgdb")
expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
"package_128x128.png")
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"a89b57d99dcf12d40ec2b9fb05910b90293b13b0b87415208bedc897bc18a354"
"?s=64&d=retro")
expected_packages = set(['pipelight'])
expected_usernames = set(['ausil'])
expected_objects = set(['pipelight/package/delete'])
msg = {
"i": 46,
"msg_id": "2014-9372bf63-8e32-4257-82ec-38fb5226763a",
"source_name": "datanommer",
"source_version": "0.6.4",
"timestamp": 1408377920.0,
"topic": "org.fedoraproject.prod.pkgdb.package.delete",
"msg": {
"agent": "ausil",
"package": {
"acls": [
{
"acls": [
{
"acl": "commit",
"fas_name": "besser82",
"status": "Approved"
},
{
"acl": "watchbugzilla",
"fas_name": "besser82",
"status": "Approved"
},
{
"acl": "watchcommits",
"fas_name": "besser82",
"status": "Approved"
},
{
"acl": "approveacls",
"fas_name": "besser82",
"status": "Approved"
},
{
"acl": "watchcommits",
"fas_name": "awjb",
"status": "Approved"
},
{
"acl": "watchbugzilla",
"fas_name": "awjb",
"status": "Approved"
},
{
"acl": "approveacls",
"fas_name": "awjb",
"status": "Approved"
},
{
"acl": "commit",
"fas_name": "awjb",
"status": "Approved"
}
],
"collection": {
"branchname": "master",
"dist_tag": ".fc22",
"koji_name": "rawhide",
"name": "Fedora",
"status": "Under Development",
"version": "devel"
},
"critpath": False,
"package": {
"acls": [],
"creation_date": 1404850009.0,
"description": "",
"name": "pipelight",
"review_url": "https://bugzilla.redhat.com/"
"1117403",
"status": "Approved",
"summary": "NPAPI Wrapper Plugin for using "
"Windows plugins in Linux browsers",
"upstream_url": "http://pipelight.net/"
},
"point_of_contact": "besser82",
"status": "Approved",
"status_change": 1404850010.0
},
{
"acls": [
{
"acl": "commit",
"fas_name": "besser82",
"status": "Approved"
},
{
"acl": "watchbugzilla",
"fas_name": "besser82",
"status": "Approved"
},
{
"acl": "watchcommits",
"fas_name": "besser82",
"status": "Approved"
},
{
"acl": "approveacls",
"fas_name": "besser82",
"status": "Approved"
},
{
"acl": "watchcommits",
"fas_name": "awjb",
"status": "Approved"
},
{
"acl": "watchbugzilla",
"fas_name": "awjb",
"status": "Approved"
},
{
"acl": "approveacls",
"fas_name": "awjb",
"status": "Approved"
},
{
"acl": "commit",
"fas_name": "awjb",
"status": "Approved"
}
],
"collection": {
"branchname": "f19",
"dist_tag": ".fc19",
"koji_name": "f19",
"name": "Fedora",
"status": "Active",
"version": "19"
},
"critpath": False,
"package": {
"acls": [],
"creation_date": 1404850009.0,
"description": "",
"name": "pipelight",
"review_url": "https://bugzilla.redhat.com/"
"1117403",
"status": "Approved",
"summary": "NPAPI Wrapper Plugin for using "
"Windows plugins in Linux browsers",
"upstream_url": "http://pipelight.net/"
},
"point_of_contact": "besser82",
"status": "Approved",
"status_change": 1404850009.0
},
{
"acls": [
{
"acl": "commit",
"fas_name": "besser82",
"status": "Approved"
},
{
"acl": "watchbugzilla",
"fas_name": "besser82",
"status": "Approved"
},
{
"acl": "watchcommits",
"fas_name": "besser82",
"status": "Approved"
},
{
"acl": "approveacls",
"fas_name": "besser82",
"status": "Approved"
},
{
"acl": "watchcommits",
"fas_name": "awjb",
"status": "Approved"
},
{
"acl": "watchbugzilla",
"fas_name": "awjb",
"status": "Approved"
},
{
"acl": "approveacls",
"fas_name": "awjb",
"status": "Approved"
},
{
"acl": "commit",
"fas_name": "awjb",
"status": "Approved"
}
],
"collection": {
"branchname": "f20",
"dist_tag": ".fc20",
"koji_name": "f20",
"name": "Fedora",
"status": "Active",
"version": "20"
},
"critpath": False,
"package": {
"acls": [],
"creation_date": 1404850009.0,
"description": "",
"name": "pipelight",
"review_url": "https://bugzilla.redhat.com/"
"1117403",
"status": "Approved",
"summary": "NPAPI Wrapper Plugin for using "
"Windows plugins in Linux browsers",
"upstream_url": "http://pipelight.net/"
},
"point_of_contact": "besser82",
"status": "Approved",
"status_change": 1404850010.0
},
{
"acls": [
{
"acl": "commit",
"fas_name": "besser82",
"status": "Approved"
},
{
"acl": "watchbugzilla",
"fas_name": "besser82",
"status": "Approved"
},
{
"acl": "watchcommits",
"fas_name": "besser82",
"status": "Approved"
},
{
"acl": "approveacls",
"fas_name": "besser82",
"status": "Approved"
},
{
"acl": "watchcommits",
"fas_name": "awjb",
"status": "Approved"
},
{
"acl": "watchbugzilla",
"fas_name": "awjb",
"status": "Approved"
},
{
"acl": "approveacls",
"fas_name": "awjb",
"status": "Approved"
},
{
"acl": "commit",
"fas_name": "awjb",
"status": "Approved"
}
],
"collection": {
"branchname": "epel7",
"dist_tag": ".el7",
"koji_name": "epel7",
"name": "Fedora EPEL",
"status": "Under Development",
"version": "7"
},
"critpath": False,
"package": {
"acls": [],
"creation_date": 1404850009.0,
"description": "",
"name": "pipelight",
"review_url": "https://bugzilla.redhat.com/"
"1117403",
"status": "Approved",
"summary": "NPAPI Wrapper Plugin for using "
"Windows plugins in Linux browsers",
"upstream_url": "http://pipelight.net/"
},
"point_of_contact": "besser82",
"status": "Approved",
"status_change": 1404850009.0
},
{
"acls": [
{
"acl": "watchcommits",
"fas_name": "besser82",
"status": "Approved"
},
{
"acl": "watchbugzilla",
"fas_name": "besser82",
"status": "Approved"
},
{
"acl": "approveacls",
"fas_name": "besser82",
"status": "Approved"
},
{
"acl": "commit",
"fas_name": "besser82",
"status": "Approved"
},
{
"acl": "watchcommits",
"fas_name": "awjb",
"status": "Approved"
},
{
"acl": "watchbugzilla",
"fas_name": "awjb",
"status": "Approved"
},
{
"acl": "approveacls",
"fas_name": "awjb",
"status": "Approved"
},
{
"acl": "commit",
"fas_name": "awjb",
"status": "Approved"
}
],
"collection": {
"branchname": "f21",
"dist_tag": ".fc21",
"koji_name": "f21",
"name": "Fedora",
"status": "Under Development",
"version": "21"
},
"critpath": False,
"package": {
"acls": [],
"creation_date": 1404850009.0,
"description": "",
"name": "pipelight",
"review_url": "https://bugzilla.redhat.com/"
"1117403",
"status": "Approved",
"summary": "NPAPI Wrapper Plugin for using "
"Windows plugins in Linux browsers",
"upstream_url": "http://pipelight.net/"
},
"point_of_contact": "besser82",
"status": "Approved",
"status_change": 1404997736.0
}
],
"creation_date": 1404850009.0,
"description": "",
"name": "pipelight",
"review_url": "https://bugzilla.redhat.com/1117403",
"status": "Approved",
"summary": "NPAPI Wrapper Plugin for using "
"Windows plugins in Linux browsers",
"upstream_url": "http://pipelight.net/"
}
},
}
class TestPkgdbDeleteBranch(Base):
""" The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
publishes messages like these when an admin **deletes a branch** of a
particular package.
"""
expected_title = "pkgdb.package.branch.delete"
expected_subti = "ausil deleted the f21 branch of the 'pipelight' package"
expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
"package_128x128.png")
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"a89b57d99dcf12d40ec2b9fb05910b90293b13b0b87415208bedc897bc18a354"
"?s=64&d=retro")
expected_packages = set(['pipelight'])
expected_usernames = set(['ausil'])
expected_objects = set(['pipelight/f21/delete'])
msg = {
"i": 45,
"msg_id": "2014-fba4c0ac-f5ba-446f-bf70-94200e2d286f",
"source_name": "datanommer",
"source_version": "0.6.4",
"timestamp": 1408377920.0,
"topic": "org.fedoraproject.prod.pkgdb.package.branch.delete",
"msg": {
"agent": "ausil",
"package_listing": {
"acls": [
{
"acl": "watchcommits",
"fas_name": "besser82",
"status": "Approved"
},
{
"acl": "watchbugzilla",
"fas_name": "besser82",
"status": "Approved"
},
{
"acl": "approveacls",
"fas_name": "besser82",
"status": "Approved"
},
{
"acl": "commit",
"fas_name": "besser82",
"status": "Approved"
},
{
"acl": "watchcommits",
"fas_name": "awjb",
"status": "Approved"
},
{
"acl": "watchbugzilla",
"fas_name": "awjb",
"status": "Approved"
},
{
"acl": "approveacls",
"fas_name": "awjb",
"status": "Approved"
},
{
"acl": "commit",
"fas_name": "awjb",
"status": "Approved"
}
],
"collection": {
"branchname": "f21",
"dist_tag": ".fc21",
"koji_name": "f21",
"name": "Fedora",
"status": "Under Development",
"version": "21"
},
"critpath": False,
"package": {
"acls": [],
"creation_date": 1404850009.0,
"description": "",
"name": "pipelight",
"review_url": "https://bugzilla.redhat.com/1117403",
"status": "Approved",
"summary": "NPAPI Wrapper Plugin for using Windows "
"plugins in Linux browsers",
"upstream_url": "http://pipelight.net/"
},
"point_of_contact": "besser82",
"status": "Approved",
"status_change": 1404997736.0
}
},
}
class TestPkgdbDeleteAcl(Base):
""" The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes messages like these when an admin **deletes an ACL** that a
    user held on a particular package.
"""
expected_title = "pkgdb.acl.delete"
expected_subti = ("ausil deleted awjb's watchcommits "
"rights from pipelight (f20)")
expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
"package_128x128.png")
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"a89b57d99dcf12d40ec2b9fb05910b90293b13b0b87415208bedc897bc18a354"
"?s=64&d=retro")
expected_packages = set(['pipelight'])
expected_usernames = set(['ausil', 'awjb'])
expected_objects = set(['pipelight/acls/f20/watchcommits/awjb'])
msg = {
"i": 23,
"msg_id": "2014-f46f0993-ea29-4fe1-af44-807b863a12de",
"source_name": "datanommer",
"source_version": "0.6.4",
"timestamp": 1408377918.0,
"topic": "org.fedoraproject.prod.pkgdb.acl.delete",
"msg": {
"acl": {
"acl": "watchcommits",
"fas_name": "awjb",
"packagelist": {
"collection": {
"branchname": "f20",
"dist_tag": ".fc20",
"koji_name": "f20",
"name": "Fedora",
"status": "Active",
"version": "20"
},
"critpath": False,
"package": {
"acls": [],
"creation_date": 1404850009.0,
"description": "",
"name": "pipelight",
"review_url": "https://bugzilla.redhat.com/1117403",
"status": "Approved",
"summary": "NPAPI Wrapper Plugin for using Windows "
"plugins in Linux browsers",
"upstream_url": "http://pipelight.net/"
},
"point_of_contact": "besser82",
"status": "Approved",
"status_change": 1404850010.0
},
"status": "Approved"
},
"agent": "ausil"
},
}
class TestPkgdbBranchRequest(Base):
""" The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes messages like these when a user **requests a new branch** for
a particular package.
"""
expected_title = "pkgdb.package.branch.request"
expected_subti = ("pingou requested branch epel7 for package R-BiocGenerics")
expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
"package_128x128.png")
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"01fe73d687f4db328da1183f2a1b5b22962ca9d9c50f0728aafeac974856311c"
"?s=64&d=retro")
expected_packages = set(['R-BiocGenerics'])
expected_usernames = set(['pingou'])
expected_objects = set(['R-BiocGenerics/branch/request/epel7/pingou'])
msg = {
"i": 1,
"timestamp": 1408440084,
"msg_id": "2014-250329a1-1ccf-4fc4-ad0c-e24365f89c0f",
"topic": "org.fedoraproject.dev.pkgdb.package.branch.request",
"msg": {
"collection_to": {
"status": "Under Development",
"dist_tag": ".el7",
"koji_name": "epel7",
"name": "Fedora EPEL",
"version": "7",
"branchname": "epel7"
},
"package": {
"status": "Approved",
"upstream_url": None,
"description": None,
"summary": "Generic functions for Bioconductor",
"acls": [],
"creation_date": 1400063778.0,
"review_url": None,
"name": "R-BiocGenerics"
},
"agent": "pingou",
},
}
class TestPkgdbPackageRequest(Base):
""" The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes messages like these when a user **requests a new package**
    to be added to the Package DB.
"""
expected_title = "pkgdb.package.new.request"
expected_subti = ("pingou requested package guake on branch master")
expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
"package_128x128.png")
expected_link = "https://admin.fedoraproject.org/pkgdb/package/guake/"
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"01fe73d687f4db328da1183f2a1b5b22962ca9d9c50f0728aafeac974856311c"
"?s=64&d=retro")
expected_packages = set(['guake'])
expected_usernames = set(['pingou'])
expected_objects = set(['new/package/request/guake/master/pingou'])
msg = {
"i": 3,
"timestamp": 1408440927,
"msg_id": "2014-40c33929-8fa1-4cfb-9559-231af6d809aa",
"topic": "org.fedoraproject.dev.pkgdb.package.new.request",
"msg": {
"info": {
"pkg_summary": "A drop-down terminal for GNOME",
"pkg_collection": "master",
"pkg_review_url": "https://bugzilla.redhat.com/123",
"pkg_upstream_url": "http://guake.org",
"pkg_poc": "pingou",
"pkg_status": "Approved",
"pkg_name": "guake",
"pkg_description": "",
"pkg_critpath": False
},
"agent": "pingou",
"collection": {
"status": "Under Development",
"dist_tag": ".fc22",
"koji_name": "rawhide",
"name": "Fedora",
"version": "devel",
"branchname": "master"
},
"package": None
},
}
class TestPkgdbAdminActionUpdate(Base):
""" The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes messages like these when an admin **updates the status of an
Admin Action**.
"""
expected_title = "pkgdb.admin.action.status.update"
expected_subti = ("pingou changed pingou's package request for guake "
"in master from Awaiting Review to Approved")
expected_link = "https://admin.fedoraproject.org/pkgdb/package/guake/"
expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
"package_128x128.png")
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"01fe73d687f4db328da1183f2a1b5b22962ca9d9c50f0728aafeac974856311c"
"?s=64&d=retro")
expected_packages = set(['guake'])
expected_usernames = set(['pingou'])
expected_objects = set(['action/18/status/guake/master/pingou'])
msg = {
"i": 6,
"timestamp": 1408441710,
"msg_id": "2014-3a9cba3d-a1d0-4187-9fa0-995d54bf826d",
"topic": "org.fedoraproject.dev.pkgdb.admin.action.status.update",
"msg": {
"action": {
"info": {
'pkg_summary': u'A drop-down terminal for GNOME',
'pkg_status': u'Approved',
'pkg_collection': u'master',
'pkg_name': u'guake',
'pkg_review_url': u'https://bugzilla.redhat.com/123',
'pkg_description': u'',
'pkg_upstream_url': u'http://guake.org',
'pkg_poc': u'pingou',
'pkg_critpath': False
},
"status": "Approved",
"package": None,
"date_updated": 1408441710.0,
"collection": {
"status": "Under Development",
"dist_tag": ".fc22",
"koji_name": "rawhide",
"name": "Fedora",
"version": "devel",
"branchname": "master"
},
"user": "pingou",
"action": "request.package",
"date_created": 1408433727.0,
"from_collection": None,
"id": 18
},
"old_status": "Awaiting Review",
"new_status": "Approved",
"agent": "pingou"
},
}
class TestPkgdbAdminActionUpdate_Denied(Base):
""" The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
publishes messages like these when a request for a new branch/package is
**denied/blocked**.
"""
expected_title = "pkgdb.admin.action.status.update"
expected_subti = ("pingou changed pingou's branch request for R-Biobase "
"in epel7 from Awaiting Review to Denied "
"with message: "
"This package should not be branched for EPEL7")
expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
"package_128x128.png")
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"01fe73d687f4db328da1183f2a1b5b22962ca9d9c50f0728aafeac974856311c"
"?s=64&d=retro")
expected_packages = set(['R-Biobase'])
expected_usernames = set(['pingou'])
expected_objects = set(['action/2/status/R-Biobase/epel7/pingou'])
msg = {
"i": 1,
"timestamp": 1421830060,
"msg_id": "2015-1acdeda2-e571-4071-a893-cc2b7ba46b02",
"topic": "org.fedoraproject.dev.pkgdb.admin.action.status.update",
"msg": {
"action": {
"info": {},
"status": "Denied",
"package": {
"status": "Approved",
"upstream_url": "http://bioconductor.org/packages/release/bioc/html/Biobase.html",
"monitor": False,
"description": "Base functions for Bioconductor (bioconductor.org). Biobase provides\nfunctions that are needed by many other Bioconductor packages or which\nreplace R functions.",
"summary": "Base functions for Bioconductor",
"acls": [],
"creation_date": 1400063778.0,
"review_url": None,
"name": "R-Biobase"
},
"date_updated": 1421830060.0,
"collection": {
"status": "Under Development",
"dist_tag": ".el7",
"koji_name": "epel7",
"name": "Fedora EPEL",
"version": "7",
"branchname": "epel7"
},
"user": "pingou",
"action": "request.branch",
"date_created": 1421227282.0,
"message": "This package should not be branched for EPEL7",
"id": 2
},
"old_status": "Awaiting Review",
"new_status": "Denied",
"agent": "pingou"
}
}
class TestPkgdbCritpathUpdate(Base):
""" The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
publishes messages like these when an admin **updates the critpath flag on
a package**.
"""
expected_title = "pkgdb.package.critpath.update"
expected_subti = ("pingou set the critpath flag on the "
"openbox package (f21)")
expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
"package_128x128.png")
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"01fe73d687f4db328da1183f2a1b5b22962ca9d9c50f0728aafeac974856311c"
"?s=64&d=retro")
expected_packages = set(['openbox'])
expected_usernames = set(['pingou'])
expected_objects = set(['openbox/critpath'])
msg = {
"msg_id": "2014-dbb1c4d3-2ffa-4212-9daa-1479bf11e8a4",
"source_name": "datanommer",
"source_version": "0.6.4",
"timestamp": 1408557412.0,
"topic": "org.fedoraproject.prod.pkgdb.package.critpath.update",
"i": 35,
"msg": {
"agent": "pingou",
"branches": [
"f21"
],
"critpath": True,
"package": {
"acls": [
{
"acls": [
{
"acl": "watchcommits",
"fas_name": "mlichvar",
"status": "Approved"
},
{
"acl": "watchbugzilla",
"fas_name": "mlichvar",
"status": "Approved"
},
{
"acl": "commit",
"fas_name": "mlichvar",
"status": "Approved"
},
{
"acl": "approveacls",
"fas_name": "mlichvar",
"status": "Approved"
}
],
"collection": {
"branchname": "FC-5",
"dist_tag": ".fc5",
"koji_name": None,
"name": "Fedora",
"status": "EOL",
"version": "5"
},
"critpath": False,
"package": {
"acls": [],
"creation_date": 1400070978.0,
"name": "openbox",
"review_url": None,
"status": "Approved",
"summary": "A highly configurable and "
"standards-compliant X11 window manager",
"upstream_url": None
},
"point_of_contact": "mlichvar",
"status": "Approved",
"status_change": 1400071632.0
},
{
"acls": [
{
"acl": "watchcommits",
"fas_name": "mlichvar",
"status": "Approved"
},
{
"acl": "watchbugzilla",
"fas_name": "mlichvar",
"status": "Approved"
},
{
"acl": "commit",
"fas_name": "mlichvar",
"status": "Approved"
},
{
"acl": "approveacls",
"fas_name": "mlichvar",
"status": "Approved"
}
],
"collection": {
"branchname": "FC-4",
"dist_tag": ".fc4",
"koji_name": None,
"name": "Fedora",
"status": "EOL",
"version": "4"
},
"critpath": False,
"package": {
"acls": [],
"creation_date": 1400070978.0,
"name": "openbox",
"review_url": None,
"status": "Approved",
"summary": "A highly configurable and "
"standards-compliant X11 window manager",
"upstream_url": None
},
"point_of_contact": "mlichvar",
"status": "Approved",
"status_change": 1400071632.0
},
{
"acls": [
{
"acl": "watchcommits",
"fas_name": "mlichvar",
"status": "Approved"
},
{
"acl": "watchbugzilla",
"fas_name": "mlichvar",
"status": "Approved"
},
{
"acl": "commit",
"fas_name": "mlichvar",
"status": "Approved"
},
{
"acl": "approveacls",
"fas_name": "mlichvar",
"status": "Approved"
},
{
"acl": "watchbugzilla",
"fas_name": "cwickert",
"status": "Approved"
},
{
"acl": "watchcommits",
"fas_name": "cwickert",
"status": "Approved"
},
{
"acl": "commit",
"fas_name": "cwickert",
"status": "Approved"
},
{
"acl": "watchcommits",
"fas_name": "athmane",
"status": "Obsolete"
},
{
"acl": "watchbugzilla",
"fas_name": "athmane",
"status": "Obsolete"
}
],
"collection": {
"branchname": "f21",
"dist_tag": ".fc21",
"koji_name": "f21",
"name": "Fedora",
"status": "Under Development",
"version": "21"
},
"critpath": True,
"package": {
"acls": [],
"creation_date": 1400070978.0,
"name": "openbox",
"review_url": None,
"status": "Approved",
"summary": "A highly configurable and "
"standards-compliant X11 window manager",
"upstream_url": None
},
"point_of_contact": "mlichvar",
"status": "Approved",
"status_change": 1408557402.0
}
],
"creation_date": 1400070978.0,
"description": "Openbox is a window manager designed ...",
"name": "openbox",
"review_url": None,
"status": "Approved",
"summary": "A highly configurable and "
"standards-compliant X11 window manager",
"upstream_url": None
}
},
}
class TestPkgdbPackageBranchNew(Base):
""" The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
publishes messages like these when a **new branch** is created for a
package.
"""
expected_title = "pkgdb.package.branch.new"
expected_subti = ("pingou created the branch 'epel7' for the package "
"'R-BSgenome'")
expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
"package_128x128.png")
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"01fe73d687f4db328da1183f2a1b5b22962ca9d9c50f0728aafeac974856311c"
"?s=64&d=retro")
expected_packages = set(['R-BSgenome'])
expected_usernames = set(['pingou'])
expected_objects = set(['R-BSgenome/epel7/new'])
msg = {
"i": 1,
"timestamp": 1408957258,
"msg_id": "2014-645038a7-1f95-4a81-aa68-489c0ae55803",
"topic": "org.fedoraproject.dev.pkgdb.package.branch.new",
"msg": {
"package_listing": {
"status": "Approved",
"package": {
"status": "Approved",
"upstream_url": None,
"description": None,
"summary": "Infrastructure shared by all the "
"Biostrings-based genome",
"acls": [],
"creation_date": 1400063778.0,
"review_url": None,
"name": "R-BSgenome"
},
"point_of_contact": "pingou",
"collection": {
"status": "Under Development",
"dist_tag": ".el7",
"koji_name": "epel7",
"name": "Fedora EPEL",
"version": "7",
"branchname": "epel7"
},
"critpath": False,
"status_change": 1408950057.0
},
"agent": "pingou",
"package": {
"status": "Approved",
"upstream_url": None,
"description": None,
"summary": "Infrastructure shared by all the "
"Biostrings-based genome",
"acls": [],
"creation_date": 1400063778.0,
"review_url": None,
"name": "R-BSgenome"
}
}
}
class TestPkgdbPackageMonitorUpdate(Base):
""" The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
publishes messages like these when someone changes the
`monitoring <https://fedoraproject.org/wiki/Upstream_release_monitoring>`_
status of a package.
"""
expected_title = "pkgdb.package.monitor.update"
expected_subti = ("pingou set the monitor flag of guake to False")
expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
"package_128x128.png")
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"01fe73d687f4db328da1183f2a1b5b22962ca9d9c50f0728aafeac974856311c"
"?s=64&d=retro")
expected_packages = set(['guake'])
expected_usernames = set(['pingou'])
expected_objects = set(['guake/monitor/false'])
msg = {
"username": "pingou",
"i": 3,
"timestamp": 1412957736,
"msg_id": "2014-905aaa3c-483d-4923-95f7-56a8da38da62",
"topic": "org.fedoraproject.dev.pkgdb.package.monitor.update",
"msg": {
"status": False,
"agent": "pingou",
"package": {
"status": "Approved",
"upstream_url": "http://www.guake.org/",
"description": "Guake is a drop-down terminal for Gnome Desktop Environment,\nso you just need to press a key to invoke him,\nand press again to hide.",
"summary": "Drop-down terminal for GNOME",
"acls": [],
"creation_date": 1397204290.0,
"review_url": None,
"name": "guake"
}
}
}
class TestPkgdbPackageUnretireRequest(Base):
""" The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
publishes messages like these when someone asks that a package is
**unretired**.
"""
expected_title = "pkgdb.package.unretire.request"
expected_subti = ("moceap asks that netbeans-platform8 be unretired on "
"master")
expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
"package_128x128.png")
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"360e1873c56312ea5866123f5ffaf4e07d419570b03af7f475c0d20c7501db06"
"?s=64&d=retro")
expected_packages = set(['netbeans-platform8'])
expected_usernames = set(['moceap'])
expected_objects = set(['netbeans-platform8/unretire/master'])
msg = {
'i': 1,
'timestamp': 1427823120,
'msg_id': '2015-bb28a398-e638-4509-9fa0-57d41c2ae0a4',
'topic': 'org.fedoraproject.prod.pkgdb.package.unretire.request',
'msg': {
'collection': {
'status': 'UnderDevelopment',
'dist_tag': '.fc23',
'koji_name': 'rawhide',
'name': 'Fedora',
'version': 'devel',
'branchname': 'master'
},
'agent': 'moceap',
'package': {
'status': 'Approved',
'upstream_url': None,
'monitor': False,
'summary': 'NetBeansPlatform8',
'name': 'netbeans-platform8',
'acls': [
{
'status': 'Retired',
'point_of_contact': 'orphan',
'package': {
'status': 'Approved',
'upstream_url': None,
'monitor': False,
'summary': 'NetBeansPlatform8',
'name': 'netbeans-platform8',
'acls': [],
'creation_date': 1400070978.0,
'review_url': None,
'description': 'NetBeansPlatformisaframeworkfordevelopmentof\nRichClientSwingApplications.Itcontainspowerful\nmodulesystemandasetofmodulesprovidingvarious\nfunctionalitiesneededforsimplificationof\ndevelopmentofmodulardesktopapplications.'
},
'collection': {
'status': 'UnderDevelopment',
'dist_tag': '.fc23',
'koji_name': 'rawhide',
'name': 'Fedora',
'version': 'devel',
'branchname': 'master'
},
'critpath': False,
'status_change': 1400071169.0
},
{
'status': 'Approved',
'point_of_contact': 'victorv',
'package': {
'status': 'Approved',
'upstream_url': None,
'monitor': False,
'summary': 'NetBeansPlatform8',
'name': 'netbeans-platform8',
'acls': [],
'creation_date': 1400070978.0,
'review_url': None,
'description': 'NetBeansPlatformisaframeworkfordevelopmentof\nRichClientSwingApplications.Itcontainspowerful\nmodulesystemandasetofmodulesprovidingvarious\nfunctionalitiesneededforsimplificationof\ndevelopmentofmodulardesktopapplications.'
},
'collection': {
'status': 'EOL',
'dist_tag': '.fc10',
'koji_name': 'dist-f10',
'name': 'Fedora',
'version': '10',
'branchname': 'f10'
},
'acls': [
{
'fas_name': 'victorv',
'status': 'Approved',
'acl': 'watchcommits'
},
{
'fas_name': 'victorv',
'status': 'Approved',
'acl': 'watchbugzilla'
},
{
'fas_name': 'victorv',
'status': 'Approved',
'acl': 'commit'
},
{
'fas_name': 'victorv',
'status': 'Approved',
'acl': 'approveacls'
}
],
'critpath': False,
'status_change': 1400071253.0
},
{
'status': 'Approved',
'point_of_contact': 'victorv',
'package': {
'status': 'Approved',
'upstream_url': None,
'monitor': False,
'summary': 'NetBeansPlatform8',
'name': 'netbeans-platform8',
'acls': [
],
'creation_date': 1400070978.0,
'review_url': None,
'description': 'NetBeansPlatformisaframeworkfordevelopmentof\nRichClientSwingApplications.Itcontainspowerful\nmodulesystemandasetofmodulesprovidingvarious\nfunctionalitiesneededforsimplificationof\ndevelopmentofmodulardesktopapplications.'
},
'collection': {
'status': 'EOL',
'dist_tag': '.fc11',
'koji_name': 'dist-f11',
'name': 'Fedora',
'version': '11',
'branchname': 'f11'
},
'acls': [
{
'fas_name': 'victorv',
'status': 'Approved',
'acl': 'watchcommits'
},
{
'fas_name': 'victorv',
'status': 'Approved',
'acl': 'watchbugzilla'
},
{
'fas_name': 'victorv',
'status': 'Approved',
'acl': 'commit'
},
{
'fas_name': 'victorv',
'status': 'Approved',
'acl': 'approveacls'
}
],
'critpath': False,
'status_change': 1400071427.0
},
{
'status': 'Orphaned',
'point_of_contact': 'orphan',
'package': {
'status': 'Approved',
'upstream_url': None,
'monitor': False,
'summary': 'NetBeansPlatform8',
'name': 'netbeans-platform8',
'acls': [
],
'creation_date': 1400070978.0,
'review_url': None,
'description': 'NetBeansPlatformisaframeworkfordevelopmentof\nRichClientSwingApplications.Itcontainspowerful\nmodulesystemandasetofmodulesprovidingvarious\nfunctionalitiesneededforsimplificationof\ndevelopmentofmodulardesktopapplications.'
},
'collection': {
'status': 'EOL',
'dist_tag': '.fc12',
'koji_name': 'dist-f12',
'name': 'Fedora',
'version': '12',
'branchname': 'f12'
},
'critpath': False,
'status_change': 1400071659.0
}
],
'creation_date': 1400070978.0,
'review_url': None,
'description': 'NetBeansPlatformisaframeworkfordevelopmentof\nRichClientSwingApplications.Itcontainspowerful\nmodulesystemandasetofmodulesprovidingvarious\nfunctionalitiesneededforsimplificationof\ndevelopmentofmodulardesktopapplications.'
}
}
}
class TestPkgdbPackageKoscheiUpdate(Base):
""" The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
publishes messages like these when someone changes the
`koschei <https://apps.fedoraproject.org/koschei>`_ status of a package.
"""
expected_title = "pkgdb.package.koschei.update"
expected_subti = ("pingou set the koschei monitoring flag of guake to True")
expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
"package_128x128.png")
expected_secondary_icon = (
"https://seccdn.libravatar.org/avatar/"
"01fe73d687f4db328da1183f2a1b5b22962ca9d9c50f0728aafeac974856311c"
"?s=64&d=retro")
expected_packages = set(['guake'])
expected_usernames = set(['pingou'])
expected_objects = set(['guake/koschei/true'])
msg = {
'username': u'pierrey',
'i': 3,
'timestamp': 1435313134,
'msg_id': u'2015-7d0ecbd6-6892-4b34-98ff-b212d1fef74e',
'topic': u'org.fedoraproject.dev.pkgdb.package.koschei.update',
'msg': {
'status': True,
'agent': u'pingou',
'package': {
'status': u'Approved',
'upstream_url': u'http: //www.guake.org/',
'koschei_monitor': True,
'monitor': False,
'summary': u'Drop-downterminalforGNOME',
'name': u'guake',
'acls': [
],
'creation_date': 1400063778.0,
'review_url': None,
'description': 'Guake is a drop-down terminal for Gnome'
}
}
}
add_doc(locals())
if __name__ == '__main__':
unittest.main()
| lgpl-2.1 | -135,555,807,236,765,020 | 39.836364 | 267 | 0.42513 | false |
Panagiotis-Kon/empower-runtime | empower-manager.py | 2 | 13767 | #!/usr/bin/env python3
#
# Copyright (c) 2016 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Empower Manager."""
import sys
import getpass
import base64
import json
from http.client import HTTPConnection
from uuid import UUID
from argparse import ArgumentParser
def get_connection(gargs):
""" Fetch url from option parser. """
if gargs.transport == "http":
conn = HTTPConnection(gargs.host, gargs.port)
else:
raise ValueError("transport not supported: %s" % gargs.transport)
if gargs.no_passwd:
return (conn, {})
if gargs.passwdfile is None:
passwd = getpass.getpass("Password: ")
else:
passwd = open(gargs.passwdfile, "r").read().strip()
auth_str = "%s:%s" % (gargs.user, passwd)
auth = base64.b64encode(auth_str.encode('utf-8'))
headers = {'Authorization': 'Basic %s' % auth.decode('utf-8')}
return (conn, headers)
def pa_none(args, cmd):
""" Null parser method. """
parser = ArgumentParser(usage=USAGE.format(cmd), description=DESCS[cmd])
(args, leftovers) = parser.parse_known_args(args)
return args, leftovers
def pa_component_info(args, cmd):
""" component info parser method. """
usage = "%s <component>" % USAGE.format(cmd)
desc = DESCS[cmd]
(args, leftovers) = ArgumentParser(usage=usage,
description=desc).parse_known_args(args)
return args, leftovers
def pa_tenant_component_info(args, cmd):
""" component info parser method. """
usage = "%s <tenant_id> <component>" % USAGE.format(cmd)
desc = DESCS[cmd]
(args, leftovers) = ArgumentParser(usage=usage,
description=desc).parse_known_args(args)
return args, leftovers
def pa_load_component(args, cmd):
""" component info parser method. """
usage = "%s [<tenant_id>] <component>" % USAGE.format(cmd)
desc = DESCS[cmd]
(args, leftovers) = ArgumentParser(usage=usage,
description=desc).parse_known_args(args)
return args, leftovers
def pa_list_tenant_components(args, cmd):
""" component info parser method. """
usage = "%s <tenant_id>" % USAGE.format(cmd)
desc = DESCS[cmd]
(args, leftovers) = ArgumentParser(usage=usage,
description=desc).parse_known_args(args)
return args, leftovers
def pa_feed_on(args, cmd):
""" component info parser method. """
usage = "%s <feed id>" % USAGE.format(cmd)
desc = DESCS[cmd]
(args, leftovers) = ArgumentParser(usage=usage,
description=desc).parse_known_args(args)
return args, leftovers
def pa_feed_off(args, cmd):
""" component info parser method. """
usage = "%s <feed id>" % USAGE.format(cmd)
desc = DESCS[cmd]
(args, leftovers) = ArgumentParser(usage=usage,
description=desc).parse_known_args(args)
return args, leftovers
def pa_reboot(args, cmd):
""" Reboot WTP parser method. """
usage = "%s <wtp>" % USAGE.format(cmd)
desc = DESCS[cmd]
(args, leftovers) = ArgumentParser(usage=usage,
description=desc).parse_known_args(args)
return args, leftovers
def pa_help(args, cmd):
""" Help option parser. """
usage = "%s <cmd>" % USAGE.format(cmd)
(args, leftovers) = ArgumentParser(usage=usage).parse_known_args(args)
return args, leftovers
def do_help(gargs, args, leftovers):
""" Help execute method. """
if len(leftovers) != 1:
print("No command specified")
print_available_cmds()
sys.exit()
try:
(parse_args, _) = CMDS[leftovers[0]]
parse_args(['--help'], leftovers[0])
except KeyError:
print("Invalid command: %s is an unknown command." % leftovers[0])
sys.exit()
def do_list_components(gargs, args, leftovers):
""" List currently defined components. """
code, data = connect(gargs, ('GET', '/api/v1/components'))
if code[0] != 200:
print("%s %s" % code)
sys.exit()
for entry in data:
print(entry)
def do_list_tenant_components(gargs, args, leftovers):
""" List currently defined components. """
if len(leftovers) != 1:
print("Invalid parameter, run help list-tenant-components")
print_available_cmds()
sys.exit()
tenant_id = UUID(leftovers[0])
url = '/api/v1/tenants/%s/components' % tenant_id
code, data = connect(gargs, ('GET', url))
if code[0] != 200:
print("%s %s" % code)
sys.exit()
for entry in data:
print(entry)
def do_list_wtps(gargs, args, leftovers):
""" List currently defined components. """
code, data = connect(gargs, ('GET', '/api/v1/wtps'))
if code[0] != 200:
print("%s %s" % code)
sys.exit()
for entry in data:
connection = ""
if 'connection' in entry and entry['connection']:
connection = "at %s" % entry['connection'][0]
line = "%s last seen %s %s" % (entry['addr'],
entry['last_seen'],
connection)
print(line)
def do_list_feeds(gargs, args, leftovers):
""" List currently defined components. """
code, data = connect(gargs, ('GET', '/api/v1/feeds'))
if code[0] != 200:
print("%s %s" % code)
sys.exit()
for entry in data:
connection = ""
if 'mngt' in entry and entry['mngt']:
connection = "at %s" % entry['mngt'][0]
status = "n/a"
for datastream in entry['datastreams']:
if datastream['id'] == 'switch':
if datastream['current_value'] == 0.0:
status = "on"
else:
status = "off"
line = "feed %s %s %s" % (entry['id'], connection, status)
print(line)
def do_component_info(gargs, args, leftovers):
""" List component info. """
if len(leftovers) != 1:
print("Invalid parameter, run help component-info")
print_available_cmds()
sys.exit()
url = '/api/v1/components/%s' % leftovers[0]
code, data = connect(gargs, ('GET', url))
if code[0] != 200:
print("%s %s" % code)
sys.exit()
print("componentd_id: %s" % leftovers[0])
for key, value in data.items():
print("%s: %s" % (key, value))
def do_tenant_component_info(gargs, args, leftovers):
""" List component info. """
if len(leftovers) != 2:
print("Invalid parameter, run help tenant-component-info")
print_available_cmds()
sys.exit()
url = '/api/v1/tenants/%s/components/%s' % tuple(leftovers)
code, data = connect(gargs, ('GET', url))
if code[0] != 200:
print("%s %s" % code)
sys.exit()
print("componentd_id: %s" % leftovers[0])
for key, value in data.items():
print("%s: %s" % (key, value))
def do_load_component(gargs, args, leftovers):
""" List component info. """
if len(leftovers) == 1:
url = '/api/v1/components'
elif len(leftovers) == 2:
uuid = UUID(leftovers[0])
url = '/api/v1/tenants/%s/components' % uuid
else:
print("Invalid parameter, run help tenant-component-info")
print_available_cmds()
sys.exit()
data = {"version": "1.0", "argv": leftovers[1]}
code, data = connect(gargs, ('POST', url), data)
if code[0] != 200:
print("%s %s" % code)
sys.exit()
print("componentd_id: %s" % leftovers[0])
for key, value in data.items():
print("%s: %s" % (key, value))
def do_feed_on(gargs, args, leftovers):
""" List component info. """
data = {"version": "1.0", "value": "0"}
code, data = connect(gargs,
('PUT', '/api/v1/feeds/%s' % leftovers[0]),
data)
if code[0] != 204:
print("%s %s" % code)
sys.exit()
def do_feed_off(gargs, args, leftovers):
""" List component info. """
data = {"version": "1.0", "value": "1"}
code, data = connect(gargs,
('PUT', '/api/v1/feeds/%s' % leftovers[0]),
data)
if code[0] != 204:
print("%s %s" % code)
sys.exit()
def do_reboot(gargs, args, leftovers):
""" List component info. """
connection, headers = get_connection(gargs)
cmd = ('GET', '/api/v1/wtps/%s' % leftovers[0])
code, data = run_connect(connection, headers, cmd)
if code[0] != 200:
print("%s %s" % code)
sys.exit()
if 'feed' not in data:
print("no feed")
sys.exit()
feed_id = data['feed']['id']
cmd = ('PUT', '/api/v1/feeds/%s' % feed_id)
data = {"version": "1.0", "value": "1"}
code, data = run_connect(connection, headers, cmd, data)
if code[0] != 204:
print("%s %s" % code)
sys.exit()
print("feed off")
data = {"version": "1.0", "value": "0"}
code, data = run_connect(connection, headers, cmd, data)
if code[0] != 204:
print("%s %s" % code)
sys.exit()
print("feed on")
def connect(gargs, cmd, data=None):
""" Run command. """
connection, headers = get_connection(gargs)
response, body = run_connect(connection, headers, cmd, data)
return response, body
def run_connect(connection, headers, cmd, data=None):
""" Run command. """
headers['Content-type'] = 'application/json'
connection.request(cmd[0], cmd[1], headers=headers, body=json.dumps(data))
response = connection.getresponse()
str_response = response.read().decode('utf-8')
if str_response:
return (response.code, response.reason), json.loads(str_response)
else:
return (response.code, response.reason), None
CMDS = {
'help': (pa_help, do_help),
'list-components': (pa_none, do_list_components),
'load-component': (pa_load_component, do_load_component),
'list-tenant-components': (pa_list_tenant_components,
do_list_tenant_components),
'component-info': (pa_component_info, do_component_info),
'tenant-component-info': (pa_tenant_component_info,
do_tenant_component_info),
'list-wtps': (pa_none, do_list_wtps),
'list-feeds': (pa_none, do_list_feeds),
'feed-on': (pa_feed_on, do_feed_on),
'feed-off': (pa_feed_off, do_feed_off),
'reboot': (pa_reboot, do_reboot),
}
USAGE = "%(prog)s {0}"
URL = "%s://%s%s:%s"
DESCS = {
'help': "Print help message.",
'load-component': "Load component.",
'list-components': "List components.",
'list-tenant-components': "List tenant components.",
'component-info': "Displays components info.",
'tenant-component-info': "Displays tenant components info.",
'list-wtps': "List WTPs.",
'list-feeds': "List Feeds.",
'feed-on': "Turn feed on.",
    'feed-off': "Turn feed off.",
'reboot': "Reboot node.",
}
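# Example invocations (illustrative; host, component names and feed ids below are
# hypothetical):
#   ./empower-manager.py list-wtps
#   ./empower-manager.py -r 192.168.0.10 -p 8888 component-info empower.apps.mobilitymanager
#   ./empower-manager.py -n feed-off 42
#   ./empower-manager.py help load-component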
def parse_global_args(arglist):
""" Parse global arguments list. """
usage = "%s [options] command [command_args]" % sys.argv[0]
args = []
while len(arglist) != 0 and arglist[0] not in CMDS:
args.append(arglist[0])
arglist.pop(0)
parser = ArgumentParser(usage=usage)
parser.add_argument("-r", "--host", dest="host", default="127.0.0.1",
help="REST server address; default='127.0.0.1'")
parser.add_argument("-p", "--port", dest="port", default="8888",
help="REST server port; default=8888")
parser.add_argument("-u", "--user", dest="user", default="root",
help="EmPOWER admin user; default='root'")
parser.add_argument("-n", "--no-passwd", action="store_true",
dest="no_passwd", default=False,
help="Run without password; default false")
parser.add_argument("-f", "--passwd-file", dest="passwdfile",
default=None, help="Password file; default=none")
parser.add_argument("-t", "--transport", dest="transport", default="http",
help="Specify the transport; default='http'")
(args, _) = parser.parse_known_args(args)
return args, arglist, parser
def print_available_cmds():
""" Print list of available commands. """
cmds = [x for x in CMDS.keys()]
cmds.remove('help')
cmds.sort()
print("\nAvailable commands are: ")
for cmd in cmds:
print(" {0:25} {1:10}".format(cmd, DESCS[cmd]))
print("\nSee '%s help <command>' for more info." % sys.argv[0])
def main():
""" Parse argument list and execute command. """
(gargs, rargs, parser) = parse_global_args(sys.argv[1:])
if len(sys.argv) == 1:
print(parser.format_help().strip())
print_available_cmds()
sys.exit()
if len(rargs) < 1:
print("Unknown command")
print_available_cmds()
sys.exit()
(parse_args, do_func) = CMDS[rargs[0]]
(args, leftovers) = parse_args(rargs[1:], rargs[0])
do_func(gargs, args, leftovers)
if __name__ == '__main__':
main()
| apache-2.0 | 2,858,925,847,260,787,000 | 26.479042 | 79 | 0.569623 | false |
PimentNoir/xbmc | addons/service.xbmc.versioncheck/service.py | 23 | 4122 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Team-XBMC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import platform
import sys
import xbmc
import lib.common
from lib.common import log, dialog_yesno
from lib.common import upgrade_message as _upgrademessage
from lib.common import upgrade_message2 as _upgrademessage2
ADDON = lib.common.ADDON
ADDONVERSION = lib.common.ADDONVERSION
ADDONNAME = lib.common.ADDONNAME
ADDONPATH = lib.common.ADDONPATH
ICON = lib.common.ICON
oldversion = False
monitor = xbmc.Monitor()
class Main:
def __init__(self):
linux = False
packages = []
if monitor.waitForAbort(5):
sys.exit(0)
if xbmc.getCondVisibility('System.Platform.Linux') and ADDON.getSetting("upgrade_apt") == 'true':
packages = ['kodi']
_versionchecklinux(packages)
else:
oldversion, version_installed, version_available, version_stable = _versioncheck()
if oldversion:
_upgrademessage2( version_installed, version_available, version_stable, oldversion, False)
def _versioncheck():
# initial vars
from lib.jsoninterface import get_installedversion, get_versionfilelist
from lib.versions import compare_version
# retrieve versionlists from supplied version file
versionlist = get_versionfilelist()
# retrieve version installed
version_installed = get_installedversion()
# compare installed and available
oldversion, version_installed, version_available, version_stable = compare_version(version_installed, versionlist)
return oldversion, version_installed, version_available, version_stable
def _versionchecklinux(packages):
if platform.dist()[0].lower() in ['ubuntu', 'debian', 'linuxmint']:
handler = False
result = False
try:
# try aptdaemon first
from lib.aptdaemonhandler import AptdaemonHandler
handler = AptdaemonHandler()
except:
# fallback to shell
# since we need the user password, ask to check for new version first
from lib.shellhandlerapt import ShellHandlerApt
sudo = True
handler = ShellHandlerApt(sudo)
if dialog_yesno(32015):
pass
elif dialog_yesno(32009, 32010):
log("disabling addon by user request")
ADDON.setSetting("versioncheck_enable", 'false')
return
if handler:
if handler.check_upgrade_available(packages[0]):
if _upgrademessage(32012, oldversion, True):
if ADDON.getSetting("upgrade_system") == "false":
result = handler.upgrade_package(packages[0])
else:
result = handler.upgrade_system()
if result:
from lib.common import message_upgrade_success, message_restart
message_upgrade_success()
message_restart()
else:
log("Error during upgrade")
else:
log("Error: no handler found")
else:
log("Unsupported platform %s" %platform.dist()[0])
sys.exit(0)
if (__name__ == "__main__"):
if ADDON.getSetting("versioncheck_enable") == "false":
log("Disabled")
else:
log('Version %s started' % ADDONVERSION)
Main()
| gpl-2.0 | 4,657,650,795,815,922,000 | 35.803571 | 118 | 0.623241 | false |
yuanbaowen521/tadbit | _pytadbit/utils/hic_filtering.py | 1 | 12308 | """
06 Aug 2013
"""
from warnings import warn
from sys import stderr
from re import sub
from pytadbit.utils.extraviews import tadbit_savefig
import numpy as np
try:
from matplotlib import pyplot as plt
except ImportError:
warn('matplotlib not found\n')
def get_r2 (fun, X, Y, *args):
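    # coefficient of determination of the fit Y ~ fun(X): R^2 = 1 - SS_err / SS_tot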
sstot = sum([(Y[i]-np.mean(Y))**2 for i in xrange(len(Y))])
sserr = sum([(Y[i] - fun(X[i], *args))**2 for i in xrange(len(Y))])
return 1 - sserr/sstot
def filter_by_mean(matrx, draw_hist=False, silent=False, savefig=None):
"""
    fits the distribution of Hi-C interaction count by column in the matrix to
    a polynomial, then uses the first root of the polynomial's derivative (a local
    minimum of the fitted distribution) lying below the 5th percentile of column
    sums as a cutoff: columns with a lower total count are flagged as bad.
"""
nbins = 100
# get sum of columns
cols = []
size = len(matrx)
for c in sorted([[matrx.get(i+j*size, 0) for j in xrange(size)]
for i in xrange(size)], key=sum):
cols.append(sum(c))
cols = np.array(cols)
if draw_hist:
plt.figure(figsize=(9, 9))
percentile = np.percentile(cols, 5)
# mad = np.median([abs(median - c ) for c in cols])
best =(None, None, None, None)
# bin the sum of columns
xmin = min(cols)
xmax = max(cols)
y = np.linspace(xmin, xmax, nbins)
hist = np.digitize(cols, y)
x = [sum(hist == i) for i in range(1, nbins + 1)]
if draw_hist:
hist = plt.hist(cols, bins=100, alpha=.3, color='grey')
xp = range(0, int(cols[-1]))
# check if the binning is correct
# we want at list half of the bins with some data
try:
cnt = 0
while list(x).count(0) > len(x)/2:
cnt += 1
cols = cols[:-1]
xmin = min(cols)
xmax = max(cols)
y = np.linspace(xmin, xmax, nbins)
hist = np.digitize(cols, y)
x = [sum(hist == i) for i in range(1, nbins + 1)]
if draw_hist:
plt.clf()
hist = plt.hist(cols, bins=100, alpha=.3, color='grey')
xp = range(0, int(cols[-1]))
if cnt > 10000:
raise ValueError
except ValueError:
if not silent:
stderr.write('WARNING: Too few data to filter columns. ' +
'SKIPPING...\n')
return {}
# find best polynomial fit in a given range
for order in range(6, 18):
z = np.polyfit(y, x, order)
zp = np.polyder(z, m=1)
roots = np.roots(np.polyder(z))
# check that we are concave down, otherwise take next root
pente = np.polyval(zp, abs(roots[-2] - roots[-1]) / 2 + roots[-1])
if pente > 0:
root = roots[-1]
else:
root = roots[-2]
# root must be higher than zero
if root <= 0:
continue
# and lower than the median
if root >= percentile:
continue
p = np.poly1d(z)
        R2 = get_r2(p, y, x)
# try to avoid very large orders by weigthing negatively their fit
if order > 13:
R2 -= float(order)/30
if best[0] < R2:
best = (R2, order, p, z, root)
try:
p, z, root = best[2:]
except:
if not silent:
stderr.write('WARNING: Too many zeroes to filter columns.' +
' SKIPPING...\n')
return {}
if draw_hist:
a = plt.plot(xp, p(xp), "--", color='k')
b = plt.vlines(root, 0, plt.ylim()[1], colors='r', linestyles='dashed')
# c = plt.vlines(median - mad * 1.5, 0, 110, colors='g',
# linestyles='dashed')
try:
plt.legend(a+[b], ['polyfit \n%s' % (
''.join([sub('e([-+][0-9]+)', 'e^{\\1}',
'$%s%.1fx^%s$' % ('+' if j>0 else '', j,
'{' + str(i) + '}'))
for i, j in enumerate(list(p)[::-1])])),
'first solution of polynomial derivation'],
fontsize='x-small')
except TypeError:
plt.legend(a+[b], ['polyfit \n%s' % (
''.join([sub('e([-+][0-9]+)', 'e^{\\1}',
'$%s%.1fx^%s$' % ('+' if j>0 else '', j,
'{' + str(i) + '}'))
for i, j in enumerate(list(p)[::-1])])),
'first solution of polynomial derivation'])
# plt.legend(a+[b]+[c], ['polyfit \n{}'.format (
# ''.join([sub('e([-+][0-9]+)', 'e^{\\1}',
# '${}{:.1}x^{}$'.format ('+' if j>0 else '', j,
# '{' + str(i) + '}'))
# for i, j in enumerate(list(p)[::-1])])),
# 'first solution of polynomial derivation',
# 'median - (1.5 * median absolute deviation)'],
# fontsize='x-small')
plt.ylim(0, plt.ylim()[1])
if savefig:
tadbit_savefig(savefig)
else:
plt.show()
# label as bad the columns with sums lower than the root
bads = {}
for i, col in enumerate([[matrx.get(i+j*size, 0)
for j in xrange(size)]
for i in xrange(size)]):
if sum(col) < root:
bads[i] = sum(col)
# now stored in Experiment._zeros, used for getting more accurate z-scores
if bads and not silent:
stderr.write(('\nWARNING: removing columns having less than %s ' +
'counts:\n %s\n') % (
round(root, 3), ' '.join(
['%5s'%str(i + 1) + (''if (j + 1) % 20 else '\n')
for j, i in enumerate(sorted(bads.keys()))])))
return bads
def filter_by_zero_count(matrx, draw_hist=False, savefig=None):
"""
    fits the distribution of the number of non-zero cells per column in the matrix
    to a polynomial, then uses the first root of the polynomial's derivative lying
    below the median as a cutoff for flagging columns.
"""
nbins = 100
# get sum of columns
cols = []
for c in sorted(matrx, key=sum):
cols.append(len(c) - c.count(0))
cols = np.array(cols)
if draw_hist:
plt.figure(figsize=(9, 9))
median = np.median(cols)
# mad = np.median([abs(median - c ) for c in cols])
best =(None, None, None, None)
# bin the sum of columns
xmin = min(cols)
xmax = max(cols)
y = np.linspace(xmin, xmax, nbins)
hist = np.digitize(cols, y)
x = [sum(hist == i) for i in range(1, nbins + 1)]
if draw_hist:
hist = plt.hist(cols, bins=100, alpha=.3, color='grey')
xp = range(0, cols[-1])
# check if the binning is correct
# we want at list half of the bins with some data
while list(x).count(0) > 2*len(x)/3:
cols = cols[:-1]
xmin = min(cols)
xmax = max(cols)
y = np.linspace(xmin, xmax, nbins)
hist = np.digitize(cols, y)
x = [sum(hist == i) for i in range(1, nbins + 1)]
if draw_hist:
plt.clf()
hist = plt.hist(cols, bins=100, alpha=.3, color='grey')
xp = range(0, cols[-1])
# find best polynomial fit in a given range
for order in range(7, 14):
z = np.polyfit(y, x, order)
zpp = np.polyder(z, m=1)
roots = np.roots(np.polyder(z))
# check that we are concave down, otherwise take next root
pente = np.polyval(zpp, abs(roots[-2] - roots[-1]) / 2 + roots[-1])
if pente > 0:
root = roots[-1]
else:
root = roots[-2]
# root must be higher than zero
if root <= 0:
continue
# and lower than the median
if root >= median:
continue
p = np.poly1d(z)
        R2 = get_r2(p, y, x)
if best[0] < R2:
best = (R2, order, p, z, root)
p, z, root = best[2:]
if draw_hist:
a = plt.plot(xp, p(xp), "--", color='k')
b = plt.vlines(root, 0, plt.ylim()[1], colors='r', linestyles='dashed')
try:
plt.legend(a + [b], ['polyfit \n%s' % (
''.join([sub('e([-+][0-9]+)', 'e^{\\1}',
'$%s%.1fx^%s$' % ('+' if j>0 else '', j,
'{' + str(i) + '}'))
for i, j in enumerate(list(p)[::-1])])),
'first solution of polynomial derivation'],
fontsize='x-small')
except TypeError:
plt.legend(a + [b], ['polyfit \n%s' % (
''.join([sub('e([-+][0-9]+)', 'e^{\\1}',
'$%s%.1fx^%s$' % ('+' if j>0 else '', j,
'{' + str(i) + '}'))
for i, j in enumerate(list(p)[::-1])])),
'first solution of polynomial derivation'])
plt.ylim(0, plt.ylim()[1])
if savefig:
tadbit_savefig(savefig)
else:
plt.show()
# label as bad the columns with sums lower than the root
bads = {}
for i, col in enumerate(matrx):
if sum(col) < root:
bads[i] = None
# now stored in Experiment._zeros, used for getting more accurate z-scores
return bads
def filter_by_stdev(matrx):
means = [np.mean(c) for c in matrx]
mean = np.mean(means)
stde = np.std(means)
root = mean - stde * 1.25
# label as bad the columns with sums lower than the root
bads = {}
for i, col in enumerate(matrx):
if sum(col) < root:
bads[i] = None
# now stored in Experiment._zeros, used for getting more accurate z-scores
return bads
def filter_by_mad(matrx):
# get sum of columns
cols = []
for c in sorted(matrx, key=sum):
cols.append(sum(c))
cols = np.array(cols)
median = np.median(cols)
mad = np.median([abs(median - c ) for c in cols])
root = median - mad * 1.5
# label as bad the columns with sums lower than the root
bads = {}
for i, col in enumerate(matrx):
if sum(col) < root:
bads[i] = None
# now stored in Experiment._zeros, used for getting more accurate z-scores
return bads
def hic_filtering_for_modelling(matrx, method='mean', silent=False,
draw_hist=False, savefig=None, diagonal=True):
"""
Call filtering function, to remove artefactual columns in a given Hi-C
matrix. This function will detect columns with very low interaction
counts; and columns with NaN values (in this case NaN will be replaced
by zero in the original Hi-C data matrix). Filtered out columns will be
stored in the dictionary Experiment._zeros.
:param matrx: Hi-C matrix of a given experiment
:param False silent: does not warn for removed columns
:param mean method: method to use for filtering Hi-C columns. Aims to
remove columns with abnormally low count of interactions
:param None savefig: path to a file where to save the image generated;
if None, the image will be shown using matplotlib GUI (the extension
of the file name will determine the desired format).
:param True diagonal: remove row/columns with zero in the diagonal
:returns: the indexes of the columns not to be considered for the
calculation of the z-score
"""
if method == 'mean':
bads = filter_by_mean(matrx, draw_hist=draw_hist, silent=silent,
savefig=savefig)
elif method == 'zeros':
bads = filter_by_zero_count(matrx)
elif method == 'mad':
bads = filter_by_mad(matrx)
elif method == 'stdev':
bads = filter_by_stdev(matrx)
else:
        raise NotImplementedError('unknown filtering method: %s' % method)
# also removes rows or columns containing a NaN
has_nans = False
for i in xrange(len(matrx)):
if matrx.get(i + i * len(matrx), 0) == 0 and diagonal:
if not i in bads:
bads[i] = None
elif repr(sum([matrx.get(i + j * len(matrx), 0)
for j in xrange(len(matrx))])) == 'nan':
has_nans = True
if not i in bads:
bads[i] = None
return bads, has_nans
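# Minimal usage sketch (hypothetical data): 'hic_data' is a dict of raw counts keyed
# by i + j * size for an N x N matrix, as used by filter_by_mean above.
# bads, has_nans = hic_filtering_for_modelling(hic_data, method='mean', silent=True)
# 'bads' maps each filtered-out column index to its total count (or None).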
| gpl-3.0 | 8,235,652,805,680,449,000 | 37.223602 | 79 | 0.500731 | false |
keras-team/keras-io | examples/audio/speaker_recognition_using_cnn.py | 1 | 14417 | """
Title: Speaker Recognition
Author: [Fadi Badine](https://twitter.com/fadibadine)
Date created: 14/06/2020
Last modified: 03/07/2020
Description: Classify speakers using Fast Fourier Transform (FFT) and a 1D Convnet.
"""
"""
## Introduction
This example demonstrates how to create a model to classify speakers from the
frequency domain representation of speech recordings, obtained via Fast Fourier
Transform (FFT).
It shows the following:
- How to use `tf.data` to load, preprocess and feed audio streams into a model
- How to create a 1D convolutional network with residual
connections for audio classification.
Our process:
- We prepare a dataset of speech samples from different speakers, with the speaker as label.
- We add background noise to these samples to augment our data.
- We take the FFT of these samples.
- We train a 1D convnet to predict the correct speaker given a noisy FFT speech sample.
Note:
- This example should be run with TensorFlow 2.3 or higher, or `tf-nightly`.
- The noise samples in the dataset need to be resampled to a sampling rate of 16000 Hz
before using the code in this example. In order to do this, you will need to have
  installed `ffmpeg`.
"""
"""
## Setup
"""
import os
import shutil
import numpy as np
import tensorflow as tf
from tensorflow import keras
from pathlib import Path
from IPython.display import display, Audio
# Get the data from https://www.kaggle.com/kongaevans/speaker-recognition-dataset/download
# and save it to the 'Downloads' folder in your HOME directory
DATASET_ROOT = os.path.join(os.path.expanduser("~"), "Downloads/16000_pcm_speeches")
# The folders in which we will put the audio samples and the noise samples
AUDIO_SUBFOLDER = "audio"
NOISE_SUBFOLDER = "noise"
DATASET_AUDIO_PATH = os.path.join(DATASET_ROOT, AUDIO_SUBFOLDER)
DATASET_NOISE_PATH = os.path.join(DATASET_ROOT, NOISE_SUBFOLDER)
# Percentage of samples to use for validation
VALID_SPLIT = 0.1
# Seed to use when shuffling the dataset and the noise
SHUFFLE_SEED = 43
# The sampling rate to use.
# This is the one used in all of the audio samples.
# We will resample all of the noise to this sampling rate.
# This will also be the output size of the audio wave samples
# (since all samples are of 1 second long)
SAMPLING_RATE = 16000
# The factor to multiply the noise with according to:
# noisy_sample = sample + noise * prop * scale
# where prop = sample_amplitude / noise_amplitude
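# For example (illustrative numbers): if a speech sample peaks at 0.8 and the noise
# sample peaks at 0.2, then prop = 0.8 / 0.2 = 4.0, so the noise is multiplied by
# prop * SCALE = 4.0 * 0.5 = 2.0 before being added to the speech sample.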
SCALE = 0.5
BATCH_SIZE = 128
EPOCHS = 100
"""
## Data preparation
The dataset is composed of 7 folders, divided into 2 groups:
- Speech samples, with 5 folders for 5 different speakers. Each folder contains
1500 audio files, each 1 second long and sampled at 16000 Hz.
- Background noise samples, with 2 folders and a total of 6 files. These files
are longer than 1 second (and originally not sampled at 16000 Hz, but we will resample them to 16000 Hz).
We will use those 6 files to create 354 1-second-long noise samples to be used for training.
Let's sort these 2 categories into 2 folders:
- An `audio` folder which will contain all the per-speaker speech sample folders
- A `noise` folder which will contain all the noise samples
"""
"""
Before sorting the audio and noise categories into 2 folders,
we have the following directory structure:
```
main_directory/
...speaker_a/
...speaker_b/
...speaker_c/
...speaker_d/
...speaker_e/
...other/
..._background_noise_/
```
After sorting, we end up with the following structure:
```
main_directory/
...audio/
......speaker_a/
......speaker_b/
......speaker_c/
......speaker_d/
......speaker_e/
...noise/
......other/
......_background_noise_/
```
"""
# If folder `audio`, does not exist, create it, otherwise do nothing
if os.path.exists(DATASET_AUDIO_PATH) is False:
os.makedirs(DATASET_AUDIO_PATH)
# If folder `noise`, does not exist, create it, otherwise do nothing
if os.path.exists(DATASET_NOISE_PATH) is False:
os.makedirs(DATASET_NOISE_PATH)
for folder in os.listdir(DATASET_ROOT):
if os.path.isdir(os.path.join(DATASET_ROOT, folder)):
if folder in [AUDIO_SUBFOLDER, NOISE_SUBFOLDER]:
# If folder is `audio` or `noise`, do nothing
continue
elif folder in ["other", "_background_noise_"]:
# If folder is one of the folders that contains noise samples,
# move it to the `noise` folder
shutil.move(
os.path.join(DATASET_ROOT, folder),
os.path.join(DATASET_NOISE_PATH, folder),
)
else:
# Otherwise, it should be a speaker folder, then move it to
# `audio` folder
shutil.move(
os.path.join(DATASET_ROOT, folder),
os.path.join(DATASET_AUDIO_PATH, folder),
)
"""
## Noise preparation
In this section:
- We load all noise samples (which should have been resampled to 16000)
- We split those noise samples into chunks of 16000 samples, each of which
  corresponds to 1 second of audio
"""
# Get the list of all noise files
noise_paths = []
for subdir in os.listdir(DATASET_NOISE_PATH):
subdir_path = Path(DATASET_NOISE_PATH) / subdir
if os.path.isdir(subdir_path):
noise_paths += [
os.path.join(subdir_path, filepath)
for filepath in os.listdir(subdir_path)
if filepath.endswith(".wav")
]
print(
"Found {} files belonging to {} directories".format(
len(noise_paths), len(os.listdir(DATASET_NOISE_PATH))
)
)
"""
Resample all noise samples to 16000 Hz
"""
command = (
"for dir in `ls -1 " + DATASET_NOISE_PATH + "`; do "
"for file in `ls -1 " + DATASET_NOISE_PATH + "/$dir/*.wav`; do "
"sample_rate=`ffprobe -hide_banner -loglevel panic -show_streams "
"$file | grep sample_rate | cut -f2 -d=`; "
"if [ $sample_rate -ne 16000 ]; then "
"ffmpeg -hide_banner -loglevel panic -y "
"-i $file -ar 16000 temp.wav; "
"mv temp.wav $file; "
"fi; done; done"
)
os.system(command)
# Split noise into chunks of 16,000 steps each
def load_noise_sample(path):
sample, sampling_rate = tf.audio.decode_wav(
tf.io.read_file(path), desired_channels=1
)
if sampling_rate == SAMPLING_RATE:
# Number of slices of 16000 each that can be generated from the noise sample
slices = int(sample.shape[0] / SAMPLING_RATE)
sample = tf.split(sample[: slices * SAMPLING_RATE], slices)
return sample
else:
print("Sampling rate for {} is incorrect. Ignoring it".format(path))
return None
noises = []
for path in noise_paths:
sample = load_noise_sample(path)
if sample:
noises.extend(sample)
noises = tf.stack(noises)
print(
"{} noise files were split into {} noise samples where each is {} sec. long".format(
len(noise_paths), noises.shape[0], noises.shape[1] // SAMPLING_RATE
)
)
"""
## Dataset generation
"""
def paths_and_labels_to_dataset(audio_paths, labels):
"""Constructs a dataset of audios and labels."""
path_ds = tf.data.Dataset.from_tensor_slices(audio_paths)
audio_ds = path_ds.map(lambda x: path_to_audio(x))
label_ds = tf.data.Dataset.from_tensor_slices(labels)
return tf.data.Dataset.zip((audio_ds, label_ds))
def path_to_audio(path):
"""Reads and decodes an audio file."""
audio = tf.io.read_file(path)
audio, _ = tf.audio.decode_wav(audio, 1, SAMPLING_RATE)
return audio
def add_noise(audio, noises=None, scale=0.5):
if noises is not None:
# Create a random tensor of the same size as audio ranging from
# 0 to the number of noise stream samples that we have.
tf_rnd = tf.random.uniform(
(tf.shape(audio)[0],), 0, noises.shape[0], dtype=tf.int32
)
noise = tf.gather(noises, tf_rnd, axis=0)
# Get the amplitude proportion between the audio and the noise
prop = tf.math.reduce_max(audio, axis=1) / tf.math.reduce_max(noise, axis=1)
prop = tf.repeat(tf.expand_dims(prop, axis=1), tf.shape(audio)[1], axis=1)
# Adding the rescaled noise to audio
audio = audio + noise * prop * scale
return audio
def audio_to_fft(audio):
# Since tf.signal.fft applies FFT on the innermost dimension,
# we need to squeeze the dimensions and then expand them again
# after FFT
audio = tf.squeeze(audio, axis=-1)
fft = tf.signal.fft(
tf.cast(tf.complex(real=audio, imag=tf.zeros_like(audio)), tf.complex64)
)
fft = tf.expand_dims(fft, axis=-1)
# Return the absolute value of the first half of the FFT
# which represents the positive frequencies
return tf.math.abs(fft[:, : (audio.shape[1] // 2), :])
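# Note: with SAMPLING_RATE = 16000, each 1-second waveform of shape (16000, 1) is
# mapped to a magnitude spectrum of shape (8000, 1), keeping only positive frequencies.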
# Get the list of audio file paths along with their corresponding labels
class_names = os.listdir(DATASET_AUDIO_PATH)
print("Our class names: {}".format(class_names,))
audio_paths = []
labels = []
for label, name in enumerate(class_names):
print("Processing speaker {}".format(name,))
dir_path = Path(DATASET_AUDIO_PATH) / name
speaker_sample_paths = [
os.path.join(dir_path, filepath)
for filepath in os.listdir(dir_path)
if filepath.endswith(".wav")
]
audio_paths += speaker_sample_paths
labels += [label] * len(speaker_sample_paths)
print(
"Found {} files belonging to {} classes.".format(len(audio_paths), len(class_names))
)
# Shuffle
rng = np.random.RandomState(SHUFFLE_SEED)
rng.shuffle(audio_paths)
rng = np.random.RandomState(SHUFFLE_SEED)
rng.shuffle(labels)
# Split into training and validation
num_val_samples = int(VALID_SPLIT * len(audio_paths))
print("Using {} files for training.".format(len(audio_paths) - num_val_samples))
train_audio_paths = audio_paths[:-num_val_samples]
train_labels = labels[:-num_val_samples]
print("Using {} files for validation.".format(num_val_samples))
valid_audio_paths = audio_paths[-num_val_samples:]
valid_labels = labels[-num_val_samples:]
# Create 2 datasets, one for training and the other for validation
train_ds = paths_and_labels_to_dataset(train_audio_paths, train_labels)
train_ds = train_ds.shuffle(buffer_size=BATCH_SIZE * 8, seed=SHUFFLE_SEED).batch(
BATCH_SIZE
)
valid_ds = paths_and_labels_to_dataset(valid_audio_paths, valid_labels)
valid_ds = valid_ds.shuffle(buffer_size=32 * 8, seed=SHUFFLE_SEED).batch(32)
# Add noise to the training set
train_ds = train_ds.map(
lambda x, y: (add_noise(x, noises, scale=SCALE), y),
num_parallel_calls=tf.data.experimental.AUTOTUNE,
)
# Transform audio wave to the frequency domain using `audio_to_fft`
train_ds = train_ds.map(
lambda x, y: (audio_to_fft(x), y), num_parallel_calls=tf.data.experimental.AUTOTUNE
)
train_ds = train_ds.prefetch(tf.data.experimental.AUTOTUNE)
valid_ds = valid_ds.map(
lambda x, y: (audio_to_fft(x), y), num_parallel_calls=tf.data.experimental.AUTOTUNE
)
valid_ds = valid_ds.prefetch(tf.data.experimental.AUTOTUNE)
"""
## Model Definition
"""
def residual_block(x, filters, conv_num=3, activation="relu"):
# Shortcut
s = keras.layers.Conv1D(filters, 1, padding="same")(x)
for i in range(conv_num - 1):
x = keras.layers.Conv1D(filters, 3, padding="same")(x)
x = keras.layers.Activation(activation)(x)
x = keras.layers.Conv1D(filters, 3, padding="same")(x)
x = keras.layers.Add()([x, s])
x = keras.layers.Activation(activation)(x)
return keras.layers.MaxPool1D(pool_size=2, strides=2)(x)
def build_model(input_shape, num_classes):
inputs = keras.layers.Input(shape=input_shape, name="input")
x = residual_block(inputs, 16, 2)
x = residual_block(x, 32, 2)
x = residual_block(x, 64, 3)
x = residual_block(x, 128, 3)
x = residual_block(x, 128, 3)
x = keras.layers.AveragePooling1D(pool_size=3, strides=3)(x)
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(256, activation="relu")(x)
x = keras.layers.Dense(128, activation="relu")(x)
outputs = keras.layers.Dense(num_classes, activation="softmax", name="output")(x)
return keras.models.Model(inputs=inputs, outputs=outputs)
model = build_model((SAMPLING_RATE // 2, 1), len(class_names))
model.summary()
# Compile the model using Adam's default learning rate
model.compile(
optimizer="Adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
# Add callbacks:
# 'EarlyStopping' to stop training when the model is not enhancing anymore
# 'ModelCheckPoint' to always keep the model that has the best val_accuracy
model_save_filename = "model.h5"
earlystopping_cb = keras.callbacks.EarlyStopping(patience=10, restore_best_weights=True)
mdlcheckpoint_cb = keras.callbacks.ModelCheckpoint(
model_save_filename, monitor="val_accuracy", save_best_only=True
)
"""
## Training
"""
history = model.fit(
train_ds,
epochs=EPOCHS,
validation_data=valid_ds,
callbacks=[earlystopping_cb, mdlcheckpoint_cb],
)
"""
## Evaluation
"""
print(model.evaluate(valid_ds))
"""
We get ~ 98% validation accuracy.
"""
"""
## Demonstration
Let's take some samples and:
- Predict the speaker
- Compare the prediction with the real speaker
- Listen to the audio to see that despite the samples being noisy,
the model is still pretty accurate
"""
SAMPLES_TO_DISPLAY = 10
test_ds = paths_and_labels_to_dataset(valid_audio_paths, valid_labels)
test_ds = test_ds.shuffle(buffer_size=BATCH_SIZE * 8, seed=SHUFFLE_SEED).batch(
BATCH_SIZE
)
test_ds = test_ds.map(lambda x, y: (add_noise(x, noises, scale=SCALE), y))
for audios, labels in test_ds.take(1):
# Get the signal FFT
ffts = audio_to_fft(audios)
# Predict
y_pred = model.predict(ffts)
# Take random samples
rnd = np.random.randint(0, BATCH_SIZE, SAMPLES_TO_DISPLAY)
audios = audios.numpy()[rnd, :, :]
labels = labels.numpy()[rnd]
y_pred = np.argmax(y_pred, axis=-1)[rnd]
for index in range(SAMPLES_TO_DISPLAY):
# For every sample, print the true and predicted label
# as well as run the voice with the noise
print(
"Speaker:\33{} {}\33[0m\tPredicted:\33{} {}\33[0m".format(
"[92m" if labels[index] == y_pred[index] else "[91m",
class_names[labels[index]],
"[92m" if labels[index] == y_pred[index] else "[91m",
class_names[y_pred[index]],
)
)
display(Audio(audios[index, :, :].squeeze(), rate=SAMPLING_RATE))
| apache-2.0 | 4,170,799,372,194,741,000 | 29.805556 | 105 | 0.677603 | false |
DanielCoutoVale/openccg | bin/dlf_parser.py | 4 | 9487 | #
# dlf_parser.py (invoked by ccg-draw-graph) uses graphviz's dot to visualize (D)LF graphs
#
# author: Jonathan Barker (with minor contributions by Michael White)
# license: LGPL
#
from xml.etree.ElementTree import ElementTree
import optparse, sys, codecs, xml, os
from collections import defaultdict
# Parse arguments
op = optparse.OptionParser()
op.add_option("-i", "--input", type="string", help="input source: file or <stdin>(default)", default=sys.stdin)
op.add_option("-m", "--moses", type="string", help="file/directory prefix for moses output", default=None)
op.add_option("-v", "--visualize", type="string", help="file/directory prefix for .pdf output", default=None)
op.add_option("-w", "--wordindices", action="store_true", help="include word indices", default=False)
op.add_option("-c", "--classnames", action="store_true", help="include semantic class names", default=False)
(ops, args) = op.parse_args(sys.argv)
# Parse input file
input_source = ops.input if ops.input is sys.stdin else open(ops.input, "rt")
raw = xml.etree.ElementTree.XML(input_source.read())
snum = "None"
att_id = 0
# Get word number
def wordNum(wid):
if wid.startswith("x"):
return -1
else:
return int(wid[1:].strip("f"))
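# e.g. wordNum("w3") -> 3 and wordNum("w12f") -> 12, while variable ids such as
# "x1" (which carry no word index) map to -1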
# Get node span
def span(nid, graph, w):
if wordNum(nid) in w:
return []
w.append(wordNum(nid))
for n, e in graph[nid]:
if wordNum(n) not in w:
w.append(wordNum(n))
w.extend(span(n, graph, w))
return w
# findall wrapper
def findAll(elem, match):
return max(elem.findall(match), [])
# Class for representing predicates and attributes
class Pred:
def __init__(self):
self.attrib = []
self.one_of = []
self.opt = []
# Class for representing nodes, contains predicate and attribute information
class Node:
def __init__(self):
self.id = ""
self.className = ""
self.preds = defaultdict(Pred)
def addPred(self, pred, attrib, one_of, opt):
self.preds[pred].attrib.extend(attrib)
self.preds[pred].one_of.extend(one_of)
self.preds[pred].opt.extend(opt)
    def moses(self, graph):
        tree = " <tree label=\""
        # label: use the first pred (and its attributes) if any, otherwise the node id
        if self.preds:
            pred = list(self.preds.keys())[0]
            attrib = self.preds[pred].attrib
        else:
            pred = self.id
            attrib = []
        tree += "_".join([pred]+[k.upper()+"_"+v for (k, v) in attrib])
        # span
        tree += "\" span=\""
        s = span(self.id, graph, [])
        tree += str(min(s))+"-"+str(max(s))
        tree += "\"/>"
        return tree
def dot(self):
dot_node = self.id+" [label=<"
withClassName = ops.classnames and len(self.className) > 0
if ops.wordindices:
dot_node += self.id
if withClassName:
dot_node += ":"
if withClassName:
dot_node += self.className
if len(self.preds) > 0:
if ops.wordindices or withClassName:
dot_node += ":"
labels = []
for pname, p in self.preds.items():
label = ""
# pred
label += "<FONT POINT-SIZE=\"20.0\">"+pname+"</FONT>"
# att
atts = []
if len(p.attrib) > 0:
atts.append(",".join(["<"+k.upper()+">"+v for (k, v) in p.attrib]))
if len(p.one_of) > 0:
atts.append("|".join(["<"+k.upper()+">"+v for (k, v) in p.one_of]))
if len(p.opt) > 0:
atts.append("("+",".join(["<"+k.upper()+">"+v for (k, v) in p.opt])+")?")
if len(atts) > 0:
label += "<FONT POINT-SIZE=\"8.0\">"+",".join(atts)+"</FONT>"
labels.append(label)
dot_node += " | ".join(labels)
dot_node += ">];\n"
return dot_node
    def info(self):
        print "Node id:", self.id
        for pname, p in self.preds.items():
            print "\tPred:", pname
            print "\t\tAttrib:", p.attrib
            print "\t\tOne_of:", p.one_of
            print "\t\tOpt:", p.opt
        print "----------------"
# Returns just the id, stripping the class (if any)
def parseId(str):
colonIndex = str.find(":")
if colonIndex > 0: return str[:colonIndex]
else: return str
# Returns the class from the id, or the empty string if none
def parseClass(str):
colonIndex = str.find(":")
if colonIndex > 0: return str[colonIndex+1:]
else: return ""
# Method for parsing <node>
def parseNode(node, graph, nodes):
n = nodes[node.get("id")]
n.id = parseId(node.get("id"))
n.className = parseClass(node.get("id"))
attrib = [(k, v) for (k, v) in node.items() if k not in ["id", "pred"]]
if node.get("pred") is not None:
n.addPred(node.get("pred"), attrib, [], [])
nodes[n.id] = n
for elem in list(node):
if elem.tag == "rel":
parseRel(elem, n.id, graph, nodes, "")
elif elem.tag == "one-of":
parseOneOf(elem, n, attrib, node.get("pred"), graph, nodes)
elif elem.tag == "opt":
parseOpt(elem, n, graph, nodes)
elif elem.tag == "node":
parseNode(elem, graph, nodes)
else:
print snum+": Unexpected tag <"+elem.tag+"> after <node>"
quit()
# Method for parsing <opt>
def parseOpt(opt, node, graph, nodes):
for elem in list(opt):
if elem.tag == "atts":
for pname, p in node.preds.items():
node.addPred(pname, [], [], [(k, v) for (k, v) in elem.items() if k not in ["id", "pred"]])
elif elem.tag == "rel":
parseRel(elem, node.id, graph, nodes, "style=dotted, ")
else:
print snum+": Unexpected tag <"+elem.tag+"> after <node>"
quit()
# Method for parsing <one-of>
def parseOneOf(oneof, node, attrib, pred, graph, nodes):
global att_id
num_att = 0
for elem in list(oneof):
if elem.tag == "atts":
if pred is not None:
node.addPred(pred, [], [(k, v) for (k, v) in elem.items() if k not in ["id", "pred"]], [])
else:
node.addPred(elem.get("pred"), [], [(k, v) for (k, v) in elem.items() if k not in ["id", "pred"]], [])
if len(list(elem)) > 0:
num_att += 1
new_att = Node()
new_att.id = "att"+str(att_id)
att_id += 1
new_att.addPred(str(num_att), [], [], [])
nodes[new_att.id] = new_att
graph[node.id].append((new_att.id, " [style=dashed];\n"))
for rel in list(elem):
parseRel(rel, new_att.id, graph, nodes, "")
elif elem.tag == "rel":
num_att += 1
new_att = Node()
new_att.id = "att"+str(att_id)
att_id += 1
new_att.addPred(str(num_att), [], [], [])
nodes[new_att.id] = new_att
graph[node.id].append((new_att.id, " [style=dashed];\n"))
parseRel(elem, new_att.id, graph, nodes, "")
else:
print snum+": Unexpected tag <"+elem.tag+"> after <one-of>"
quit()
# Method for parsing <rel>
def parseRel(rel, nid, graph, nodes, style):
# <rel>
for subnode in list(rel):
if subnode.tag == "node":
edge_label = " ["+style+"label = \""+rel.get("name")+"\"];\n"
if subnode.get("id") is None:
graph[nid].append((parseId(subnode.get("idref")), edge_label))
else:
graph[nid].append((parseId(subnode.get("id")), edge_label))
parseNode(subnode, graph, nodes)
elif subnode.tag == "one-of":
subnode.set("name", rel.get("name"))
parseRel(subnode, nid, graph, nodes, "style=dashed, ")
else:
print snum+": Unexpected tag <"+subnode.tag+"> after <rel>"
quit()
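# Main driver: walk every <item> in the parsed XML input, skip items with no
# parses, and for each <lf> build an adjacency map (graph) plus a table of
# Node objects (nodes) via parseNode/parseRel.  When ops.visualize is set,
# each logical form is written out as a .dot file and rendered to PDF with
# the Graphviz "dot" tool; the intermediate .dot file is removed afterwards.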
# <item>
item_no = 0
for item in findAll(raw, "item"):
item_no += 1
if item.get("numOfParses") == "0":
print "Removing "+item.get("info")
else:
snum = item.get("info")
# <lf>
lf_num = 0
for lf in findAll(item, "lf"):
graph = defaultdict(list)
nodes = defaultdict(Node)
# <node>
for node in list(lf):
if node.tag == "node":
parseNode(node, graph, nodes)
else:
print snum+": Unexpected tag <"+node.tag+"> after <lf>"
quit()
# Plot the graph with GraphViz
if ops.visualize != None:
viz_name = ""
if type(item.get("info")) != type("string"):
viz_name = ops.visualize+".item"+str(item_no)+"."+str(lf_num)
else:
viz_name = ops.visualize+"."+item.get("info")+"."+str(lf_num)
viz = codecs.open(viz_name+".dot", "w", "utf-8")
viz.write("digraph lf {\n")
for (k, v) in nodes.items():
viz.write(v.dot())
for (left, rights) in graph.items():
for right in rights:
viz.write(left+"->"+right[0]+right[1])
viz.write("}\n")
viz.close()
os.system("dot -Tpdf "+viz_name+".dot -o "+viz_name+".pdf")
os.system("rm "+viz_name+".dot")
lf_num += 1
| lgpl-2.1 | 8,954,360,837,046,538,000 | 34.665414 | 118 | 0.505639 | false |
fedora-infra/pdc-updater | pdcupdater/handlers/__init__.py | 1 | 2266 | import abc
import fedmsg.utils
def load_handlers(config):
""" Import and instantiate all handlers listed in the given config. """
for import_path in config['pdcupdater.handlers']:
cls = fedmsg.utils.load_class(import_path)
handler = cls(config)
yield handler
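# The expected config shape is a list of dotted import paths, e.g. (purely
# illustrative path, not a real handler name):
#   {'pdcupdater.handlers': ['pdcupdater.handlers.example.ExampleHandler']}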
class BaseHandler(object):
""" An abstract base class for handlers to enforce API. """
__metaclass__ = abc.ABCMeta
def __init__(self, config):
self.config = config
def construct_topics(self, config):
# Don't use the environment when using STOMP
if config.get('stomp_uri'):
return [
'.'.join([config['topic_prefix'], topic])
for topic in self.topic_suffixes
]
else:
return [
'.'.join([config['topic_prefix'], config['environment'],
topic])
for topic in self.topic_suffixes
]
@abc.abstractproperty
def topic_suffixes(self):
pass
@abc.abstractmethod
def can_handle(self, pdc, msg):
""" Return True or False if this handler can handle this message. """
pass
@abc.abstractmethod
def handle(self, pdc, msg):
""" Handle a fedmsg and update PDC if necessary. """
pass
@abc.abstractmethod
def audit(self, pdc):
""" This is intended to be called from a cronjob once every few days
and is meant to (in a read-only fashion) check that what PDC thinks is
true about a service, is actually true.
It is expected to take a long time to run.
        It should return two lists. The first should be a list of items
present in PDC but not in the other service. The second should be a
list of items present in the other service, but not in PDC. Those lists
will be sewn together into an email to the releng group.
"""
pass
@abc.abstractmethod
def initialize(self, pdc):
""" This needs to be called only once when pdc-updater is first
installed. It should query the original data source and initialize PDC
with a base layer of data.
It is expected to take a very long time to run.
"""
pass
| lgpl-2.1 | -3,433,893,769,655,368,000 | 30.472222 | 79 | 0.6015 | false |
pchauncey/ansible | lib/ansible/modules/cloud/amazon/aws_direct_connect_connection.py | 29 | 11812 | #!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: aws_direct_connect_connection
short_description: Creates, deletes, modifies a DirectConnect connection
description:
- Create, update, or delete a Direct Connect connection between a network and a specific AWS Direct Connect location.
Upon creation the connection may be added to a link aggregation group or established as a standalone connection.
The connection may later be associated or disassociated with a link aggregation group.
version_added: "2.4"
author: "Sloane Hertel (@s-hertel)"
requirements:
- boto3
- botocore
options:
state:
description:
- The state of the Direct Connect connection.
choices:
- present
- absent
name:
description:
- The name of the Direct Connect connection. This is required to create a
new connection. To recreate or delete a connection I(name) or I(connection_id)
is required.
connection_id:
description:
- The ID of the Direct Connect connection. I(name) or I(connection_id) is
required to recreate or delete a connection. Modifying attributes of a
connection with I(force_update) will result in a new Direct Connect connection ID.
location:
description:
- Where the Direct Connect connection is located. Required when I(state=present).
bandwidth:
description:
- The bandwidth of the Direct Connect connection. Required when I(state=present).
choices:
- 1Gbps
- 10Gbps
link_aggregation_group:
description:
- The ID of the link aggregation group you want to associate with the connection.
This is optional in case a stand-alone connection is desired.
force_update:
description:
- To modify bandwidth or location the connection will need to be deleted and recreated.
By default this will not happen - this option must be set to True.
"""
EXAMPLES = """
# create a Direct Connect connection
aws_direct_connect_connection:
name: ansible-test-connection
state: present
location: EqDC2
link_aggregation_group: dxlag-xxxxxxxx
bandwidth: 1Gbps
register: dc
# disassociate the LAG from the connection
aws_direct_connect_connection:
state: present
connection_id: dc.connection.connection_id
location: EqDC2
bandwidth: 1Gbps
# replace the connection with one with more bandwidth
aws_direct_connect_connection:
state: present
name: ansible-test-connection
location: EqDC2
bandwidth: 10Gbps
force_update: True
# delete the connection
aws_direct_connect_connection:
state: absent
name: ansible-test-connection
"""
RETURN = """
connection:
description:
- The attributes of the Direct Connect connection
type: complex
returned: I(state=present)
contains:
aws_device:
description: The endpoint which the physical connection terminates on.
bandwidth:
description: The bandwidth of the connection.
connection_id:
description: ID of the Direct Connect connection.
connection_state:
description: The state of the connection.
location:
description: Where the connection is located.
owner_account:
description: The owner of the connection.
region:
description: The region in which the connection exists.
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (camel_dict_to_snake_dict, ec2_argument_spec, HAS_BOTO3,
get_aws_connection_info, boto3_conn, AWSRetry)
from ansible.module_utils.aws.direct_connect import (DirectConnectError, delete_connection,
associate_connection_and_lag, disassociate_connection_and_lag)
try:
import botocore
except:
pass
# handled by imported HAS_BOTO3
retry_params = {"tries": 10, "delay": 5, "backoff": 1.2}
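# Retry policy used by the AWSRetry.backoff decorators below: up to 10
# attempts, starting with a 5 second delay and growing by a factor of 1.2
# between attempts.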
def connection_status(client, connection_id):
return connection_exists(client, connection_id=connection_id, connection_name=None, verify=False)
@AWSRetry.backoff(**retry_params)
def connection_exists(client, connection_id=None, connection_name=None, verify=True):
try:
if connection_id:
response = client.describe_connections(connectionId=connection_id)
else:
response = client.describe_connections()
except botocore.exceptions.ClientError as e:
raise DirectConnectError(msg="Failed to describe DirectConnect ID {0}".format(connection_id),
last_traceback=traceback.format_exc(),
response=e.response)
match = []
connection = []
# look for matching connections
if len(response.get('connections', [])) == 1 and connection_id:
if response['connections'][0]['connectionState'] != 'deleted':
match.append(response['connections'][0]['connectionId'])
connection.extend(response['connections'])
for conn in response.get('connections', []):
if connection_name == conn['connectionName'] and conn['connectionState'] != 'deleted':
match.append(conn['connectionId'])
connection.append(conn)
    # verify whether the connection exists; if so, return the connection identifier, otherwise return False
if verify and len(match) == 1:
return match[0]
elif verify:
return False
# not verifying if the connection exists; just return current connection info
elif len(connection) == 1:
return {'connection': connection[0]}
return {'connection': {}}
@AWSRetry.backoff(**retry_params)
def create_connection(client, location, bandwidth, name, lag_id):
if not name:
raise DirectConnectError(msg="Failed to create a Direct Connect connection: name required.")
try:
if lag_id:
connection = client.create_connection(location=location,
bandwidth=bandwidth,
connectionName=name,
lagId=lag_id)
else:
connection = client.create_connection(location=location,
bandwidth=bandwidth,
connectionName=name)
except botocore.exceptions.ClientError as e:
raise DirectConnectError(msg="Failed to create DirectConnect connection {0}".format(name),
last_traceback=traceback.format_exc(),
response=e.response)
return connection['connectionId']
def changed_properties(current_status, location, bandwidth):
current_bandwidth = current_status['bandwidth']
current_location = current_status['location']
return current_bandwidth != bandwidth or current_location != location
@AWSRetry.backoff(**retry_params)
def update_associations(client, latest_state, connection_id, lag_id):
changed = False
if 'lagId' in latest_state and lag_id != latest_state['lagId']:
disassociate_connection_and_lag(client, connection_id, lag_id=latest_state['lagId'])
changed = True
if (changed and lag_id) or (lag_id and 'lagId' not in latest_state):
associate_connection_and_lag(client, connection_id, lag_id)
changed = True
return changed
def ensure_present(client, connection_id, connection_name, location, bandwidth, lag_id, forced_update):
# the connection is found; get the latest state and see if it needs to be updated
if connection_id:
latest_state = connection_status(client, connection_id=connection_id)['connection']
if changed_properties(latest_state, location, bandwidth) and forced_update:
ensure_absent(client, connection_id)
return ensure_present(client=client,
connection_id=None,
connection_name=connection_name,
location=location,
bandwidth=bandwidth,
lag_id=lag_id,
forced_update=forced_update)
elif update_associations(client, latest_state, connection_id, lag_id):
return True, connection_id
# no connection found; create a new one
else:
return True, create_connection(client, location, bandwidth, connection_name, lag_id)
return False, connection_id
@AWSRetry.backoff(**retry_params)
def ensure_absent(client, connection_id):
changed = False
if connection_id:
delete_connection(client, connection_id)
changed = True
return changed
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(),
location=dict(),
bandwidth=dict(choices=['1Gbps', '10Gbps']),
link_aggregation_group=dict(),
connection_id=dict(),
forced_update=dict(type='bool', default=False)
))
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=[('connection_id', 'name')],
required_if=[('state', 'present', ('location', 'bandwidth'))])
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if not region:
module.fail_json(msg="Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set.")
connection = boto3_conn(module, conn_type='client',
resource='directconnect', region=region,
endpoint=ec2_url, **aws_connect_kwargs)
connection_id = connection_exists(connection,
connection_id=module.params.get('connection_id'),
connection_name=module.params.get('name'))
if not connection_id and module.params.get('connection_id'):
module.fail_json(msg="The Direct Connect connection {0} does not exist.".format(module.params.get('connection_id')))
state = module.params.get('state')
try:
if state == 'present':
changed, connection_id = ensure_present(connection,
connection_id=connection_id,
connection_name=module.params.get('name'),
location=module.params.get('location'),
bandwidth=module.params.get('bandwidth'),
lag_id=module.params.get('link_aggregation_group'),
forced_update=module.params.get('forced_update'))
response = connection_status(connection, connection_id)
elif state == 'absent':
changed = ensure_absent(connection, connection_id)
response = {}
except DirectConnectError as e:
if e.response:
module.fail_json(msg=e.msg, exception=e.last_traceback, **e.response)
elif e.last_traceback:
module.fail_json(msg=e.msg, exception=e.last_traceback)
else:
module.fail_json(msg=e.msg)
module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
if __name__ == '__main__':
main()
| gpl-3.0 | 6,655,581,191,164,276,000 | 37.855263 | 147 | 0.630037 | false |
pylbert/upm | examples/python/rpr220.py | 6 | 2146 | #!/usr/bin/env python
# Author: Zion Orent <[email protected]>
# Copyright (c) 2015 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_rpr220 as upmRpr220
def main():
# This example uses a simple method to determine current status
# Instantiate an RPR220 digital pin D2
# This was tested on the Grove IR Reflective Sensor
myReflectiveSensor = upmRpr220.RPR220(2)
## Exit handlers ##
# This stops python from printing a stacktrace when you hit control-C
def SIGINTHandler(signum, frame):
raise SystemExit
# This lets you run code on exit,
# including functions from myReflectiveSensor
def exitHandler():
print("Exiting")
sys.exit(0)
# Register exit handlers
atexit.register(exitHandler)
signal.signal(signal.SIGINT, SIGINTHandler)
while(1):
if (myReflectiveSensor.blackDetected()):
print("Black detected")
else:
print("Black NOT detected")
time.sleep(.1)
if __name__ == '__main__':
main()
| mit | 5,392,803,090,927,367,000 | 35.372881 | 73 | 0.721342 | false |
bsmedberg/socorro | webapp-django/crashstats/supersearch/views.py | 1 | 10765 | import isodate
import datetime
import json
import math
import urllib
from collections import defaultdict
from django import http
from django.conf import settings
from django.contrib.auth.decorators import permission_required
from django.core.urlresolvers import reverse
from django.shortcuts import render
from django.views.decorators.http import require_POST
from django.utils.timezone import utc
from waffle.decorators import waffle_switch
from crashstats.api.views import has_permissions
from crashstats.crashstats import models, utils
from crashstats.crashstats.views import pass_default_context
from . import forms
from .form_fields import split_on_operator
from .models import SuperSearchFields, SuperSearchUnredacted, Query
DEFAULT_COLUMNS = (
'date',
'signature',
'product',
'version',
'build_id',
'platform',
)
DEFAULT_FACETS = (
'signature',
)
# Facetting on those fields doesn't provide useful information.
EXCLUDED_FIELDS_FROM_FACETS = (
'date',
'dump',
)
def get_allowed_fields(user):
return tuple(
x['name']
for x in SuperSearchFields().get().values()
if x['is_exposed']
and has_permissions(user, x['permissions_needed'])
)
def get_supersearch_form(request):
products = models.ProductsVersions().get()
versions = models.CurrentVersions().get()
platforms = models.Platforms().get()
all_fields = SuperSearchFields().get()
form = forms.SearchForm(
all_fields,
products,
versions,
platforms,
request.user,
request.GET
)
return form
def get_params(request):
form = get_supersearch_form(request)
if not form.is_valid():
return http.HttpResponseBadRequest(str(form.errors))
params = {}
for key in form.cleaned_data:
if hasattr(form.fields[key], 'prefixed_value'):
value = form.fields[key].prefixed_value
else:
value = form.cleaned_data[key]
params[key] = value
params['_facets'] = request.GET.getlist('_facets') or DEFAULT_FACETS
allowed_fields = get_allowed_fields(request.user)
# Make sure only allowed fields are used
params['_facets'] = [
x for x in params['_facets'] if x in allowed_fields
]
return params
@waffle_switch('supersearch-all')
@pass_default_context
def search(request, default_context=None):
allowed_fields = get_allowed_fields(request.user)
context = default_context
context['possible_facets'] = [
{'id': x, 'text': x.replace('_', ' ')} for x in allowed_fields
if x not in EXCLUDED_FIELDS_FROM_FACETS
]
context['possible_columns'] = [
{'id': x, 'text': x.replace('_', ' ')} for x in allowed_fields
]
context['facets'] = request.GET.getlist('_facets') or DEFAULT_FACETS
context['columns'] = request.GET.getlist('_columns') or DEFAULT_COLUMNS
return render(request, 'supersearch/search.html', context)
@waffle_switch('supersearch-all')
def search_results(request):
'''Return the results of a search. '''
params = get_params(request)
if isinstance(params, http.HttpResponseBadRequest):
# There was an error in the form, let's return it.
return params
data = {}
data['query'] = {
'total': 0,
'total_count': 0,
'total_pages': 0
}
allowed_fields = get_allowed_fields(request.user)
current_query = request.GET.copy()
if 'page' in current_query:
del current_query['page']
data['params'] = current_query.copy()
if '_columns' in data['params']:
del data['params']['_columns']
if '_facets' in data['params']:
del data['params']['_facets']
data['columns'] = request.GET.getlist('_columns') or DEFAULT_COLUMNS
# Make sure only allowed fields are used
data['columns'] = [
x for x in data['columns'] if x in allowed_fields
]
try:
current_page = int(request.GET.get('page', 1))
except ValueError:
return http.HttpResponseBadRequest('Invalid page')
if current_page <= 0:
current_page = 1
results_per_page = 50
data['current_page'] = current_page
data['results_offset'] = results_per_page * (current_page - 1)
params['_results_number'] = results_per_page
params['_results_offset'] = data['results_offset']
data['current_url'] = '%s?%s' % (
reverse('supersearch.search'),
current_query.urlencode()
)
api = SuperSearchUnredacted()
try:
search_results = api.get(**params)
except models.BadStatusCodeError, e:
# We need to return the error message in some HTML form for jQuery to
# pick it up.
return http.HttpResponseBadRequest('<ul><li>%s</li></ul>' % e)
if 'signature' in search_results['facets']:
# Bugs for each signature
signatures = [h['term'] for h in search_results['facets']['signature']]
if signatures:
bugs = defaultdict(list)
bugs_api = models.Bugs()
for b in bugs_api.get(signatures=signatures)['hits']:
bugs[b['signature']].append(b['id'])
for hit in search_results['facets']['signature']:
sig = hit['term']
if sig in bugs:
if 'bugs' in hit:
hit['bugs'].extend(bugs[sig])
else:
hit['bugs'] = bugs[sig]
search_results['total_pages'] = int(math.ceil(
search_results['total'] / float(results_per_page)))
search_results['total_count'] = search_results['total']
data['query'] = search_results
data['report_list_query_string'] = urllib.urlencode(
utils.sanitize_dict(
get_report_list_parameters(params)
),
True
)
return render(request, 'supersearch/search_results.html', data)
@waffle_switch('supersearch-all')
@utils.json_view
def search_fields(request):
'''Return the JSON document describing the fields used by the JavaScript
dynamic_form library. '''
form = get_supersearch_form(request)
exclude = request.GET.getlist('exclude')
return form.get_fields_list(exclude=exclude)
def get_report_list_parameters(source):
'''Return a list of parameters that are compatible with the report/list
page. This is not ideal and cannot be fully compatible because we have
operators in supersearch and not in report/list.
'''
params = {}
for key, value in source.items():
if not value:
continue
if key in (
'hang_type',
'platform',
'process_type',
'product',
'reason',
):
params[key] = value
elif key == 'release_channel':
params['release_channels'] = value
elif key == 'build_id':
params['build_id'] = []
for build in value:
operator, build = split_on_operator(build)
if operator:
# The report/list/ page is unable to understand operators.
continue
params['build_id'].append(build)
if not params['build_id']:
del params['build_id']
elif key == 'version':
if 'product' in source:
params['version'] = []
for p in source['product']:
for v in value:
params['version'].append('%s:%s' % (p, v))
elif key == 'date':
lower = upper = up_ope = None
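            # Track the earliest (lower) and latest (upper) date seen, plus
            # the operator attached to the latest one; report/list only
            # understands a reference date and an hour range, not operators.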
for dt in value:
operator, dt = split_on_operator(dt)
dt = isodate.parse_datetime(dt)
if lower is None or upper is None:
lower = upper = dt
up_ope = operator
elif lower > dt:
lower = dt
elif upper < dt:
upper = dt
up_ope = operator
def to_hours(delta):
return delta.days * 24 + delta.seconds / 3600
if lower == upper:
# there's only one date
if up_ope is not None and '<' in up_ope:
params['date'] = upper
else:
params['date'] = (
datetime.datetime.utcnow().replace(tzinfo=utc)
)
params['range_value'] = to_hours(params['date'] - upper)
params['range_unit'] = 'hours'
else:
params['date'] = upper
params['range_value'] = to_hours(upper - lower)
params['range_unit'] = 'hours'
return params
@waffle_switch('supersearch-all')
@waffle_switch('supersearch-custom-query')
@permission_required('crashstats.run_custom_queries')
@pass_default_context
def search_custom(request, default_context=None):
'''Return the basic search page, without any result. '''
error = None
query = None
params = get_params(request)
if isinstance(params, http.HttpResponseBadRequest):
# There was an error in the form, but we want to do the default
# behavior and just display an error message.
error = params
else:
# Get the JSON query that supersearch generates and show it.
params['_return_query'] = 'true'
api = SuperSearchUnredacted()
try:
query = api.get(**params)
except models.BadStatusCodeError, e:
error = e
schema = settings.ELASTICSEARCH_INDEX_SCHEMA
now = datetime.datetime.utcnow().replace(tzinfo=utc)
possible_indices = []
for i in range(26):
index = (now - datetime.timedelta(weeks=i)).strftime(schema)
possible_indices.append({'id': index, 'text': index})
context = default_context
context['elasticsearch_indices'] = possible_indices
if query:
context['query'] = json.dumps(query['query'])
context['indices'] = ','.join(query['indices'])
context['error'] = error
return render(request, 'supersearch/search_custom.html', context)
@waffle_switch('supersearch-all')
@waffle_switch('supersearch-custom-query')
@permission_required('crashstats.run_custom_queries')
@require_POST
@utils.json_view
def search_query(request):
form = forms.QueryForm(request.POST)
if not form.is_valid():
return http.HttpResponseBadRequest(form.errors)
api = Query()
try:
results = api.get(
query=form.cleaned_data['query'],
indices=form.cleaned_data['indices']
)
except models.BadStatusCodeError, e:
return http.HttpResponseBadRequest(e.message)
return results
| mpl-2.0 | 7,692,848,706,769,278,000 | 28.173442 | 79 | 0.593312 | false |
pbs/django-cms | menus/base.py | 6 | 2114 | # -*- coding: utf-8 -*-
from django.utils.translation import get_language
from django.utils.encoding import smart_str
class Menu(object):
namespace = None
def __init__(self):
if not self.namespace:
self.namespace = self.__class__.__name__
def get_nodes(self, request):
"""
should return a list of NavigationNode instances
"""
raise NotImplementedError
class Modifier(object):
def modify(self, request, nodes, namespace, root_id, post_cut, breadcrumb):
pass
class NavigationNode(object):
def __init__(self, title, url, id, parent_id=None, parent_namespace=None, attr=None, visible=True):
self.children = [] # do not touch
self.parent = None # do not touch, code depends on this
self.namespace = None # TODO: Assert why we need this and above
self.title = title
self.url = self._remove_current_root(url)
self.id = id
self.parent_id = parent_id
self.parent_namespace = parent_namespace
self.visible = visible
if attr:
self.attr = attr
else:
self.attr = {} # To avoid declaring a dict in defaults...
def __repr__(self):
return "<Navigation Node: %s>" % smart_str(self.title)
def _remove_current_root(self, url):
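        # Strip the current language prefix (e.g. "/en/") from the URL so
        # the node stores it language-neutrally.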
current_root = "/%s/" % get_language()
if url[:len(current_root)] == current_root:
url = url[len(current_root) - 1:]
return url
def get_menu_title(self):
return self.title
def get_absolute_url(self):
return self.url
def get_attribute(self, name):
return self.attr[name]
def get_descendants(self):
nodes = []
for node in self.children:
nodes.append(node)
nodes += node.get_descendants()
return nodes
def get_ancestors(self):
nodes = []
if getattr(self, 'parent', None):
nodes.append(self.parent)
nodes += self.parent.get_ancestors()
return nodes | bsd-3-clause | 4,657,401,836,683,723,000 | 28.375 | 103 | 0.569536 | false |
rishig/zulip | zerver/webhooks/greenhouse/tests.py | 2 | 3550 | # -*- coding: utf-8 -*-
from mock import MagicMock, patch
from zerver.lib.test_classes import WebhookTestCase
class GreenhouseHookTests(WebhookTestCase):
STREAM_NAME = 'greenhouse'
URL_TEMPLATE = "/api/v1/external/greenhouse?stream={stream}&api_key={api_key}"
FIXTURE_DIR_NAME = 'greenhouse'
CONTENT_TYPE = "application/x-www-form-urlencoded"
def test_message_candidate_hired(self) -> None:
expected_topic = "Hire Candidate - 19"
expected_message = """
Hire Candidate Johnny Smith (ID: 19), applying for:
* **Role**: Developer
* **Emails**: [email protected] (Personal), [email protected] (Work)
* **Attachments**: [Resume](https://prod-heroku.s3.amazonaws.com/...)
""".strip()
self.send_and_test_stream_message('candidate_hired',
expected_topic,
expected_message,
content_type=self.CONTENT_TYPE)
def test_message_candidate_rejected(self) -> None:
expected_topic = "Reject Candidate - 265788"
expected_message = """
Reject Candidate Hector Porter (ID: 265788), applying for:
* **Role**: Designer
* **Emails**: [email protected] (Personal)
* **Attachments**: [Resume](https://prod-heroku.s3.amazonaws.com/...)
""".strip()
self.send_and_test_stream_message('candidate_rejected',
expected_topic,
expected_message,
content_type=self.CONTENT_TYPE)
def test_message_candidate_stage_change(self) -> None:
expected_topic = "Candidate Stage Change - 265772"
expected_message = """
Candidate Stage Change Giuseppe Hurley (ID: 265772), applying for:
* **Role**: Designer
* **Emails**: [email protected] (Personal)
* **Attachments**: [Resume](https://prod-heroku.s3.amazonaws.com/...), [Cover_Letter](https://prod-heroku.s3.amazonaws.com/...), [Attachment](https://prod-heroku.s3.amazonaws.com/...)
""".strip()
self.send_and_test_stream_message('candidate_stage_change',
expected_topic,
expected_message,
content_type=self.CONTENT_TYPE)
def test_message_prospect_created(self) -> None:
expected_topic = "New Prospect Application - 968190"
expected_message = """
New Prospect Application Trisha Troy (ID: 968190), applying for:
* **Role**: Designer
* **Emails**: [email protected] (Personal)
* **Attachments**: [Resume](https://prod-heroku.s3.amazonaws.com/...)
""".strip()
self.send_and_test_stream_message('prospect_created',
expected_topic,
expected_message,
content_type=self.CONTENT_TYPE)
@patch('zerver.webhooks.greenhouse.view.check_send_webhook_message')
def test_ping_message_ignore(
self, check_send_webhook_message_mock: MagicMock) -> None:
self.url = self.build_webhook_url()
payload = self.get_body('ping_event')
result = self.client_post(self.url, payload, content_type=self.CONTENT_TYPE)
self.assertFalse(check_send_webhook_message_mock.called)
self.assert_json_success(result)
def get_body(self, fixture_name: str) -> str:
return self.webhook_fixture_data("greenhouse", fixture_name, file_type="json")
| apache-2.0 | -409,897,108,513,201,600 | 43.936709 | 183 | 0.590986 | false |
vijayendrabvs/hap | neutron/services/loadbalancer/drivers/embrane/db.py | 12 | 1804 | # Copyright 2014 Embrane, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Ivar Lazzaro, Embrane, Inc. [email protected]
import neutron.db.api as db
from neutron.db import models_v2 as nmodel
from neutron.services.loadbalancer.drivers.embrane import models
def initialize():
db.configure_db()
def add_pool_port(context, pool_id, port_id):
session = context.session
with session.begin(subtransactions=True):
pool_port = models.PoolPort()
pool_port.pool_id = pool_id
pool_port.port_id = port_id
session.add(pool_port)
def get_pool_port(context, pool_id):
return (context.session.query(models.PoolPort).filter_by(pool_id=pool_id).
first())
def delete_pool_backend(context, pool_id):
session = context.session
backend = (session.query(models.PoolPort).filter_by(
pool_id=pool_id))
for b in backend:
delete_pool_port(context, b)
def delete_pool_port(context, backend_port):
session = context.session
with session.begin(subtransactions=True):
port = (session.query(nmodel.Port).filter_by(
id=backend_port['port_id'])).first()
if port:
session.delete(backend_port)
session.delete(port)
| apache-2.0 | -1,744,174,685,889,021,000 | 31.214286 | 78 | 0.68459 | false |
mscuthbert/abjad | abjad/tools/selectiontools/test/test_selectiontools_VerticalMoment___len__.py | 2 | 1499 | # -*- encoding: utf-8 -*-
from abjad import *
def test_selectiontools_VerticalMoment___len___01():
score = Score(
r'''
\new Staff {
\times 4/3 {
d''8
c''8
b'8
}
}
\new PianoStaff <<
\new Staff {
a'4
g'4
}
\new Staff {
\clef "bass"
f'8
e'8
d'8
c'8
}
>>
'''
)
staff_group = score[1]
vertical_moment = inspect_(score).get_vertical_moment_at(Offset(1, 8))
"VerticalMoment(Score<<2>>, Staff{1}, {@ 3:4 d''8, c''8, b'8 @}, d''8, PianoStaff<<2>>, Staff{2}, a'4, Staff{4}, e'8)"
assert len(vertical_moment) == 9
vertical_moment = inspect_(score[0]).get_vertical_moment_at(Offset(1, 8))
"VerticalMoment(Staff{1}, {@ 3:4 d''8, c''8, b'8 @}, d''8)"
assert len(vertical_moment) == 3
vertical_moment = inspect_(staff_group).get_vertical_moment_at(Offset(1, 8))
"VerticalMoment(PianoStaff<<2>>, Staff{2}, a'4, Staff{4}, e'8)"
assert len(vertical_moment) == 5
vertical_moment = inspect_(staff_group[0]).get_vertical_moment_at(Offset(1, 8))
"VerticalMoment(Staff{2}, a'4)"
assert len(vertical_moment) == 2
vertical_moment = inspect_(staff_group[1]).get_vertical_moment_at(Offset(1, 8))
"VerticalMoment(Staff{2}, e'8)"
assert len(vertical_moment) == 2 | gpl-3.0 | 8,077,643,505,249,827,000 | 27.846154 | 122 | 0.496998 | false |
andrewyoung1991/abjad | abjad/tools/labeltools/label_leaves_in_expr_with_named_interval_classes.py | 2 | 1742 | # -*- encoding: utf-8 -*-
from abjad.tools import markuptools
from abjad.tools import pitchtools
from abjad.tools import scoretools
from abjad.tools.topleveltools import attach
from abjad.tools.topleveltools import iterate
def label_leaves_in_expr_with_named_interval_classes(expr, markup_direction=Up):
r"""Label leaves in `expr` with named interval classes:
::
>>> notes = scoretools.make_notes([0, 25, 11, -4, -14, -13, 9, 10, 6, 5], [Duration(1, 8)])
>>> staff = Staff(notes)
>>> labeltools.label_leaves_in_expr_with_named_interval_classes(staff)
.. doctest::
>>> print(format(staff))
\new Staff {
c'8 ^ \markup { +aug8 }
cs'''8 ^ \markup { -M2 }
b'8 ^ \markup { -aug2 }
af8 ^ \markup { -m7 }
bf,8 ^ \markup { aug1 }
b,8 ^ \markup { +m7 }
a'8 ^ \markup { +m2 }
bf'8 ^ \markup { -dim4 }
fs'8 ^ \markup { aug1 }
f'8
}
::
>>> show(staff) # doctest: +SKIP
Returns none.
"""
for note in iterate(expr).by_class(scoretools.Note):
logical_voice_iterator = iterate(note).by_logical_voice_from_component(
scoretools.Leaf,
)
try:
next(logical_voice_iterator)
next_leaf = next(logical_voice_iterator)
if isinstance(next_leaf, scoretools.Note):
mdi = pitchtools.NamedInterval.from_pitch_carriers(
note, next_leaf)
mdic = pitchtools.NamedIntervalClass(mdi)
markup = markuptools.Markup(mdic, markup_direction)
attach(markup, note)
except StopIteration:
pass
| gpl-3.0 | 9,164,501,160,472,659,000 | 30.672727 | 99 | 0.54822 | false |
sparkslabs/kamaelia | Sketches/MPS/Old/greylisting.py | 3 | 23214 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import Axon
import socket
import time
import math
import anydbm
import pprint
import copy
import os
from Axon.Ipc import producerFinished, WaitComplete
from Kamaelia.Chassis.ConnectedServer import MoreComplexServer
from Kamaelia.IPC import socketShutdown
from Kamaelia.Internet.TCPClient import TCPClient
class MailHandler(Axon.Component.component):
logfile = "greylist.log"
debuglogfile = "greylist-debug.log"
def __init__(self,**argd):
super(MailHandler, self).__init__(**argd)
self.inbox_log = []
self.line = None
def logging_recv_connection(self):
self.line = self.recv("inbox")
self.inbox_log.append(self.line)
def getline(self):
control_message = ""
while 1:
while not self.anyReady():
self.pause(); # print "PAUSING", repr(self.inbox_log), repr(self.line)
yield 1
while self.dataReady("control"):
control_message = self.recv("control")
if isinstance(control_message, socketShutdown):
self.client_connected = False
if self.dataReady("inbox"):
self.logging_recv_connection()
return
else:
if not self.client_connected :
self.breakConnection = True
return
yield 1
def handleCommand(self,command):
if len(command) < 1:
self.netPrint("500 Sorry we don't like broken mailers")
self.breakConnection = True
return
if command[0] == "HELO": return self.handleHelo(command) # RFC 2821 4.5.1 required
if command[0] == "EHLO": return self.handleEhlo(command) # RFC 2821 4.5.1 required
if command[0] == "MAIL": return self.handleMail(command) # RFC 2821 4.5.1 required
if command[0] == "RCPT": return self.handleRcpt(command) # RFC 2821 4.5.1 required
if command[0] == "DATA": return self.handleData(command) # RFC 2821 4.5.1 required
if command[0] == "QUIT": return self.handleQuit(command) # RFC 2821 4.5.1 required
if command[0] == "RSET": return self.handleRset(command) # RFC 2821 4.5.1 required
if command[0] == "NOOP": return self.handleNoop(command) # RFC 2821 4.5.1 required
if command[0] == "VRFY": return self.handleVrfy(command) # RFC 2821 4.5.1 required
if command[0] == "HELP": return self.handleHelp(command)
self.netPrint("500 Sorry we don't like broken mailers")
self.breakConnection = True
def noteToLog(self, line):
try:
x = open(self.logfile,"a")
except IOError:
x = open(self.logfile,"w")
x.write(line+"\n")
x.flush()
x.close()
def noteToDebugLog(self, line):
try:
x = open(self.debuglogfile,"a")
except IOError:
x = open(self.debuglogfile,"w")
x.write(line+"\n")
x.flush()
x.close()
def netPrint(self, *args):
for i in args:
self.noteToDebugLog(i)
self.send(i+"\r\n", "outbox")
def handleConnect(self): pass
def handleHelo(self,command): pass
def handleEhlo(self,command): pass
def handleMail(self,command): pass
def handleRcpt(self,command): pass
def handleData(self,command): pass
def handleQuit(self,command): pass
def handleRset(self,command): pass
def handleNoop(self,command): pass
def handleVrfy(self,command): pass
def handleHelp(self,command): pass
def logResult(self): pass
def handleDisconnect(self): yield 1
def lastline(self):
if self.line == ".\r\n":
return True
if len(self.line) >=5:
if self.line[-5:] == "\r\n.\r\n":
return True
if len(self.line) >=4:
if self.line[-4:] == "\n.\r\n":
return True
return False
def main(self):
brokenClient = False
self.handleConnect()
self.gettingdata = False
self.client_connected = True
self.breakConnection = False
while (not self.gettingdata) and (not self.breakConnection):
yield WaitComplete(self.getline(), tag="_getline1")
try:
command = self.line.split()
except AttributeError:
brokenClient = True
break
self.handleCommand(command)
if not brokenClient:
if (not self.breakConnection):
EndOfMessage = False
self.netPrint('354 Enter message, ending with "." on a line by itself')
while not EndOfMessage:
yield WaitComplete(self.getline(), tag="getline2")
if self.lastline():
EndOfMessage = True
self.netPrint("250 OK id-deferred")
self.send(producerFinished(),"signal")
if not brokenClient:
yield WaitComplete(self.handleDisconnect(),tag="_handleDisconnect")
self.logResult()
class ConcreteMailHandler(MailHandler):
Inboxes = {
"inbox" : "Data from the client connecting to the server comes in here",
"control" : "Shutdown & control messages regarding client side socket handling",
"tcp_inbox" : "This is where we get respones from the real SMTP server",
"tcp_control" : "This is where we get shutdown information from the real SMTP server",
}
Outboxes = {
"outbox" : "Data sent here goes back the the client connecting to the server",
"signal" : "Shutdown & control messages regarding client side socket handling",
"tcp_outbox" : "Data sent here is sent to the real SMTP server",
"tcp_signal" : "We send messages here to shutdown the connection to the real SMTP connection",
}
peer = "*** UNDEFINED ***"
peerport = "*** UNDEFINED ***"
local = "*** UNDEFINED ***"
localport = "*** UNDEFINED ***"
servername = "Testing.server.local"
serverid = "MPS SMTP 1.0"
smtp_ip = "192.168.2.9"
smtp_port = 25
def connectToRealSMTPServer(self):
self.TCPClient = TCPClient(self.smtp_ip, self.smtp_port).activate()
self.link((self, "tcp_outbox"), (self.TCPClient, "inbox"))
self.link((self, "tcp_signal"), (self.TCPClient, "control"))
self.link((self.TCPClient, "outbox"), (self,"tcp_inbox"))
self.link((self.TCPClient, "signal"), (self,"tcp_control"))
def __init__(self, **argv):
super(ConcreteMailHandler, self).__init__(**argv)
self.recipients = []
self.sender = None
self.remotename = ""
self.seenHelo = False
self.seenMail = False
self.seenRcpt = False
self.acceptingMail = False
self.mailStatus = ""
def error(self, message): # Yes, we're quite nasty - we break the connection if the person makes a mistake
        self.netPrint(message) # This violates Postel's law. The idea is to catch out broken spam mailers...
self.breakConnection = True
def RelayError(self):
self.error("550 relay not permitted")
def handleConnect(self):
self.netPrint("220 %s ESMTP %s %s" %
(self.servername,
self.serverid,
time.ctime())
)
def handleEhlo(self,command):
self.netPrint('500 Command Not Recognised')
def handleHelo(self,command):
self.actual_remote_ip = "192.168.2.5"
if len(command) != 2:
self.error("501 Syntactically invalid HELO argument(s)")
return
self.remotename = command[1]
self.netPrint("250 %s Hello %s %s" %
(self.servername, self.remotename,self.peer)
)
self.inbox_log = self.inbox_log[-1:] # Remove all previous items
self.seenHelo = True
def handleHelp(self,command):
self.error("500 unrecognised command")
def handleVrfy(self,command):
self.netPrint("252 Cannot VRFY user")
def handleRset(self,command):
# self.seenHelo = self.seenHelo - leave unchanged (comment is to note we *have* thought about this!)
self.recipients = []
self.sender = None
self.seenMail = False
self.seenRcpt = False
self.acceptingMail = False
self.netPrint("250 OK")
self.mailStatus = ""
def handleNoop(self,command):
self.netPrint("250 OK")
def handleMail(self,command):
if len(command) < 2:
self.error("500 unrecognised command")
return
if len(command) == 2:
if command[1][:5].upper() == "FROM:" and len(command[1])>5 :
command = [ command[0], "FROM:", command[1][5:] ]
else:
self.error("501 MAIL must have an address operand"+repr(command))
return
if command[1].upper() != "FROM:":
self.error("500 unrecognised command")
return
if not self.seenHelo:
self.netPrint("503 5.5.2 Send hello first")
return
if self.seenMail:
self.netPrint("503 sender already given")
return
self.sender = command[2]
self.netPrint("250 OK")
self.seenMail = True
def handleRcpt(self,command):
if len(command) < 2: # Protocol syntax error
self.error("500 unrecognised command")
return
if len(command) == 2: # Protocol syntax error
if command[1][:3].upper() == "TO:" and len(command[1])>3 :
command = [ command[0], "TO:", command[1][3:] ]
else:
self.error("501 RCPT must have an address operand"+repr(command))
return
if command[1].upper() != "TO:": # Protocol syntax error
self.error("500 unrecognised command")
return
if not self.seenMail: # Protocol usage error
self.error("503 sender not yet given")
return
self.netPrint("250 ACCEPTED")
self.recipients.append(command[2])
self.seenRcpt = True
def handleData(self, command):
if not self.seenRcpt:
self.error("503 valid RCPT command must precede DATA")
return
if self.shouldWeAcceptMail():
self.acceptMail()
else:
self.deferMail()
def handleQuit(self,command):
self.netPrint("221 %s closing connection" % (self.servername,))
self.breakConnection = True
def shouldWeAcceptMail(self):
return False # Default policy - don't accept any email
def deferMail(self):
self.netPrint("451 4.7.1 Please try again later")
self.breakConnection = True
self.mailStatus = "DEFERRED"
def acceptMail(self):
self.gettingdata = True
self.acceptingMail = True
self.mailStatus = "ACCEPTED"
def getline_fromsmtpserver(self):
while not self.dataReady("tcp_inbox"):
self.pause()
yield 1
self.smtp_line = self.recv("tcp_inbox")
def handleDisconnect(self):
if not self.acceptingMail: return
self.connectToRealSMTPServer()
yield 1
sentDataLine = False
for line in self.inbox_log:
if not sentDataLine: # wait for a response from the server before sending next line
yield WaitComplete(self.getline_fromsmtpserver(),tag="getline_smtp")
self.send(line, "tcp_outbox")
yield 1
if not sentDataLine:
sentDataLine = (line == "DATA\r\n")
yield 1
self.send(producerFinished(), "tcp_signal")
class GreyListingPolicy(ConcreteMailHandler):
allowed_senders = [] # List of senders
allowed_sender_nets = [] # Only class A,B, C network style networks at present (ie IP prefixes)
    allowed_domains = [ ] # list of domains we forward mail for
def sentFromAllowedIPAddress(self):
if self.peer in self.allowed_senders:
return True
return False
def sentFromAllowedNetwork(self):
for network_prefix in self.allowed_sender_nets:
if self.peer[:len(network_prefix)] == network_prefix:
return True
return False
def sentToADomainWeForwardFor(self):
for recipient in self.recipients:
recipient = recipient.replace("<", "")
recipient = recipient.replace(">", "")
try:
domain = recipient[recipient.find("@")+1:]
domain = domain.lower()
if not (domain in self.allowed_domains):
return False
except:
raise
return False # don't care why it fails if it fails
return True # Only reach here if all domains in allowed_domains
def isGreylisted(self, recipient):
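        # Greylisting tunables (all in seconds):
        #   max_grey       (~35 days) - greylist entries unused for longer are dropped
        #   too_soon       (3 min)    - retrying this quickly resets the clock and stays deferred
        #   min_defer_time (1 hour)   - keep deferring until at least this long after the recorded attempt
        #   max_defer_time (~7 hours) - waiting longer than this counts as a fresh attempt (deferred again)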
max_grey = 3000000
too_soon = 180
min_defer_time = 3600
max_defer_time = 25000
IP = self.peer
sender = self.sender
def _isGreylisted(greylist, seen, IP,sender,recipient):
# If greylisted, and not been there too long, allow through
if greylist.get(triplet,None) is not None:
greytime = float(greylist[triplet])
if (time.time() - greytime) > max_grey:
del greylist[triplet]
try:
del seen[triplet]
except KeyError:
# We don't care if it's already gone
pass
# REFUSED: grey too long
else:
# ACCEPTED: already grey (have reset greytime)
greylist[triplet] = str(time.time())
return True
# If not seen this triplet before, defer and note triplet
if seen.get( triplet, None) is None:
seen[triplet] = str(time.time())
return False
# If triplet retrying waaay too soon, reset their timer & defer
last_tried = float(seen[triplet])
if (time.time() - last_tried) < too_soon:
seen[triplet] = str(time.time())
return False
# If triplet retrying too soon generally speaking just defer
if (time.time() - last_tried) < min_defer_time :
return False
# If triplet hasn't been seen in aaaages, defer
if (time.time() - last_tried) > max_defer_time :
seen[triplet] = str(time.time())
return False
# Otherwise, allow through & greylist then
greylist[triplet] = str(time.time())
return True
greylist = anydbm.open("greylisted.dbm","c")
seen = anydbm.open("attempters.dbm","c")
triplet = repr((IP,sender,recipient))
result = _isGreylisted(greylist, seen, IP,sender,recipient)
seen.close()
greylist.close()
return result
def whiteListed(self, recipient):
for (IP, sender, r) in self.whitelisted_triples:
if self.peer == IP:
if self.sender == sender:
if recipient == r:
return True
for (remotename, network_prefix, r) in self.whitelisted_nonstandard_triples:
if remotename == self.remotename:
if self.peer[:len(network_prefix)] == network_prefix:
if r == recipient:
return True
return False
def shouldWeAcceptMail(self):
if self.sentFromAllowedIPAddress(): return True # Allowed hosts can always send to anywhere through us
if self.sentFromAllowedNetwork(): return True # People on truste networks can always do the same
if self.sentToADomainWeForwardFor():
try:
for recipient in self.recipients:
if self.whiteListed(recipient):
return True
if not self.isGreylisted(recipient):
return False
except Exception, e:
# print "Whoops", e
pass
return True # Anyone can always send to hosts we own
return False
def logResult(self):
def m(x, w=2):
return "0"*(w-len(str(x)))+str(x)
now = time.time()
msec = int((now -math.floor(now))*1000)
x= time.gmtime(now)
stamp = "".join([ str(z) for z in [ m(x.tm_year,4), m(x.tm_mon,2), m(x.tm_mday,2), m(x.tm_hour,2), m(x.tm_min,2), m(x.tm_sec,2), ".", m(msec,3) ] ])
logline = str(stamp) + " | "
logline += str(self.remotename) + " | "
logline += str(self.peer) + " | "
logline += str(self.sender) + " | "
logline += str(", ".join(self.recipients)) + " | "
logline += str(self.mailStatus) + " | "
self.noteToLog(logline)
# print logline
class PeriodicWakeup(Axon.ThreadedComponent.threadedcomponent):
interval = 300
def main(self):
while 1:
time.sleep(self.interval)
self.send("tick", "outbox")
class WakeableIntrospector(Axon.Component.component):
logfile = "greylist-debug.log"
def noteToLog(self, line):
try:
x = open(self.logfile,"a")
except IOError:
x = open(self.logfile,"w")
x.write(line+"\n")
x.flush()
x.close()
def main(self):
while 1:
Q = [ q.name for q in self.scheduler.listAllThreads() ]
Q.sort()
self.noteToLog("*debug* THREADS"+ str(Q))
self.scheduler.debuggingon = False
yield 1
while not self.dataReady("inbox"):
self.pause()
yield 1
while self.dataReady("inbox"): self.recv("inbox")
from Kamaelia.Chassis.Pipeline import Pipeline
Pipeline(
PeriodicWakeup(),
WakeableIntrospector(),
).activate()
from Kamaelia.Internet.TimeOutCSA import NoActivityTimeout
from Kamaelia.Internet.ConnectedSocketAdapter import ConnectedSocketAdapter
from Kamaelia.Internet.TCPServer import TCPServer
config_files = ["/usr/local/etc/Kamaelia/greylist.conf",
"/usr/local/etc/greylist.conf",
"/etc/Kamaelia/greylist.conf",
"/etc/greylist.conf",
"greylist.conf",
"/usr/local/etc/Kamaelia/greylist.conf.dist",
"/usr/local/etc/greylist.conf.dist",
"/etc/Kamaelia/greylist.conf.dist",
"/etc/greylist.conf.dist",
"greylist.conf.dist" ]
default_config = { 'allowed_domains': [],
'allowed_sender_nets': [],
'allowed_senders': ['127.0.0.1'],
'port': 25,
"greylist_log": "greylist.log",
"greylist_debuglog" : "greylist-debug.log",
"inactivity_timeout": 60,
'serverid': 'Kamaelia-SMTP 1.0',
'servername': 'mail.example.com',
'smtp_ip': '192.168.2.9',
'smtp_port': 8025,
'whitelisted_nonstandard_triples': [],
'whitelisted_triples': []
}
def openConfig(config_file):
f = open(config_file)
lines = f.readlines()
f.close()
return lines
def parseConfigFile(lines, default_config):
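    # Accepts a very small config syntax:
    #   key = value          (port, smtp_port and inactivity_timeout are coerced to int)
    #   key:                 (starts a block; the following indented lines are list items,
    #                         and items containing a space become tuples)
    # Lines starting with '#' are comments; blank lines are ignored.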
config = copy.deepcopy(default_config)
l = 0
while l<len(lines):
line = lines[l][:-1] # remove newline
line = line.rstrip()
if len(line) != 0:
if "#" == line[0]:
pass # skip
elif "=" in line:
bits = line.split("=")
thing = bits[0].strip().rstrip()
what = bits[1].strip().rstrip()
if (thing == "port") or (thing == "smtp_port") or (thing == "inactivity_timeout"):
what = int(what)
config[thing] = what
else:
if line[-1] == ":":
thing = line[:-1]
if config.get(thing) == None:
config[thing] = []
while (l+1)<len(lines):
l+=1
line = lines[l][:-1] # remove newline
x = line.rstrip()
y = line.strip()
if x==y:
break
if " " in y:
config[thing].append(tuple(y.split(" ")))
else:
config[thing].append(y)
l-=1
l+=1
return config
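# A minimal illustrative greylist.conf for the parser above (values and
# addresses are made up, not taken from any shipped .dist file):
#
#     servername = mail.example.com
#     port = 8025
#     smtp_ip = 127.0.0.1
#     smtp_port = 10025
#     allowed_domains:
#         example.com
#         example.org
#     whitelisted_triples:
#         192.0.2.10 <[email protected]> <[email protected]>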
config_used = None
for config_file in config_files:
try:
lines = openConfig(config_file)
except IOError:
pass
else:
config_used =config_file
break
if config_used is not None:
config = parseConfigFile(lines,default_config)
else:
config = default_config
config_used = "DEFAULT INTERNAL"
class GreylistServer(MoreComplexServer):
logfile = config["greylist_log"]
debuglogfile = config["greylist_debuglog"]
socketOptions=(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
port = config["port"]
class TCPS(TCPServer):
CSA = NoActivityTimeout(ConnectedSocketAdapter, timeout=config["inactivity_timeout"], debug=False)
class protocol(GreyListingPolicy):
servername = config["servername"]
serverid = config["serverid"]
smtp_ip = config["smtp_ip"]
smtp_port = config["smtp_port"]
allowed_senders = config["allowed_senders"]
allowed_sender_nets = config["allowed_sender_nets"] # Yes, only class C network style
allowed_domains = config["allowed_domains"]
whitelisted_triples = config["whitelisted_triples"]
whitelisted_nonstandard_triples = config["whitelisted_nonstandard_triples"]
WakeableIntrospector.logfile = config["greylist_debuglog"]
MailHandler.logfile = config["greylist_log"]
MailHandler.debuglogfile = config["greylist_debuglog"]
GreylistServer().run()
| apache-2.0 | -8,248,058,968,048,290,000 | 35.215289 | 157 | 0.563281 | false |
chriscrosscutler/scikit-image | skimage/feature/peak.py | 18 | 6225 | import numpy as np
import scipy.ndimage as ndi
from ..filters import rank_order
def peak_local_max(image, min_distance=10, threshold_abs=0, threshold_rel=0.1,
exclude_border=True, indices=True, num_peaks=np.inf,
footprint=None, labels=None):
"""
Find peaks in an image, and return them as coordinates or a boolean array.
Peaks are the local maxima in a region of `2 * min_distance + 1`
(i.e. peaks are separated by at least `min_distance`).
NOTE: If peaks are flat (i.e. multiple adjacent pixels have identical
intensities), the coordinates of all such pixels are returned.
Parameters
----------
image : ndarray of floats
Input image.
min_distance : int
Minimum number of pixels separating peaks in a region of `2 *
min_distance + 1` (i.e. peaks are separated by at least
`min_distance`). If `exclude_border` is True, this value also excludes
a border `min_distance` from the image boundary.
To find the maximum number of peaks, use `min_distance=1`.
threshold_abs : float
Minimum intensity of peaks.
threshold_rel : float
Minimum intensity of peaks calculated as `max(image) * threshold_rel`.
exclude_border : bool
If True, `min_distance` excludes peaks from the border of the image as
well as from each other.
indices : bool
If True, the output will be an array representing peak coordinates.
If False, the output will be a boolean array shaped as `image.shape`
with peaks present at True elements.
num_peaks : int
Maximum number of peaks. When the number of peaks exceeds `num_peaks`,
return `num_peaks` peaks based on highest peak intensity.
footprint : ndarray of bools, optional
If provided, `footprint == 1` represents the local region within which
to search for peaks at every point in `image`. Overrides
`min_distance`, except for border exclusion if `exclude_border=True`.
labels : ndarray of ints, optional
If provided, each unique region `labels == value` represents a unique
region to search for peaks. Zero is reserved for background.
Returns
-------
output : ndarray or ndarray of bools
* If `indices = True` : (row, column, ...) coordinates of peaks.
* If `indices = False` : Boolean array shaped like `image`, with peaks
represented by True values.
Notes
-----
The peak local maximum function returns the coordinates of local peaks
    (maxima) in an image. A maximum filter is used for finding local maxima.
This operation dilates the original image. After comparison between
dilated and original image, peak_local_max function returns the
coordinates of peaks where dilated image = original.
Examples
--------
>>> img1 = np.zeros((7, 7))
>>> img1[3, 4] = 1
>>> img1[3, 2] = 1.5
>>> img1
array([[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 1.5, 0. , 1. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ]])
>>> peak_local_max(img1, min_distance=1)
array([[3, 2],
[3, 4]])
>>> peak_local_max(img1, min_distance=2)
array([[3, 2]])
>>> img2 = np.zeros((20, 20, 20))
>>> img2[10, 10, 10] = 1
>>> peak_local_max(img2, exclude_border=False)
array([[10, 10, 10]])
"""
out = np.zeros_like(image, dtype=np.bool)
# In the case of labels, recursively build and return an output
# operating on each label separately
if labels is not None:
label_values = np.unique(labels)
# Reorder label values to have consecutive integers (no gaps)
if np.any(np.diff(label_values) != 1):
mask = labels >= 1
labels[mask] = 1 + rank_order(labels[mask])[0].astype(labels.dtype)
labels = labels.astype(np.int32)
# New values for new ordering
label_values = np.unique(labels)
for label in label_values[label_values != 0]:
maskim = (labels == label)
out += peak_local_max(image * maskim, min_distance=min_distance,
threshold_abs=threshold_abs,
threshold_rel=threshold_rel,
exclude_border=exclude_border,
indices=False, num_peaks=np.inf,
footprint=footprint, labels=None)
if indices is True:
return np.transpose(out.nonzero())
else:
return out.astype(np.bool)
if np.all(image == image.flat[0]):
if indices is True:
return []
else:
return out
image = image.copy()
# Non maximum filter
if footprint is not None:
image_max = ndi.maximum_filter(image, footprint=footprint,
mode='constant')
else:
size = 2 * min_distance + 1
image_max = ndi.maximum_filter(image, size=size, mode='constant')
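    # Pixels equal to their neighborhood maximum (the dilated image) are
    # peak candidates; everything else is zeroed out by the mask below.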
mask = (image == image_max)
image *= mask
if exclude_border:
# zero out the image borders
for i in range(image.ndim):
image = image.swapaxes(0, i)
image[:min_distance] = 0
image[-min_distance:] = 0
image = image.swapaxes(0, i)
# find top peak candidates above a threshold
peak_threshold = max(np.max(image.ravel()) * threshold_rel, threshold_abs)
# get coordinates of peaks
coordinates = np.argwhere(image > peak_threshold)
if coordinates.shape[0] > num_peaks:
intensities = image.flat[np.ravel_multi_index(coordinates.transpose(),image.shape)]
idx_maxsort = np.argsort(intensities)[::-1]
coordinates = coordinates[idx_maxsort][:num_peaks]
if indices is True:
return coordinates
else:
nd_indices = tuple(coordinates.T)
out[nd_indices] = True
return out
| bsd-3-clause | 2,725,758,630,431,916,000 | 37.664596 | 91 | 0.578474 | false |
alfred82santa/dirty-models | tests/dirty_models/tests_fields.py | 1 | 73981 | from datetime import date, datetime, time, timedelta, timezone
from enum import Enum
from unittest import TestCase
import iso8601
import sys
from dateutil import tz
from dirty_models.base import AccessMode
from dirty_models.fields import ArrayField, BaseField, BooleanField, BytesField, DateField, DateTimeField, EnumField, \
FloatField, HashMapField, IntegerField, ModelField, MultiTypeField, StringField, StringIdField, TimeField, \
TimedeltaField
from dirty_models.model_types import ListModel
from dirty_models.models import BaseModel, HashMapModel
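# The cases below exercise value checking (check_value), conversion
# (can_use_value/use_value) and export behaviour for each field type.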
class TestFields(TestCase):
def test_float_field_using_int(self):
field = FloatField()
self.assertFalse(field.check_value(3))
self.assertTrue(field.can_use_value(3))
self.assertEqual(field.use_value(3), 3)
def test_float_field_using_float(self):
field = FloatField()
self.assertTrue(field.check_value(3.0))
self.assertEqual(field.use_value(3.0), 3.0)
def test_float_field_using_str(self):
field = FloatField()
self.assertFalse(field.check_value("3.0"))
self.assertTrue(field.can_use_value("3.0"))
self.assertEqual(field.use_value("3.0"), 3.0)
def test_float_field_using_str_negative(self):
field = FloatField()
self.assertFalse(field.check_value("-3.0"))
self.assertTrue(field.can_use_value("-3.0"))
self.assertEqual(field.use_value("-3.0"), -3.0)
def test_float_field_using_dict(self):
field = FloatField()
self.assertFalse(field.check_value({}))
self.assertFalse(field.can_use_value({}))
def test_str_field_using_int(self):
field = StringField()
self.assertFalse(field.check_value(3))
self.assertTrue(field.can_use_value(3))
self.assertEqual(field.use_value(3), "3")
def test_str_field_using_float(self):
field = StringField()
self.assertFalse(field.check_value(3.0))
self.assertTrue(field.can_use_value(3.0))
self.assertEqual(field.use_value(3.0), "3.0")
def test_str_field_using_str(self):
field = StringField()
self.assertTrue(field.check_value("aaasa"))
self.assertEqual(field.use_value("aaasa"), "aaasa")
def test_str_field_using_dict(self):
field = StringField()
self.assertFalse(field.check_value({}))
self.assertFalse(field.can_use_value({}))
def test_bool_field_using_int_any(self):
field = BooleanField()
self.assertFalse(field.check_value(3))
self.assertTrue(field.can_use_value(3))
self.assertTrue(field.use_value(3))
def test_bool_field_using_int_0(self):
field = BooleanField()
self.assertFalse(field.check_value(0))
        self.assertTrue(field.can_use_value(0))
self.assertFalse(field.use_value(0))
def test_bool_field_using_float(self):
field = BooleanField()
self.assertFalse(field.check_value(3.0))
self.assertFalse(field.can_use_value(3.0))
def test_bool_field_using_str_true(self):
field = BooleanField()
self.assertFalse(field.check_value("TrUe "))
self.assertTrue(field.can_use_value("TrUe "))
self.assertTrue(field.use_value("TrUe "))
def test_bool_field_using_str_false(self):
field = BooleanField()
self.assertFalse(field.check_value("False"))
self.assertTrue(field.can_use_value("False"))
self.assertFalse(field.use_value("False"))
def test_bool_field_using_str_any(self):
field = BooleanField()
self.assertFalse(field.check_value("aaasa"))
self.assertTrue(field.can_use_value("aaasa"))
self.assertFalse(field.use_value("aaasa"))
def test_bool_field_using_dict(self):
field = BooleanField()
self.assertFalse(field.check_value({}))
self.assertFalse(field.can_use_value({}))
def test_bool_field_using_bool_true(self):
field = BooleanField()
self.assertTrue(field.check_value(True))
self.assertTrue(field.use_value(True))
def test_bool_field_using_bool_false(self):
field = BooleanField()
self.assertTrue(field.check_value(False))
self.assertFalse(field.use_value(False))
def test_int_field_on_class_using_int(self):
class TestModel(BaseModel):
field_name = IntegerField()
model = TestModel()
model.field_name = 3
self.assertEqual(model.field_name, 3)
def test_string_field_on_class_using_empty_string(self):
class TestModel(BaseModel):
field_name = StringField()
model = TestModel()
model.field_name = ""
self.assertEqual(model.field_name, "")
def test_string_id_field_on_class_using_string(self):
class TestModel(BaseModel):
field_name = StringIdField()
model = TestModel()
model.field_name = "id"
self.assertIsNotNone(model.field_name)
def test_string_id_field_on_class_using_number(self):
class TestModel(BaseModel):
field_name = StringIdField()
model = TestModel()
model.field_name = 1
self.assertIsNotNone(model.field_name)
def test_string_id_field_on_class_using_empty_string(self):
class TestModel(BaseModel):
field_name = StringIdField()
model = TestModel()
model.field_name = ""
self.assertIsNone(model.field_name)
def test_string_id_field_on_class_using_empty_string_and_delete_value(self):
class TestModel(BaseModel):
field_name = StringIdField()
model = TestModel()
model.field_name = "name"
model.field_name = ""
self.assertIsNone(model.field_name)
def test_int_field_on_class_using_float(self):
class TestModel(BaseModel):
field_name = IntegerField()
model = TestModel()
model.field_name = 3.0
self.assertEqual(model.field_name, 3)
def test_int_field_on_class_using_str(self):
class TestModel(BaseModel):
field_name = IntegerField()
model = TestModel()
model.field_name = "3"
self.assertEqual(model.field_name, 3)
def test_int_field_on_class_using_str_hex(self):
class TestModel(BaseModel):
field_name = IntegerField()
model = TestModel()
model.field_name = "0x13"
self.assertEqual(model.field_name, 19)
def test_int_field_on_class_using_str_oct(self):
class TestModel(BaseModel):
field_name = IntegerField()
model = TestModel()
model.field_name = "0o13"
self.assertEqual(model.field_name, 11)
if sys.version_info >= (3, 6):
def test_int_field_on_class_using_str_undescore(self):
class TestModel(BaseModel):
field_name = IntegerField()
model = TestModel()
model.field_name = "1_345_232"
self.assertEqual(model.field_name, 1345232)
def test_int_field_on_class_using_dict(self):
class TestModel(BaseModel):
field_name = IntegerField()
model = TestModel()
model.field_name = {}
self.assertIsNone(model.field_name)
def test_float_field_on_class_using_int(self):
class TestModel(BaseModel):
field_name = FloatField()
model = TestModel()
model.field_name = 3
self.assertEqual(model.field_name, 3.0)
def test_float_field_on_class_using_float(self):
class TestModel(BaseModel):
field_name = FloatField()
model = TestModel()
model.field_name = 3.0
self.assertEqual(model.field_name, 3.0)
def test_float_field_on_class_using_str(self):
class TestModel(BaseModel):
field_name = FloatField()
model = TestModel()
model.field_name = "3.0"
self.assertEqual(model.field_name, 3.0)
def test_float_field_on_class_using_dict(self):
class TestModel(BaseModel):
field_name = FloatField()
model = TestModel()
model.field_name = {}
self.assertIsNone(model.field_name)
def test_str_field_on_class_using_int(self):
class TestModel(BaseModel):
field_name = StringField()
model = TestModel()
model.field_name = 3
self.assertEqual(model.field_name, "3")
def test_str_field_on_class_using_float(self):
class TestModel(BaseModel):
field_name = StringField()
model = TestModel()
model.field_name = 3.0
self.assertEqual(model.field_name, "3.0")
def test_str_field_on_class_using_str(self):
class TestModel(BaseModel):
field_name = StringField()
model = TestModel()
model.field_name = "aaaaa"
self.assertEqual(model.field_name, "aaaaa")
def test_str_field_on_class_using_dict(self):
class TestModel(BaseModel):
field_name = StringField()
model = TestModel()
model.field_name = {}
self.assertIsNone(model.field_name)
def test_bool_field_on_class_using_int_true(self):
class TestModel(BaseModel):
field_name = BooleanField()
model = TestModel()
model.field_name = 3
self.assertTrue(model.field_name)
def test_bool_field_on_class_using_int_false(self):
class TestModel(BaseModel):
field_name = BooleanField()
model = TestModel()
model.field_name = 0
self.assertFalse(model.field_name)
def test_bool_field_on_class_using_float(self):
class TestModel(BaseModel):
field_name = BooleanField()
model = TestModel()
model.field_name = 3.0
self.assertIsNone(model.field_name)
def test_bool_field_on_class_using_str_any(self):
class TestModel(BaseModel):
field_name = BooleanField()
model = TestModel()
model.field_name = "aaaaa"
self.assertFalse(model.field_name)
def test_bool_field_on_class_using_str_false(self):
class TestModel(BaseModel):
field_name = BooleanField()
model = TestModel()
model.field_name = "False"
self.assertFalse(model.field_name)
def test_bool_field_on_class_using_str_true(self):
class TestModel(BaseModel):
field_name = BooleanField()
model = TestModel()
model.field_name = " tRuE "
self.assertTrue(model.field_name)
def test_bool_field_on_class_using_dict(self):
class TestModel(BaseModel):
field_name = BooleanField()
model = TestModel()
model.field_name = {}
self.assertIsNone(model.field_name)
def test_int_field_delete_value(self):
class TestModel(BaseModel):
field_name = IntegerField()
model = TestModel()
model.field_name = 3
self.assertEqual(model.field_name, 3)
del model.field_name
self.assertIsNone(model.field_name)
# Check field descriptor exists
model.field_name = "3"
self.assertEqual(model.field_name, 3)
def test_int_field_bad_definition(self):
class TestModel():
field_name = IntegerField()
model = TestModel()
with self.assertRaisesRegexp(AttributeError, "Field name must be set"):
model.field_name = 3
with self.assertRaisesRegexp(AttributeError, "Field name must be set"):
model.field_name
with self.assertRaisesRegexp(AttributeError, "Field name must be set"):
del model.field_name
def test_model_field_on_class_using_int(self):
class TestModel(BaseModel):
field_name = ModelField()
model = TestModel()
model.field_name = 3
self.assertIsNone(model.field_name)
def test_model_field_on_class_using_float(self):
class TestModel(BaseModel):
field_name = ModelField()
model = TestModel()
model.field_name = 3.0
self.assertIsNone(model.field_name)
def test_model_field_on_class_using_str(self):
class TestModel(BaseModel):
field_name = ModelField()
model = TestModel()
model.field_name = "aaaaa"
self.assertIsNone(model.field_name)
def test_model_field_on_class_using_dict(self):
class TestModel(BaseModel):
field_name_1 = ModelField()
field_name_2 = StringField()
model = TestModel()
model.field_name_1 = {"field_name_2": "ooo"}
self.assertIsInstance(model.field_name_1, TestModel)
self.assertEqual(model.field_name_1.field_name_2, "ooo")
def test_model_field_import_parent(self):
class TestModel(BaseModel):
field_name_1 = ModelField()
field_name_2 = StringField()
model = TestModel({"field_name_1": {"field_name_2": "eee"},
"field_name_2": "ooo"})
self.assertIsInstance(model.field_name_1, TestModel)
self.assertEqual(model.field_name_2, "ooo")
self.assertEqual(model.field_name_1.field_name_2, "eee")
def test_model_field_on_class_using_model_with_original_data(self):
class TestModel(BaseModel):
field_name_1 = ModelField()
field_name_2 = StringField()
model = TestModel({"field_name_1": {"field_name_2": "eee"},
"field_name_2": "ooo"})
model.flat_data()
model.field_name_1 = TestModel({"field_name_2": "aaa"})
self.assertIsInstance(model.field_name_1, TestModel)
self.assertEqual(model.field_name_2, "ooo")
self.assertEqual(model.field_name_1.field_name_2, "aaa")
def test_model_field_on_class_using_dict_with_original_data(self):
class TestModel(BaseModel):
field_name_1 = ModelField()
field_name_2 = StringField()
model = TestModel({"field_name_1": {"field_name_2": "eee"},
"field_name_2": "ooo"})
model.flat_data()
model.field_name_1 = {"field_name_2": "aaa"}
self.assertIsInstance(model.field_name_1, TestModel)
self.assertEqual(model.field_name_2, "ooo")
self.assertEqual(model.field_name_1.field_name_2, "aaa")
def test_model_field_bad_definition(self):
class TestModel():
field_name = ModelField()
model = TestModel()
with self.assertRaisesRegexp(AttributeError, "Field name must be set"):
model.field_name = {}
def test_time_field_using_int(self):
field = TimeField()
self.assertFalse(field.check_value(3333))
self.assertTrue(field.can_use_value(3333))
self.assertEqual(field.use_value(3333),
datetime(year=1970, month=1,
day=1, hour=0, minute=55,
second=33, tzinfo=timezone.utc).astimezone()
.time())
def test_time_field_desc(self):
field = TimeField()
self.assertEqual(field.export_definition(), {'alias': None,
'doc': 'TimeField field',
'parse_format': None,
'name': None,
'default': None,
'metadata': None,
'json_schema': None,
'title': None,
'access_mode': AccessMode.READ_AND_WRITE})
def test_time_field_using_float(self):
field = TimeField()
self.assertFalse(field.check_value(3.0))
self.assertFalse(field.can_use_value(3.0))
self.assertIsNone(field.use_value(3.0))
def test_time_field_using_str(self):
field = TimeField(parse_format="%H:%M:%S")
self.assertFalse(field.check_value("03:13:23"))
self.assertTrue(field.can_use_value("03:13:23"))
self.assertEqual(field.use_value("03:13:23"),
time(hour=3, minute=13, second=23))
def test_time_field_desc_w_format(self):
field = TimeField(parse_format="%H:%M:%S")
self.assertEqual(field.export_definition(), {'alias': None,
'doc': 'TimeField field',
'parse_format': "%H:%M:%S",
'name': None,
'default': None,
'metadata': None,
'json_schema': None,
'title': None,
'access_mode': AccessMode.READ_AND_WRITE})
def test_time_field_using_bad_str(self):
field = TimeField(parse_format="%H:%M:%S")
self.assertIsNone(field.use_value("03:13:2334"))
def test_time_field_using_str_no_parse_format(self):
field = TimeField()
self.assertFalse(field.check_value("03:13:23"))
self.assertTrue(field.can_use_value("03:13:23"))
self.assertEqual(field.use_value("03:13:23"),
time(hour=3, minute=13, second=23))
def test_time_field_using_list(self):
field = TimeField()
self.assertFalse(field.check_value([3, 13, 23]))
self.assertTrue(field.can_use_value([3, 13, 23]))
self.assertEqual(field.use_value([3, 13, 23]),
time(hour=3, minute=13, second=23))
def test_time_field_using_dict(self):
field = TimeField()
self.assertFalse(field.check_value({"hour": 3, "minute": 13, "second": 23}))
self.assertTrue(field.can_use_value({"hour": 3, "minute": 13, "second": 23}))
self.assertEqual(field.use_value({"hour": 3, "minute": 13, "second": 23}),
time(hour=3, minute=13, second=23))
def test_time_field_using_datetime(self):
field = TimeField()
dt = datetime(year=1970, month=1, day=1, hour=3, minute=13, second=23)
self.assertFalse(field.check_value(dt))
self.assertTrue(field.can_use_value(dt))
self.assertEqual(field.use_value(dt),
time(hour=3, minute=13, second=23))
def test_time_field_using_time(self):
field = TimeField()
t = time(hour=3, minute=13, second=23)
self.assertTrue(field.check_value(t))
self.assertEqual(field.use_value(t),
time(hour=3, minute=13, second=23))
def test_date_field_using_int(self):
field = DateField()
self.assertFalse(field.check_value(1433333333))
self.assertTrue(field.can_use_value(1433333333))
self.assertEqual(field.use_value(1433333333),
datetime(year=2015, month=6,
day=3, hour=0, minute=15,
second=33, tzinfo=timezone.utc).astimezone()
.date())
def test_date_field_using_float(self):
field = DateField()
self.assertFalse(field.check_value(3.0))
self.assertFalse(field.can_use_value(3.0))
self.assertIsNone(field.use_value(3.0))
def test_date_field_using_str(self):
field = DateField(parse_format="%d/%m/%Y")
self.assertFalse(field.check_value("23/03/2015"))
self.assertTrue(field.can_use_value("23/03/2015"))
self.assertEqual(field.use_value("23/03/2015"),
date(year=2015, month=3, day=23))
def test_date_field_using_str_no_parse_format(self):
field = DateField()
self.assertFalse(field.check_value("23/03/2015"))
self.assertTrue(field.can_use_value("23/03/2015"))
self.assertEqual(field.use_value("23/03/2015"),
date(year=2015, month=3, day=23))
def test_date_field_using_non_parseable_str(self):
field = DateField(parse_format="%Y-%M-%d")
self.assertIsNone(field.use_value("2013-9-45"))
def test_date_field_using_list(self):
field = DateField()
self.assertFalse(field.check_value([2015, 3, 23]))
self.assertTrue(field.can_use_value([2015, 3, 23]))
self.assertEqual(field.use_value([2015, 3, 23]),
date(year=2015, month=3, day=23))
def test_date_field_using_dict(self):
field = DateField()
self.assertFalse(field.check_value({"year": 2015, "month": 3, "day": 23}))
self.assertTrue(field.can_use_value({"year": 2015, "month": 3, "day": 23}))
self.assertEqual(field.use_value({"year": 2015, "month": 3, "day": 23}),
date(year=2015, month=3, day=23))
def test_date_field_using_datetime(self):
field = DateField()
dt = datetime(year=2015, month=3, day=23, hour=3, minute=13, second=23)
self.assertFalse(field.check_value(dt))
self.assertTrue(field.can_use_value(dt))
self.assertEqual(field.use_value(dt),
datetime(year=2015, month=3, day=23).date())
def test_date_field_using_date(self):
field = DateField()
d = date(year=2015, month=3, day=23)
self.assertTrue(field.check_value(d))
self.assertEqual(field.use_value(d),
date(year=2015, month=3, day=23))
def test_datetime_field_using_int(self):
field = DateTimeField()
self.assertFalse(field.check_value(1433333333))
self.assertTrue(field.can_use_value(1433333333))
self.assertEqual(field.use_value(1433333333),
datetime(year=2015, month=6,
day=3, hour=12, minute=8,
second=53, tzinfo=timezone.utc).astimezone().replace(tzinfo=None))
def test_datetime_field_using_float(self):
field = DateTimeField()
self.assertFalse(field.check_value(3.0))
self.assertFalse(field.can_use_value(3.0))
self.assertIsNone(field.use_value(3.0))
def test_datetime_field_using_str(self):
field = DateTimeField(parse_format="%d/%m/%Y")
self.assertFalse(field.check_value("23/03/2015"))
self.assertTrue(field.can_use_value("23/03/2015"))
self.assertEqual(field.use_value("23/03/2015"),
datetime(year=2015, month=3, day=23))
def test_datetime_field_using_bad_str(self):
field = DateTimeField(parse_format="%d/%m/%Y")
self.assertIsNone(field.use_value("23/44/2015"))
def test_datetime_field_using_str_no_parse_format(self):
from dateutil.tz import tzutc
field = DateTimeField()
self.assertFalse(field.check_value('2012-09-11T13:02:41Z'))
self.assertTrue(field.can_use_value('2012-09-11T13:02:41Z'))
self.assertEqual(field.use_value('2012-09-11T13:02:41Z'),
datetime(2012, 9, 11, 13, 2, 41, tzinfo=tzutc()))
def test_datetime_field_using_list(self):
field = DateTimeField()
self.assertFalse(field.check_value([2015, 3, 23]))
self.assertTrue(field.can_use_value([2015, 3, 23]))
self.assertEqual(field.use_value([2015, 3, 23]),
datetime(year=2015, month=3, day=23))
def test_datetime_field_using_dict(self):
field = DateTimeField()
self.assertFalse(field.check_value({"year": 2015, "month": 3, "day": 23}))
self.assertTrue(field.can_use_value({"year": 2015, "month": 3, "day": 23}))
self.assertEqual(field.use_value({"year": 2015, "month": 3, "day": 23}),
datetime(year=2015, month=3, day=23))
def test_datetime_field_using_date(self):
field = DateTimeField()
d = date(year=2015, month=3, day=23)
self.assertFalse(field.check_value(d))
self.assertTrue(field.can_use_value(d))
self.assertEqual(field.use_value(d),
datetime(year=2015, month=3, day=23))
def test_datetime_field_using_datetime(self):
field = DateTimeField()
d = datetime(year=2015, month=3, day=23,
hour=0, minute=15, second=33,
tzinfo=timezone.utc)
self.assertTrue(field.check_value(d))
self.assertEqual(field.use_value(d),
datetime(year=2015, month=3, day=23,
hour=0, minute=15, second=33,
tzinfo=timezone.utc))
def test_datetime_field_using_iso8061_parser_and_formatter(self):
field = DateTimeField('iso8061')
field.date_parsers = {
'iso8061': {
'formatter': '%Y-%m-%dT%H:%M:%SZ',
'parser': iso8601.parse_date
}
}
data = '2012-09-11T13:02:41Z'
self.assertFalse(field.check_value(data))
self.assertTrue(field.can_use_value(data))
self.assertEqual(field.use_value(data), datetime(year=2012, month=9, day=11,
hour=13, minute=2, second=41,
tzinfo=timezone.utc))
def test_datetime_field_using_iso8061_without_formatter(self):
field = DateTimeField('iso8061')
field.date_parsers = {
'iso8061': {
'parser': 'bruce_wayne'
}
}
data = '2012-09-11T13:02:41Z'
self.assertFalse(field.check_value(data))
self.assertTrue(field.can_use_value(data))
self.assertIsNone(field.use_value(data))
def test_datetime_field_using_iso8061_without_parser(self):
field = DateTimeField('iso8061')
field.date_parsers = {
'iso8061': {
'formatter': '%Y-%m-%dT%H:%M:%SZ'
}
}
data = '2012-09-11T13:02:41Z'
self.assertFalse(field.check_value(data))
self.assertTrue(field.can_use_value(data))
self.assertIsNotNone(field.use_value(data))
def test_datetime_field_using_iso8061_parser_and_def_formatter(self):
def parser_format(value):
return datetime.strptime(datetime.strftime(value, '%Y-%m-%dT%H:%M:%SZ'), '%Y-%m-%dT%H:%M:%SZ')
field = DateTimeField('iso8061')
field.date_parsers = {
'iso8061': {
'formatter': parser_format,
'parser': iso8601.parse_date
}
}
data = '2012-09-11T13:02:41Z'
self.assertFalse(field.check_value(data))
self.assertTrue(field.can_use_value(data))
self.assertEqual(field.use_value(data), datetime(year=2012, month=9, day=11,
hour=13, minute=2, second=41,
tzinfo=timezone.utc))
def test_datetime_field_using_iso8061_bad_str(self):
field = DateTimeField('iso8061')
field.date_parsers = {
'iso8061': {
'formatter': '%Y-%m-%dT%H:%M:%SZ',
'parser': iso8601.parse_date
}
}
data = '2012-09-50T13:02:41Z'
self.assertFalse(field.check_value(data))
self.assertTrue(field.can_use_value(data))
self.assertIsNone(field.use_value(data))
def test_time_field_using_iso8061(self):
field = TimeField('iso8061')
field.date_parsers = {
'iso8061': {
'formatter': '%Y-%m-%dT%H:%M:%SZ',
'parser': iso8601.parse_date
}
}
data = '2012-09-11T13:02:41Z'
self.assertFalse(field.check_value(data))
self.assertTrue(field.can_use_value(data))
self.assertEqual(field.use_value(data), time(hour=13, minute=2, second=41,
tzinfo=timezone.utc))
def test_time_field_using_iso8061_bad_str(self):
field = TimeField('iso8061')
field.date_parsers = {
'iso8061': {
'formatter': '%Y-%m-%dT%H:%M:%SZ',
'parser': iso8601.parse_date
}
}
data = '2012-09-50T13:02:41Z'
self.assertFalse(field.check_value(data))
self.assertTrue(field.can_use_value(data))
self.assertIsNone(field.use_value(data))
def test_date_field_using_iso8061(self):
field = DateField('iso8061')
field.date_parsers = {
'iso8061': {
'formatter': '%Y-%m-%dT%H:%M:%SZ',
'parser': iso8601.parse_date
}
}
data = '2012-09-11T13:02:41Z'
self.assertFalse(field.check_value(data))
self.assertTrue(field.can_use_value(data))
self.assertEqual(field.use_value(data), date(year=2012, month=9, day=11))
def test_date_field_using_is8061_bad_str(self):
field = DateField('iso8061')
field.date_parsers = {
'iso8061': {
'formatter': '%Y-%m-%dT%H:%M:%SZ',
'parser': iso8601.parse_date
}
}
data = '2012-09-50T13:02:41Z'
self.assertFalse(field.check_value(data))
self.assertTrue(field.can_use_value(data))
self.assertIsNone(field.use_value(data))
def test_datetime_field_using_iso8061_def_format(self):
def get_format(value):
format = '%Y-%m-%dT%H:%M:%SZ'
return datetime.strftime(value, format)
field = DateTimeField('iso8061')
field.date_parsers = {
'iso8061': {
'formatter': get_format,
'parser': iso8601.parse_date
}
}
data = datetime(year=2012, month=9, day=11,
hour=13, minute=2, second=41,
tzinfo=timezone.utc)
self.assertEqual(field.get_formatted_value(data), '2012-09-11T13:02:41Z')
def test_date_field_using_iso8061_bad_format_str(self):
field = DateTimeField()
data = datetime(year=2012, month=9, day=11,
hour=13, minute=2, second=41,
tzinfo=timezone.utc)
self.assertEqual(field.get_formatted_value(data), '2012-09-11 13:02:41+00:00')
def test_date_field_using_iso8061_format_str(self):
field = DateTimeField('iso8061')
field.date_parsers = {
'iso8061': {
'formatter': '%Y-%m-%dT%H:%M:%SZ',
'parser': iso8601.parse_date
}
}
data = datetime(year=2012, month=9, day=11,
hour=13, minute=2, second=41,
tzinfo=timezone.utc)
self.assertEqual(field.get_formatted_value(data), '2012-09-11T13:02:41Z')
def test_datetime_field_using_parser_as_formatter(self):
field = DateTimeField('%Y-%m-%dT%H:%M:%SZ-test')
data = datetime(year=2012, month=9, day=11,
hour=13, minute=2, second=41,
tzinfo=timezone.utc)
self.assertEqual(field.get_formatted_value(data), '2012-09-11T13:02:41Z-test')
def test_datetime_field_using_dict_as_parser(self):
field = DateTimeField({'parser': '%Y-%m-%dT%H:%M:%SZ-test',
'formatter': '%Y-%m-%dT%H:%M:%SZ-test2'})
data = datetime(year=2012, month=9, day=11,
hour=13, minute=2, second=41)
self.assertEqual(field.get_parsed_value('2012-09-11T13:02:41Z-test'), data)
self.assertEqual(field.get_formatted_value(data), '2012-09-11T13:02:41Z-test2')
def test_datetime_field_using_dict_as_parser_default_formatter(self):
field = DateTimeField({'parser': '%Y-%m-%dT%H:%M:%SZ-test'})
data = datetime(year=2012, month=9, day=11,
hour=13, minute=2, second=41)
self.assertEqual(field.get_parsed_value('2012-09-11T13:02:41Z-test'), data)
self.assertEqual(field.get_formatted_value(data), '2012-09-11T13:02:41Z-test')
def test_datetime_field_using_dict_neither_parser_or_formatter(self):
field = DateTimeField({})
data = datetime(year=2012, month=9, day=11,
hour=13, minute=2, second=41,
tzinfo=timezone.utc)
self.assertIsNone(field.get_parsed_value('2012-09-11T13:02:41Z-test'))
self.assertEqual(field.get_formatted_value(data), '2012-09-11 13:02:41+00:00')
def test_datetime_field_using_default_parser_formatter(self):
field = DateTimeField()
data = datetime(year=2012, month=9, day=11,
hour=13, minute=2, second=41,
tzinfo=timezone.utc)
self.assertEqual(field.get_parsed_value('2012-09-11T13:02:41Z'), data)
self.assertEqual(field.get_formatted_value(data), '2012-09-11 13:02:41+00:00')
def test_model_field_desc(self):
class TestModel(BaseModel):
field_name = StringIdField()
field = ModelField(model_class=TestModel)
self.assertEqual(field.export_definition(),
{'alias': None,
'doc': 'ModelField field (:class:`tests.dirty_models.tests_fields.TestModel`)',
'model_class': TestModel,
'name': None,
'default': None,
'metadata': None,
'json_schema': None,
'title': None,
'access_mode': AccessMode.READ_AND_WRITE})
def test_array_field(self):
class TestModel(BaseModel):
field_name_1 = ModelField()
field_name_2 = StringField()
class ArrayModel(BaseModel):
array_field = ArrayField(field_type=ModelField(model_class=TestModel))
array_model = ArrayModel()
test_model_1 = TestModel()
test_model_1.field_name_2 = "hello"
test_model_1.field_name_1 = TestModel()
array_model.array_field = set([test_model_1])
self.assertEqual(ListModel([test_model_1]).export_data(), array_model.array_field.export_data())
self.assertEqual(test_model_1, array_model.array_field[0])
def test_array_field_with_ListModel(self):
class TestModel(BaseModel):
field_name_1 = ModelField()
field_name_2 = StringField()
class ArrayModel(BaseModel):
array_field = ArrayField(field_type=ModelField(model_class=TestModel))
array_model = ArrayModel()
test_model_1 = TestModel()
test_model_1.field_name_2 = "hello"
test_model_1.field_name_1 = TestModel()
array_model.array_field = ListModel([test_model_1])
self.assertEqual(test_model_1, array_model.array_field[0])
def test_array_field_extend(self):
class TestModel(BaseModel):
field_name_1 = ModelField()
field_name_2 = StringField()
class ArrayModel(BaseModel):
array_field = ArrayField(field_type=ModelField(model_class=TestModel))
array_model = ArrayModel()
test_model_1 = TestModel()
array_model.array_field = ListModel([test_model_1])
test_model_2 = TestModel()
array_model.array_field.extend(set([test_model_2]))
self.assertEqual(test_model_1, array_model.array_field[0])
self.assertEqual(test_model_2, array_model.array_field[1])
def test_array_field_invalid_value_to_add(self):
class TestModel(BaseModel):
field_name_1 = ModelField()
field_name_2 = StringField()
class ArrayModel(BaseModel):
array_field = ArrayField(field_type=ModelField(model_class=TestModel))
array_model = ArrayModel()
test_model_1 = TestModel()
array_model.array_field = ListModel([test_model_1])
array_model.array_field.extend(ListModel(["This is not a valid model"]))
self.assertEqual(test_model_1, array_model.array_field[0])
self.assertRaises(IndexError, array_model.array_field.__getitem__, 1)
def test_array_field_invalid_value_set(self):
class TestModel(BaseModel):
field_name_1 = ModelField()
field_name_2 = StringField()
class ArrayModel(BaseModel):
array_field = ArrayField(field_type=ModelField(model_class=TestModel))
array_model = ArrayModel()
array_model.array_field = ["Unexpected string", TestModel()]
self.assertEqual(1, len(array_model.array_field))
def test_array_field_not_iterable(self):
class ArrayModel(BaseModel):
array_field = ArrayField(field_type=BooleanField())
model = ArrayModel()
model.array_field = "This is not a list"
self.assertIsNone(model.array_field)
def test_array_field_list_invalid_types(self):
class ArrayModel(BaseModel):
array_field = ArrayField(field_type=IntegerField())
model = ArrayModel()
model.array_field = ["This is not a list", "This neither"]
self.assertIsNone(model.array_field)
def test_array_field_conversion(self):
class ArrayModel(BaseModel):
array_field = ArrayField(field_type=IntegerField())
model = ArrayModel()
model.array_field = ["This is not a list", "2"]
self.assertEquals(model.array_field[0], 2)
def test_array_set_value_list_field(self):
class ArrayModel(BaseModel):
array_field = ArrayField(field_type=IntegerField())
model = ArrayModel()
model.array_field = ListModel(["this is not an integer"], field_type=IntegerField())
self.assertEqual(0, len(model.array_field))
def test_array_set_value_list_field_valid_and_convertible(self):
class ArrayModel(BaseModel):
array_field = ArrayField(field_type=IntegerField())
model = ArrayModel()
model.array_field = ListModel(["3"], field_type=IntegerField())
self.assertEqual(1, len(model.array_field))
def test_array_del(self):
class TestModel(BaseModel):
field_name_1 = ModelField()
field_name_2 = StringField()
class ArrayModel(BaseModel):
array_field = ArrayField(field_type=ModelField(model_class=TestModel))
array_model = ArrayModel()
test_model_1 = TestModel()
array_model.array_field = [test_model_1]
self.assertEqual(0, array_model.array_field.index(test_model_1))
del array_model.array_field[0]
self.assertEqual(0, len(array_model.array_field))
def test_array_model_export_data(self):
class TestModel(BaseModel):
field_name_1 = ModelField()
field_name_2 = StringField()
class ArrayModel(BaseModel):
array_field = ArrayField(field_type=ModelField(model_class=TestModel))
array_model = ArrayModel()
test_model_1 = TestModel()
test_model_1.field_name_1 = TestModel()
test_model_1.field_name_2 = "Test model 1"
test_model_2 = TestModel()
test_model_2.field_name_1 = TestModel()
test_model_2.field_name_2 = "Test model 2"
array_model.array_field = [test_model_1, test_model_2]
expected_data = {
"array_field": [{"field_name_1": {}, "field_name_2": "Test model 1"},
{"field_name_1": {}, "field_name_2": "Test model 2"}]
}
self.assertEqual(expected_data, array_model.export_data())
def test_array_model_export_data_integers(self):
class ArrayModel(BaseModel):
array_field = ArrayField(field_type=IntegerField())
model = ArrayModel()
model.array_field = ["3", 4]
self.assertEqual({"array_field": [3, 4]}, model.export_data())
def test_array_model_export_data_not_modified(self):
class TestModel(BaseModel):
field_name_1 = ModelField()
field_name_2 = StringField()
class ArrayModel(BaseModel):
array_field = ArrayField(field_type=ModelField(model_class=TestModel))
array_model = ArrayModel()
test_model_1 = TestModel()
test_model_1.field_name_1 = TestModel()
test_model_1.field_name_2 = "Test model 1"
test_model_2 = TestModel()
test_model_2.field_name_1 = TestModel()
test_model_2.field_name_2 = "Test model 2"
array_model.array_field = [test_model_1, test_model_2]
array_model.flat_data()
expected_data = {
"array_field": [{"field_name_1": {}, "field_name_2": "Test model 1"},
{"field_name_1": {}, "field_name_2": "Test model 2"}]
}
self.assertEqual(expected_data, array_model.export_data())
def test_array_model_export_data_unitialised(self):
class ArrayModel(BaseModel):
array_field = ArrayField(field_type=IntegerField())
model = ArrayModel()
model.array_field = ["3", 4]
model.array_field.clear()
self.assertEqual({"array_field": []}, model.export_data())
def test_array_model_export_modified_data(self):
class TestModel(BaseModel):
field_name_1 = ModelField()
field_name_2 = StringField()
class ArrayModel(BaseModel):
array_field = ArrayField(field_type=ModelField(model_class=TestModel))
array_model = ArrayModel()
test_model_1 = TestModel()
test_model_1.field_name_1 = TestModel()
test_model_1.field_name_2 = "Test model 1"
test_model_2 = TestModel()
test_model_2.field_name_1 = TestModel()
test_model_2.field_name_2 = "Test model 2"
array_model.array_field = [test_model_1, test_model_2]
expected_data = {
"array_field": [{"field_name_1": {}, "field_name_2": "Test model 1"},
{"field_name_1": {}, "field_name_2": "Test model 2"}]
}
self.assertEqual(expected_data, array_model.export_modified_data())
def test_array_model_export_modified_data_flattered(self):
class TestModel(BaseModel):
field_name_1 = ModelField()
field_name_2 = StringField()
class ArrayModel(BaseModel):
array_field = ArrayField(field_type=ModelField(model_class=TestModel))
array_model = ArrayModel()
test_model_1 = TestModel()
test_model_1.field_name_1 = TestModel()
test_model_1.field_name_2 = "Test model 1"
test_model_2 = TestModel()
test_model_2.field_name_1 = TestModel()
test_model_2.field_name_2 = "Test model 2"
array_model.array_field = [test_model_1, test_model_2]
array_model.flat_data()
self.assertEqual({}, array_model.export_modified_data())
def test_array_model_export_modified_data_integers(self):
class ArrayModel(BaseModel):
array_field = ArrayField(field_type=IntegerField())
model = ArrayModel()
model.array_field = ["3", 4]
self.assertEqual({"array_field": [3, 4]}, model.export_modified_data())
def test_array_model_export_modified_data_unitialised(self):
class ArrayModel(BaseModel):
array_field = ArrayField(field_type=IntegerField())
model = ArrayModel()
model.array_field = ["3", 4]
model.array_field.clear()
self.assertEqual({"array_field": []}, model.export_modified_data())
def test_array_model_import_data(self):
class ArrayModel(BaseModel):
array_field = ArrayField(field_type=IntegerField())
array_model = ArrayModel()
array_model.import_data({"array_field": [1, 2, 3, 4]})
self.assertEqual(4, len(array_model.array_field))
def test_array_model_empty(self):
class ArrayModel(BaseModel):
array_field = ArrayField(field_type=IntegerField())
array_model = ArrayModel()
array_model.array_field = []
self.assertIsInstance(array_model.array_field, ListModel)
self.assertListEqual(array_model.array_field.export_data(), [])
def test_array_model_with_model_field_no_model_class(self):
class TestModel(BaseModel):
field_name_1 = ModelField()
field_name_2 = StringField()
class ArrayModel(BaseModel):
array_field = ArrayField(field_type=ModelField(model_class=TestModel))
array_model = ArrayModel()
array_model_indented_1 = TestModel({'field_name_2': 'aaaa'})
array_model_indented_2 = TestModel({'field_name_2': 'bbbb'})
array_model.array_field = [array_model_indented_1, array_model_indented_2, "not valid :)"]
self.assertEqual(list(array_model.array_field), [array_model_indented_1, array_model_indented_2])
def test_array_model_export_modified_data_model_inside(self):
class TestModel(BaseModel):
field_name_1 = ModelField()
field_name_2 = StringField()
class ArrayModel(BaseModel):
array_field = ArrayField(field_type=ModelField(model_class=TestModel))
array_model = ArrayModel()
array_model_indented_1 = TestModel({'field_name_2': 'aaaa'})
array_model_indented_2 = TestModel({'field_name_2': 'bbbb'})
array_model.array_field = [array_model_indented_1, array_model_indented_2]
array_model.flat_data()
array_model_indented_1.field_name_2 = 'cccc'
self.assertDictEqual(array_model.export_modified_data(), {'array_field': [{'field_name_2': 'cccc'}, {}]})
def test_array_field_desc(self):
field = ArrayField(field_type=IntegerField(read_only=True))
self.assertEqual(field.export_definition(), {
'alias': None,
'doc': 'Array of IntegerField field [READ ONLY]',
'field_type': (IntegerField, {'alias': None,
'doc': 'IntegerField field [READ ONLY]',
'name': None,
'default': None,
'metadata': None,
'json_schema': None,
'title': None,
'access_mode': AccessMode.READ_ONLY}),
'name': None,
'default': None,
'metadata': None,
'json_schema': None,
'title': None,
'access_mode': AccessMode.READ_AND_WRITE})
def test_array_field_from_desc(self):
field = ArrayField(field_type=(IntegerField, {'alias': None,
'doc': 'IntegerField field [READ ONLY]',
'name': None,
'read_only': True}))
self.assertEqual(field.export_definition(), {
'alias': None,
'doc': 'Array of IntegerField field [READ ONLY]',
'field_type': (IntegerField, {'alias': None,
'doc': 'IntegerField field [READ ONLY]',
'name': None,
'default': None,
'metadata': None,
'json_schema': None,
'title': None,
'access_mode': AccessMode.READ_ONLY}),
'name': None,
'default': None,
'metadata': None,
'json_schema': None,
'title': None,
'access_mode': AccessMode.READ_AND_WRITE})
def test_hashmap_field(self):
class FakeHashMapModel(HashMapModel):
field_name_1 = ModelField()
field_name_2 = StringField()
class TestModel(BaseModel):
hashmap_field = HashMapField(field_type=IntegerField(), model_class=FakeHashMapModel)
hash_model = TestModel()
hash_model.hashmap_field = {'field_name_1': {'field_name_2': 'aaaa'},
'field_name_2': 'cccc',
'field_hash_1': '34'}
self.assertIsInstance(hash_model.hashmap_field, FakeHashMapModel)
self.assertEqual(hash_model.hashmap_field.field_name_1.field_name_2, 'aaaa')
self.assertEqual(hash_model.hashmap_field.field_name_2, 'cccc')
self.assertEqual(hash_model.hashmap_field.field_hash_1, 34)
def test_hashmap_field_dyn(self):
class TestModel(BaseModel):
hashmap_field = HashMapField(field_type=IntegerField())
hash_model = TestModel()
hash_model.hashmap_field = {'field_name_1': 3,
'field_name_2': 4,
'field_hash_1': '34'}
self.assertIsInstance(hash_model.hashmap_field, HashMapModel)
self.assertEqual(hash_model.hashmap_field.field_name_1, 3)
self.assertEqual(hash_model.hashmap_field.field_name_2, 4)
self.assertEqual(hash_model.hashmap_field.field_hash_1, 34)
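# ArrayField(autolist=True) wraps a bare scalar into a single-element list;
# the tests below cover both the autolist and non-autolist behaviour.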
class ArrayOfStringFieldTests(TestCase):
def setUp(self):
super(ArrayOfStringFieldTests, self).setUp()
class ArrayModel(BaseModel):
array_field = ArrayField(field_type=StringField(), autolist=True)
self.model = ArrayModel()
def test_array_field(self):
self.model.array_field = ['foo', 'bar']
self.assertEqual(self.model.export_data(), {'array_field': ['foo', 'bar']})
def test_array_field_tuple(self):
self.model.array_field = 'foo',
self.assertEqual(self.model.export_data(), {'array_field': ['foo']})
def test_array_field_autolist(self):
self.model.array_field = 'foo'
self.assertEqual(self.model.export_data(), {'array_field': ['foo']})
def test_array_field_no_autolist(self):
self.model.__class__.__dict__['array_field'].autolist = False
self.model.array_field = 'foo'
self.assertEqual(self.model.export_data(), {})
class IntegerFieldTests(TestCase):
class TestEnum(Enum):
value_1 = 1
value_2 = '2'
value_3 = 3.2
value_4 = 'value'
def test_using_int(self):
field = IntegerField()
self.assertTrue(field.check_value(3))
self.assertEqual(field.use_value(3), 3)
def test_desc(self):
field = IntegerField()
self.assertEqual(field.export_definition(), {'alias': None,
'doc': 'IntegerField field',
'name': None,
'default': None,
'metadata': None,
'json_schema': None,
'title': None,
'access_mode': AccessMode.READ_AND_WRITE})
def test_using_float(self):
field = IntegerField()
self.assertFalse(field.check_value(3.0))
self.assertTrue(field.can_use_value(3.0))
self.assertEqual(field.use_value(3.0), 3)
def test_using_str(self):
field = IntegerField()
self.assertFalse(field.check_value("3"))
self.assertTrue(field.can_use_value("3"))
self.assertEqual(field.use_value("3"), 3)
def test_using_dict(self):
field = IntegerField()
self.assertFalse(field.check_value({}))
self.assertFalse(field.can_use_value({}))
def test_using_int_enum(self):
field = IntegerField()
self.assertFalse(field.check_value(self.TestEnum.value_1))
self.assertTrue(field.can_use_value(self.TestEnum.value_1))
self.assertEqual(field.use_value(self.TestEnum.value_1), 1)
def test_using_str_enum(self):
field = IntegerField()
self.assertFalse(field.check_value(self.TestEnum.value_2))
self.assertTrue(field.can_use_value(self.TestEnum.value_2))
self.assertEqual(field.use_value(self.TestEnum.value_2), 2)
def test_using_float_enum(self):
field = IntegerField()
self.assertFalse(field.check_value(self.TestEnum.value_3))
self.assertTrue(field.can_use_value(self.TestEnum.value_3))
self.assertEqual(field.use_value(self.TestEnum.value_3), 3)
def test_using_str_enum_fail(self):
field = IntegerField()
self.assertFalse(field.check_value(self.TestEnum.value_4))
self.assertFalse(field.can_use_value(self.TestEnum.value_4))
def test_using_enum_fail(self):
field = IntegerField()
self.assertFalse(field.check_value(self.TestEnum))
self.assertFalse(field.can_use_value(self.TestEnum))
class MultiTypeFieldSimpleTypesTests(TestCase):
def setUp(self):
super(MultiTypeFieldSimpleTypesTests, self).setUp()
class MultiTypeModel(BaseModel):
multi_field = MultiTypeField(field_types=[IntegerField(), StringField()])
self.model = MultiTypeModel()
def test_string_field(self):
self.model.multi_field = 'foo'
self.assertEqual(self.model.multi_field, 'foo')
def test_integer_field(self):
self.model.multi_field = 3
self.assertEqual(self.model.multi_field, 3)
def test_update_string_field(self):
self.model.multi_field = 3
self.model.flat_data()
self.model.multi_field = 'foo'
self.assertEqual(self.model.multi_field, 'foo')
def test_update_integer_field(self):
self.model.multi_field = 'foo'
self.model.flat_data()
self.model.multi_field = 3
self.assertEqual(self.model.multi_field, 3)
def test_no_update_integer_field(self):
self.model.multi_field = 3
self.model.flat_data()
self.model.multi_field = [3, 4]
self.assertEqual(self.model.multi_field, 3)
def test_integer_field_use_float(self):
self.model.multi_field = 3.0
self.assertEqual(self.model.multi_field, 3)
def test_string_field_conversion_priority(self):
self.model.multi_field = '3'
self.assertEqual(self.model.multi_field, '3')
def test_multi_field_desc(self):
self.maxDiff = None
field = MultiTypeField(field_types=[IntegerField(), StringField()])
self.assertEqual(field.export_definition(), {
'alias': None,
'doc': "\n".join(['Multiple type values are allowed:',
'',
'* IntegerField field',
'',
'* StringField field']),
'field_types': [(IntegerField, {'alias': None,
'doc': 'IntegerField field',
'name': None,
'default': None,
'metadata': None,
'json_schema': None,
'title': None,
'access_mode': AccessMode.READ_AND_WRITE}),
(StringField, {'alias': None,
'doc': 'StringField field',
'name': None,
'default': None,
'metadata': None,
'json_schema': None,
'title': None,
'access_mode': AccessMode.READ_AND_WRITE})],
'name': None,
'default': None,
'metadata': None,
'json_schema': None,
'title': None,
'access_mode': AccessMode.READ_AND_WRITE})
class MultiTypeFieldComplexTypesTests(TestCase):
def setUp(self):
super(MultiTypeFieldComplexTypesTests, self).setUp()
class MultiTypeModel(BaseModel):
multi_field = MultiTypeField(field_types=[IntegerField(), (ArrayField, {"field_type": StringField()})])
self.model = MultiTypeModel()
def test_integer_field(self):
self.model.multi_field = 3
self.assertEqual(self.model.multi_field, 3)
def test_array_field(self):
self.model.multi_field = ['foo', 'bar']
self.assertEqual(self.model.multi_field.export_data(), ['foo', 'bar'])
def test_update_array_field(self):
self.model.multi_field = 3
self.model.flat_data()
self.model.multi_field = ['foo', 'bar']
self.assertEqual(self.model.multi_field.export_data(), ['foo', 'bar'])
def test_update_integer_field(self):
self.model.multi_field = ['foo', 'bar']
self.model.flat_data()
self.model.multi_field = 3
self.assertEqual(self.model.multi_field, 3)
def test_get_field_type_by_value(self):
multi_field = MultiTypeField(field_types=[IntegerField(), (ArrayField, {"field_type": StringField()})])
self.assertIsInstance(multi_field.get_field_type_by_value(['foo', 'bar']),
ArrayField)
self.assertIsInstance(multi_field.get_field_type_by_value(3),
IntegerField)
def test_get_field_type_by_value_fail(self):
multi_field = MultiTypeField(field_types=[IntegerField(), (ArrayField, {"field_type": StringField()})])
with self.assertRaises(TypeError):
multi_field.get_field_type_by_value({})
class AutoreferenceModelFieldTests(TestCase):
def setUp(self):
super(AutoreferenceModelFieldTests, self).setUp()
class AutoreferenceModel(BaseModel):
multi_field = MultiTypeField(field_types=[IntegerField(), (ArrayField, {"field_type": ModelField()})])
array_of_array = ArrayField(field_type=ArrayField(field_type=ModelField()))
test_field = IntegerField()
self.model = AutoreferenceModel()
def test_model_reference(self):
self.model.import_data({'multi_field': [{'test_field': 1}, {'test_field': 2}],
'array_of_array': [[{'test_field': 3}]]})
self.assertIsInstance(self.model.multi_field[0], self.model.__class__)
self.assertIsInstance(self.model.multi_field[1], self.model.__class__)
self.assertIsInstance(self.model.array_of_array[0][0], self.model.__class__)
class TimedeltaFieldTests(TestCase):
def setUp(self):
self.field = TimedeltaField()
def test_check_value_success(self):
self.assertTrue(self.field.check_value(timedelta(seconds=0)))
def test_check_value_fail(self):
self.assertFalse(self.field.check_value(12))
def test_can_use_value_int(self):
self.assertTrue(self.field.can_use_value(12))
def test_can_use_value_float(self):
self.assertTrue(self.field.can_use_value(12.11))
def test_can_use_value_fail(self):
self.assertFalse(self.field.can_use_value('test'))
def test_convert_value_int(self):
self.assertTrue(self.field.convert_value(12), timedelta(seconds=12))
def test_convert_value_float(self):
self.assertTrue(self.field.convert_value(12.11), timedelta(seconds=12, milliseconds=110))
class DateTimeFieldWithTimezoneTests(TestCase):
def test_no_timezone_none(self):
class Model(BaseModel):
date_time_field = DateTimeField()
model = Model(date_time_field=datetime(year=2016, month=7, day=21, hour=12, minute=23))
self.assertEqual(model.date_time_field, datetime(year=2016, month=7, day=21, hour=12, minute=23))
self.assertIsNone(model.date_time_field.tzinfo)
def test_no_timezone_europe(self):
class Model(BaseModel):
date_time_field = DateTimeField()
model = Model(date_time_field=datetime(year=2016, month=7, day=21,
hour=12, minute=23, tzinfo=tz.gettz('Europe/Amsterdam')))
self.assertEqual(model.date_time_field, datetime(year=2016, month=7, day=21, hour=12, minute=23,
tzinfo=tz.gettz('Europe/Amsterdam')))
self.assertEqual(model.date_time_field.tzinfo, tz.gettz('Europe/Amsterdam'))
def test_with_default_timezone_utc(self):
class Model(BaseModel):
date_time_field = DateTimeField(default_timezone=timezone.utc)
model = Model(date_time_field=datetime(year=2016, month=7, day=21, hour=12, minute=23))
self.assertEqual(model.date_time_field, datetime(year=2016, month=7, day=21,
hour=12, minute=23, tzinfo=timezone.utc))
self.assertEqual(model.date_time_field.tzinfo, timezone.utc)
def test_with_default_timezone_utc_no_changed(self):
class Model(BaseModel):
date_time_field = DateTimeField(default_timezone=timezone.utc)
model = Model(date_time_field=datetime(year=2016, month=7, day=21, hour=12, minute=23,
tzinfo=tz.gettz('Europe/Amsterdam')))
self.assertEqual(model.date_time_field, datetime(year=2016, month=7, day=21,
hour=12, minute=23, tzinfo=tz.gettz('Europe/Amsterdam')))
self.assertEqual(model.date_time_field.tzinfo, tz.gettz('Europe/Amsterdam'))
def test_with_default_timezone_europe(self):
class Model(BaseModel):
date_time_field = DateTimeField(default_timezone=tz.gettz('Europe/Amsterdam'))
model = Model(date_time_field=datetime(year=2016, month=7, day=21, hour=12, minute=23))
self.assertEqual(model.date_time_field, datetime(year=2016, month=7, day=21,
hour=12, minute=23, tzinfo=tz.gettz('Europe/Amsterdam')))
self.assertEqual(model.date_time_field.tzinfo, tz.gettz('Europe/Amsterdam'))
def test_with_default_force_timezone_utc(self):
class Model(BaseModel):
date_time_field = DateTimeField(default_timezone=timezone.utc, force_timezone=True)
model = Model(date_time_field=datetime(year=2016, month=7, day=21,
hour=12, minute=23, tzinfo=tz.gettz('Europe/Amsterdam')))
self.assertEqual(model.date_time_field,
datetime(year=2016, month=7, day=21,
hour=12, minute=23,
tzinfo=tz.gettz('Europe/Amsterdam')).astimezone(timezone.utc))
self.assertEqual(model.date_time_field.tzinfo, timezone.utc)
def test_export_definition(self):
field = DateTimeField(name='test_field', alias=[], default_timezone=timezone.utc, force_timezone=True)
self.assertEqual(field.export_definition(),
{'alias': [], 'parse_format': None,
'doc': 'DateTimeField field',
'force_timezone': True,
'default_timezone': timezone.utc,
'name': 'test_field',
'default': None,
'metadata': None,
'json_schema': None,
'title': None,
'access_mode': AccessMode.READ_AND_WRITE},
field.export_definition())
class TimeFieldWithTimezoneTests(TestCase):
def test_no_timezone_none(self):
class Model(BaseModel):
time_field = TimeField()
model = Model(time_field=time(hour=12, minute=23))
self.assertEqual(model.time_field, time(hour=12, minute=23))
self.assertIsNone(model.time_field.tzinfo)
def test_no_timezone_europe(self):
class Model(BaseModel):
time_field = TimeField()
model = Model(time_field=time(hour=12, minute=23, tzinfo=tz.gettz('Europe/Amsterdam')))
self.assertEqual(model.time_field, time(hour=12, minute=23,
tzinfo=tz.gettz('Europe/Amsterdam')))
self.assertEqual(model.time_field.tzinfo, tz.gettz('Europe/Amsterdam'))
def test_with_default_timezone_utc(self):
class Model(BaseModel):
time_field = TimeField(default_timezone=timezone.utc)
model = Model(time_field=time(hour=12, minute=23))
self.assertEqual(model.time_field, time(hour=12, minute=23, tzinfo=timezone.utc))
self.assertEqual(model.time_field.tzinfo, timezone.utc)
def test_with_default_timezone_utc_no_changed(self):
class Model(BaseModel):
time_field = TimeField(default_timezone=timezone.utc)
model = Model(time_field=time(hour=12, minute=23,
tzinfo=tz.gettz('Europe/Amsterdam')))
self.assertEqual(model.time_field, time(hour=12, minute=23, tzinfo=tz.gettz('Europe/Amsterdam')))
self.assertEqual(model.time_field.tzinfo, tz.gettz('Europe/Amsterdam'))
def test_export_definition(self):
field = TimeField(name='test_field', alias=[], default_timezone=timezone.utc)
self.assertEqual(field.export_definition(),
{'alias': [], 'parse_format': None,
'doc': 'TimeField field',
'default_timezone': timezone.utc,
'name': 'test_field',
'default': None,
'metadata': None,
'json_schema': None,
'title': None,
'access_mode': AccessMode.READ_AND_WRITE},
field.export_definition())
class EnumFieldTests(TestCase):
class TestEnum(Enum):
value_1 = 'value1'
value_2 = 2
def setUp(self):
self.field = EnumField(name='test_field', alias=[], enum_class=self.TestEnum)
def test_check_value(self):
self.assertTrue(self.field.check_value(self.TestEnum.value_1))
self.assertTrue(self.field.check_value(self.TestEnum.value_2))
def test_check_value_fail(self):
self.assertFalse(self.field.check_value('value_1'))
self.assertFalse(self.field.check_value(2))
def test_can_use_value_check_values(self):
self.assertTrue(self.field.can_use_value('value1'))
self.assertTrue(self.field.can_use_value(2))
def test_can_use_value_check_member_names(self):
self.assertTrue(self.field.can_use_value('value_1'))
self.assertTrue(self.field.can_use_value('value_2'))
def test_can_use_value_check_values_fail(self):
self.assertFalse(self.field.can_use_value('value2'))
self.assertFalse(self.field.can_use_value(3))
def test_convert_value_from_values(self):
self.assertEqual(self.field.convert_value('value1'), self.TestEnum.value_1)
self.assertEqual(self.field.convert_value(2), self.TestEnum.value_2)
def test_convert_value_from_member_names(self):
self.assertEqual(self.field.convert_value('value_1'), self.TestEnum.value_1)
self.assertEqual(self.field.convert_value('value_2'), self.TestEnum.value_2)
def test_export_definition(self):
self.assertEqual(self.field.export_definition(),
{'alias': [],
'doc': 'EnumField field (:class:`{0}`)'.format('.'.join([self.TestEnum.__module__,
self.TestEnum.__name__])),
'enum_class': self.TestEnum,
'name': 'test_field',
'default': None,
'metadata': None,
'json_schema': None,
'title': None,
'access_mode': AccessMode.READ_AND_WRITE},
self.field.export_definition())
def test_export_data(self):
class Model(BaseModel):
field = EnumField(enum_class=self.TestEnum)
model = Model(field=self.TestEnum.value_1)
self.assertEqual(model.export_data(), {'field': self.TestEnum.value_1})
def test_multitype_export_data(self):
class Model(BaseModel):
field = MultiTypeField(field_types=[EnumField(enum_class=self.TestEnum),
ArrayField(field_type=EnumField(enum_class=self.TestEnum))])
model = Model()
model.field = self.TestEnum.value_1
self.assertEqual(model.export_data(), {'field': self.TestEnum.value_1})
def test_multitype_export_data_inverted(self):
class Model(BaseModel):
field = MultiTypeField(field_types=[ArrayField(field_type=EnumField(enum_class=self.TestEnum)),
EnumField(enum_class=self.TestEnum)])
model = Model()
model.field = self.TestEnum.value_1
self.assertEqual(model.export_data(), {'field': self.TestEnum.value_1})
def test_multitype_export_data_array(self):
class Model(BaseModel):
field = MultiTypeField(field_types=[EnumField(enum_class=self.TestEnum),
ArrayField(field_type=EnumField(enum_class=self.TestEnum))])
model = Model()
model.field = [self.TestEnum.value_1, ]
self.assertEqual(model.export_data(), {'field': [self.TestEnum.value_1, ]})
def test_multitype_export_data_array_inverted(self):
class Model(BaseModel):
field = MultiTypeField(field_types=[ArrayField(field_type=EnumField(enum_class=self.TestEnum)),
EnumField(enum_class=self.TestEnum)])
model = Model()
model.field = [self.TestEnum.value_1, ]
self.assertEqual(model.export_data(), {'field': [self.TestEnum.value_1, ]})
class BytesFieldTests(TestCase):
def setUp(self):
self.field = BytesField(name='test_field', alias=[])
def test_check_value(self):
self.assertTrue(self.field.check_value(b'2332345as'))
def test_check_value_fail(self):
self.assertFalse(self.field.check_value('2332345as'))
self.assertFalse(self.field.check_value(12))
self.assertFalse(self.field.check_value(12.3))
self.assertFalse(self.field.check_value(bytearray([12, 43, 52])))
self.assertFalse(self.field.check_value([12, 43, 52]))
self.assertFalse(self.field.check_value({'sasa': 'asasas'}))
def test_can_use_value_check_values(self):
self.assertTrue(self.field.can_use_value('2332345as'))
self.assertTrue(self.field.can_use_value(12))
self.assertTrue(self.field.can_use_value(bytearray([12, 43, 52])))
self.assertTrue(self.field.can_use_value([12, 43, 52]))
self.assertTrue(self.field.can_use_value(ListModel([12, 43, 52])))
def test_can_use_value_check_values_fail(self):
self.assertFalse(self.field.can_use_value(12.3))
self.assertFalse(self.field.can_use_value({'sasa': 'asasas'}))
def test_convert_value_from_values(self):
self.assertEqual(self.field.convert_value('2332345as'), b'2332345as')
self.assertEqual(self.field.convert_value(12), b'\x0c')
self.assertEqual(self.field.convert_value(bytearray([12, 43, 52])), b'\x0c+4')
self.assertEqual(self.field.convert_value([12, 43, 52]), b'\x0c+4')
self.assertEqual(self.field.convert_value(ListModel([12, 43, 52])), b'\x0c+4')
def test_convert_value_from_invalid_values(self):
self.assertIsNone(self.field.convert_value(ListModel([{'ass': 'as'}])))
def test_export_definition(self):
self.assertEqual(self.field.export_definition(),
{'alias': [],
'doc': 'BytesField field',
'name': 'test_field',
'default': None,
'metadata': None,
'json_schema': None,
'title': None,
'access_mode': AccessMode.READ_AND_WRITE},
self.field.export_definition())
class BaseFieldDocTests(TestCase):
def test_doc(self):
field = BaseField(name='test_field')
self.assertEquals(field.get_field_docstring(), 'BaseField field')
def test_doc_writable_only_on_creation(self):
field = BaseField(name='test_field', access_mode=AccessMode.WRITABLE_ONLY_ON_CREATION)
self.assertEquals(field.get_field_docstring(), 'BaseField field [WRITABLE ONLY ON CREATION]')
def test_doc_read_only(self):
field = BaseField(name='test_field', access_mode=AccessMode.READ_ONLY)
self.assertEquals(field.get_field_docstring(), 'BaseField field [READ ONLY]')
def test_doc_hidden(self):
field = BaseField(name='test_field', access_mode=AccessMode.HIDDEN)
self.assertEquals(field.get_field_docstring(), 'BaseField field [HIDDEN]')
class BaseFieldOldReadOnlyTests(TestCase):
def test_old_read_only_true(self):
field = BaseField(name='test_field', read_only=True)
self.assertEquals(field.access_mode, AccessMode.READ_ONLY)
def test_old_read_only_false(self):
field = BaseField(name='test_field', read_only=False)
self.assertEquals(field.access_mode, AccessMode.READ_AND_WRITE)
| bsd-2-clause | 3,501,055,680,036,023,000 | 38.268047 | 119 | 0.574093 | false |
endlessm/chromium-browser | native_client/pnacl/scripts/llvm-test.py | 1 | 21139 | #!/usr/bin/python
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script runs the LLVM regression tests and the LLVM testsuite.
These tests are tightly coupled to the LLVM build, and require that
LLVM has been built on this host by build.sh. It also assumes that
the test suite source has been checked out using gclient (build.sh
git-sync).
The testsuite must be configured, then run, then reported.
Currently it requires clean in between runs of different arches.
The regression tests require nothing more than running 'make check'
in the build directory, but currently not all of the upstream tests
pass in our source tree, so we currently use the same
known-failures mechanism that the testsuite uses. Once we eliminate
the locally-caused failures, we should expect 'make check' to
always pass and can get rid of the regression known failures.
"""
from __future__ import print_function
import contextlib
import datetime
import os
import optparse
import shutil
import subprocess
import sys
import parse_llvm_test_report
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import pynacl.platform
@contextlib.contextmanager
def remember_cwd():
"""Provides a shell 'pushd'/'popd' pattern.
Use as:
with remember_cwd():
os.chdir(...)
...
# Original cwd restored here
"""
curdir = os.getcwd()
try:
yield
finally:
os.chdir(curdir)
def ParseCommandLine(argv):
usage = """%prog [options]
Specify the tests or test subsets in the options; common tests are
--llvm-regression and --testsuite-all.
The --opt arguments control the frontend/backend optimization flags.
The default set is {O3f,O2b}, other options are {O0f,O0b,O2b_sz,O0b_sz}.
"""
parser = optparse.OptionParser(usage=usage)
parser.add_option('--arch', dest='arch',
help=('Architecture to test, e.g. x86-32, x86-64, arm; ' +
'required for most tests'))
parser.add_option('--opt', dest='opt_attributes', action='append',
default=[],
help=('Add optimization level attribute of ' +
'test configuration'))
parser.add_option('--llvm-regression', dest='run_llvm_regression',
action='store_true', default=False,
help='Run the LLVM regression tests')
parser.add_option('--libcxx-tests', dest='run_libcxx_tests',
action='store_true', default=False,
help='Run the libc++ tests')
parser.add_option('--testsuite-clean', dest='testsuite_clean',
action='store_true', default=False,
help='Clean the testsuite build directory')
parser.add_option('--testsuite-prereq', dest='testsuite_prereq',
action='store_true', default=False,
help='Build the testsuite prerequisites')
parser.add_option('--testsuite-configure', dest='testsuite_configure',
action='store_true', default=False,
help='Configure the testsuite build directory')
parser.add_option('--testsuite-run', dest='testsuite_run',
action='store_true', default=False,
help='Run the testsuite (requires <arch> argument)')
parser.add_option('--testsuite-report', dest='testsuite_report',
action='store_true', default=False,
help=('Generate the testsuite report ' +
'(requires <arch> argument)'))
parser.add_option('--testsuite-all', dest='testsuite_all',
action='store_true', default=False,
help='Run all testsuite steps (requires <arch> argument)')
parser.add_option('--llvm-buildpath', dest='llvm_buildpath',
help='Path to the LLVM build directory')
parser.add_option('-v', '--verbose', action='store_true',
default=False, dest='verbose',
help=('[--testsuite-report/regression option] ' +
                          'Print compilation/run logs of failing tests in '
'testsuite report and print all regression output'))
# The following options are specific to parse_llvm_test_report.
parser.add_option('-x', '--exclude', action='append', dest='excludes',
default=[],
help=('[--testsuite-report option] ' +
'Add list of excluded tests (expected fails)'))
parser.add_option('-c', '--check-excludes', action='store_true',
default=False, dest='check_excludes',
help=('[--testsuite-report option] ' +
'Report tests which unexpectedly pass'))
parser.add_option('-p', '--build-path', dest='buildpath',
help=('[--testsuite-report option] ' +
'Path to test-suite build directory'))
parser.add_option('-a', '--attribute', dest='attributes', action='append',
default=[],
help=('[--testsuite-report option] ' +
'Add attribute of test configuration (e.g. arch)'))
parser.add_option('-t', '--testsuite', action='store_true', dest='testsuite',
default=False,
help=('[--testsuite-report option] ' +
'Signify LLVM testsuite tests'))
parser.add_option('-l', '--lit', action='store_true', dest='lit',
default=False,
help=('[--testsuite-report option] ' +
'Signify LLVM LIT regression tests'))
options, args = parser.parse_args(argv)
return options, args
def Fatal(text):
"""Prints an error message and exits."""
print(text, file=sys.stderr)
sys.exit(1)
def ParseConfig(options):
"""Constructs a frontend/backend dict based on --opt arguments.
Args:
options: The result of OptionParser().parse_args().
Returns:
A simple dict containing keys 'frontend_opt', 'frontend_attr',
'backend_opt', and 'backend_attr', each mapped to a valid string
value. The result is a function of the --opt command-line
arguments, with defaults in place when there are too few --opt
arguments.
"""
configs = dict(O0f={'frontend_opt': '-O0', 'frontend_attr': 'O0f'},
O3f={'frontend_opt': '-O3', 'frontend_attr': 'O3f'},
O0b={'backend_opt': '-translate-fast',
'backend_attr': 'O0b'},
O2b={'backend_opt': '-O2', 'backend_attr': 'O2b'},
O0b_sz={'backend_opt': '-translate-fast --use-sz',
'backend_attr': 'O0b_sz'},
O2b_sz={'backend_opt': '-O2 --use-sz',
'backend_attr': 'O2b_sz'},
)
result = {}
# Default is pnacl-clang -O3, pnacl-translate -O2
for attr in ['O3f', 'O2b'] + options.opt_attributes:
if attr in configs:
result.update(configs[attr])
return result
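# Illustrative note (not part of the original script): for a hypothetical
# invocation with "--opt O0f --opt O0b_sz", the O3f/O2b defaults are applied
# first and then overridden, so ParseConfig() returns
#   {'frontend_opt': '-O0', 'frontend_attr': 'O0f',
#    'backend_opt': '-translate-fast --use-sz', 'backend_attr': 'O0b_sz'}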
def GetConfigSuffix(config):
"""Create a string to be used as a file suffix.
Args:
config: A dict that was the result of ParseConfig().
Returns:
A string that concatenates the frontend and backend attributes.
"""
return config['frontend_attr'] + '_' + config['backend_attr']
def SetupEnvironment(options):
"""Create an environment.
This is based on the current system, various defaults, and various
environment variables.
Args:
options: The result of OptionParser.parse_args()
Returns:
A dict with various string->string mappings.
"""
env = {}
pwd = os.getcwd()
if not pwd.endswith(os.sep + 'native_client'):
Fatal("ERROR: must be run in native_client/ directory!\n" +
" (Current directory is " + pwd + ")")
# Simulate what's needed from common-tools.sh.
# We need PNACL_BUILDBOT, BUILD_PLATFORM, and HOST_ARCH.
# TODO(dschuff): This should come from toolchain_build or the upcoming common
# python infrastructure.
env['PNACL_BUILDBOT'] = os.environ.get('PNACL_BUILDBOT', 'false')
if sys.platform == 'linux2':
env['BUILD_PLATFORM'] = 'linux'
env['BUILD_ARCH'] = os.environ.get(
'BUILD_ARCH',
'x86_64' if pynacl.platform.IsArch64Bit() else 'i686')
env['HOST_ARCH'] = os.environ.get('HOST_ARCH', env['BUILD_ARCH'])
env['HOST_TRIPLE'] = env['HOST_ARCH'] + '_linux'
elif sys.platform == 'cygwin':
env['BUILD_PLATFORM'] = 'win'
env['HOST_ARCH'] = os.environ.get('HOST_ARCH', 'x86_32')
env['HOST_TRIPLE'] = 'i686_pc_cygwin'
elif sys.platform == 'darwin':
env['BUILD_PLATFORM'] = 'mac'
env['HOST_ARCH'] = os.environ.get('HOST_ARCH', 'x86_64')
env['HOST_TRIPLE'] = 'x86_64_apple_darwin'
elif sys.platform == 'win32':
env['BUILD_PLATFORM'] = 'win'
env['HOST_ARCH'] = os.environ.get('HOST_ARCH', 'x86_64')
env['HOST_TRIPLE'] = 'i686_w64_mingw32'
# TODO(dschuff) unify this with toolchain_build_pnacl
msys_path = os.environ.get(
'MSYS',
os.path.join(os.getcwd(), 'mingw32', 'msys', 'bin'))
os.environ['PATH'] = os.pathsep.join([os.environ['PATH'], msys_path])
else:
Fatal("Unknown system " + sys.platform)
if env['HOST_ARCH'] in ['i386', 'i686']:
env['HOST_ARCH'] = 'x86_32'
# Set up the rest of the environment.
env['NACL_ROOT'] = pwd
env['LLVM_TESTSUITE_SRC'] = (
'{NACL_ROOT}/toolchain_build/src/llvm-test-suite'.format(**env))
env['LLVM_TESTSUITE_BUILD'] = (
'{NACL_ROOT}/pnacl/build/llvm-test-suite'.format(**env))
env['TC_SRC_LLVM'] = (
'{NACL_ROOT}/toolchain_build/src/llvm'.format(**env))
env['TC_BUILD_LLVM'] = options.llvm_buildpath or (
'{NACL_ROOT}/toolchain_build/out/llvm_{HOST_TRIPLE}_work'.format(**env))
env['TC_BUILD_LIBCXX'] = (
('{NACL_ROOT}/toolchain_build/out/' +
'libcxx_le32_work/').format(**env))
env['PNACL_CONCURRENCY'] = os.environ.get('PNACL_CONCURRENCY', '8')
# The toolchain used may not be the one downloaded, but one that is freshly
# built into a different directory,
# Overriding the default here will not affect the sel_ldr
# and IRT used to run the tests (they are controlled by run.py)
env['PNACL_TOOLCHAIN_DIR'] = (
os.environ.get('PNACL_TOOLCHAIN_DIR',
'{BUILD_PLATFORM}_x86/pnacl_newlib'.format(**env)))
env['PNACL_BIN'] = (
'{NACL_ROOT}/toolchain/{PNACL_TOOLCHAIN_DIR}/bin'.format(**env))
env['PNACL_SDK_DIR'] = (
'{NACL_ROOT}/toolchain/{PNACL_TOOLCHAIN_DIR}/le32-nacl/lib'
.format(**env))
env['PNACL_SCRIPTS'] = '{NACL_ROOT}/pnacl/scripts'.format(**env)
env['LLVM_REGRESSION_KNOWN_FAILURES'] = (
'{pwd}/pnacl/scripts/llvm_regression_known_failures.txt'.format(pwd=pwd))
env['LIBCXX_KNOWN_FAILURES'] = (
'{pwd}/pnacl/scripts/libcxx_known_failures.txt'.format(pwd=pwd))
return env
def ToolchainWorkDirExists(work_dir):
# TODO(dschuff): Because this script is run directly from the buildbot
# script and not as part of a toolchain_build rule, we do not know
# whether the llvm target was actually built (in which case the working
# directory is still there) or whether it was just retrieved from cache
# (in which case it was clobbered, since the bots run with --clobber).
# Check if ninja or make rule exists.
return (os.path.isfile(os.path.join(work_dir, 'build.ninja')) or
os.path.isfile(os.path.join(work_dir, 'Makefile')))
def RunLitTest(testdir, testarg, lit_failures, env, options):
"""Run LLVM lit tests, and check failures against known failures.
Args:
testdir: Directory with the make/ninja file to test.
testarg: argument to pass to make/ninja.
env: The result of SetupEnvironment().
options: The result of OptionParser().parse_args().
Returns:
0 always
"""
with remember_cwd():
if not ToolchainWorkDirExists(testdir):
print('Working directory %s is empty. Not running tests' % testdir)
if env['PNACL_BUILDBOT'] != 'false' or options.verbose:
print('@@@STEP_TEXT (skipped)@@@')
return 0
os.chdir(testdir)
sub_env = os.environ.copy()
# Tell run.py to use the architecture specified by --arch, or the
# current host architecture if none was provided.
sub_env['PNACL_RUN_ARCH'] = options.arch or env['HOST_ARCH']
maker = 'ninja' if os.path.isfile('./build.ninja') else 'make'
cmd = [maker, testarg, '-v' if maker == 'ninja' else 'VERBOSE=1']
print('Running lit test:', ' '.join(cmd))
make_pipe = subprocess.Popen(cmd, env=sub_env, stdout=subprocess.PIPE)
lines = []
# When run by a buildbot, we need to incrementally tee the 'make'
# stdout to our stdout, rather than collect its entire stdout and
# print it at the end. Otherwise the watchdog may try to kill the
# process after too long without any output.
#
# Note: We use readline instead of 'for line in make_pipe.stdout'
# because otherwise the process on the Cygwin bot seems to hang
# when the 'make' process completes (with slightly truncated
# output). The readline avoids buffering when reading from a
# pipe in Python 2, which may be complicit in the problem.
for line in iter(make_pipe.stdout.readline, ''):
if env['PNACL_BUILDBOT'] != 'false' or options.verbose:
# The buildbots need to be fully verbose and print all output.
print(str(datetime.datetime.now()) + ' ' + line, end=' ')
lines.append(line)
print(str(datetime.datetime.now()) + ' ' +
"Waiting for '%s' to complete." % cmd)
make_pipe.wait()
make_stdout = ''.join(lines)
parse_options = vars(options)
parse_options['lit'] = True
parse_options['excludes'].append(env[lit_failures])
parse_options['attributes'].append(env['BUILD_PLATFORM'])
parse_options['attributes'].append(env['HOST_ARCH'])
print(str(datetime.datetime.now()) + ' ' +
'Parsing LIT test report output.')
ret = parse_llvm_test_report.Report(parse_options, filecontents=make_stdout)
return ret
def EnsureSdkExists(env):
"""Ensure that a build of the SDK exists. Exits if not.
Args:
env: The result of SetupEnvironment().
"""
libnacl_path = os.path.join(env['PNACL_SDK_DIR'], 'libnacl.a')
if not os.path.isfile(libnacl_path):
Fatal("""
ERROR: libnacl does not seem to exist in %s
ERROR: have you run 'pnacl/build.sh sdk' ?
""" % libnacl_path)
def TestsuitePrereq(env, options):
"""Run the LLVM test suite prerequisites.
Args:
env: The result of SetupEnvironment().
options: The result of OptionParser().parse_args().
Returns:
0 for success, non-zero integer on failure.
"""
arch = options.arch or Fatal("Error: missing --arch argument")
return subprocess.call(['./scons',
'platform=' + arch,
'irt_core',
'sel_ldr',
'elf_loader',
'-j{PNACL_CONCURRENCY}'.format(**env)])
def TestsuiteRun(env, config, options):
"""Run the LLVM test suite.
Args:
env: The result of SetupEnvironment().
config: A dict that was the result of ParseConfig(). This
determines the specific optimization levels.
options: The result of OptionParser().parse_args().
Returns:
0 for success, non-zero integer on failure.
"""
arch = options.arch or Fatal("Error: missing --arch argument")
EnsureSdkExists(env)
suffix = GetConfigSuffix(config)
opt_clang = config['frontend_opt']
opt_trans = config['backend_opt']
build_path = env['LLVM_TESTSUITE_BUILD']
if not os.path.isdir(build_path):
os.makedirs(build_path)
with remember_cwd():
os.chdir(build_path)
if not os.path.exists('Makefile'):
result = TestsuiteConfigure(env)
if result:
return result
result = subprocess.call(['make',
'-j{PNACL_CONCURRENCY}'.format(**env),
'OPTFLAGS=' + opt_clang,
'PNACL_TRANSLATE_FLAGS=' + opt_trans,
'PNACL_BIN={PNACL_BIN}'.format(**env),
'PNACL_RUN={NACL_ROOT}/run.py'.format(**env),
'COLLATE=true',
'PNACL_ARCH=' + arch,
'ENABLE_PARALLEL_REPORT=true',
'DISABLE_CBE=true',
'DISABLE_JIT=true',
'RUNTIMELIMIT=850',
'TEST=pnacl',
'report.csv'])
if result:
return result
os.rename('report.pnacl.csv', 'report.pnacl.{arch}.{suffix}.csv'
.format(arch=arch, suffix=suffix))
os.rename('report.pnacl.raw.out',
('report.pnacl.{arch}.{suffix}.raw.out'
.format(arch=arch, suffix=suffix)))
return 0
def TestsuiteConfigure(env):
"""Run the LLVM test suite configure script.
Args:
env: The result of SetupEnvironment().
Returns:
0 for success, non-zero integer on failure.
"""
build_path = env['LLVM_TESTSUITE_BUILD']
if not os.path.isdir(build_path):
os.makedirs(build_path)
with remember_cwd():
os.chdir(build_path)
args = ['{LLVM_TESTSUITE_SRC}/configure'.format(**env),
'--with-llvmcc=clang',
'--with-clang={PNACL_BIN}/pnacl-clang'.format(**env),
'--with-llvmsrc={TC_SRC_LLVM}'.format(**env),
'--with-llvmobj={TC_BUILD_LLVM}'.format(**env)]
result = subprocess.call(args)
return result
def TestsuiteClean(env):
"""Clean the LLVM test suite build directory.
Args:
env: The result of SetupEnvironment().
Returns:
0 always
Raises:
OSError: The LLVM_TESTSUITE_BUILD directory couldn't be removed
for some reason.
"""
if os.path.isdir(env['LLVM_TESTSUITE_BUILD']):
shutil.rmtree(env['LLVM_TESTSUITE_BUILD'])
elif os.path.isfile(env['LLVM_TESTSUITE_BUILD']):
os.remove(env['LLVM_TESTSUITE_BUILD'])
return 0
def TestsuiteReport(env, config, options):
"""Generate a report from the prior LLVM test suite run.
Args:
env: The result of SetupEnvironment().
config: A dict that was the result of ParseConfig(). This
determines the specific optimization levels.
options: The result of OptionParser().parse_args().
Returns:
0 for success, non-zero integer on failure.
"""
arch = options.arch or Fatal("Error: missing --arch argument")
suffix = GetConfigSuffix(config)
report_file = ('{LLVM_TESTSUITE_BUILD}/report.pnacl.{arch}.{suffix}.csv'
.format(arch=arch, suffix=suffix, **env))
failures1 = '{PNACL_SCRIPTS}/testsuite_known_failures_base.txt'.format(**env)
failures2 = '{PNACL_SCRIPTS}/testsuite_known_failures_pnacl.txt'.format(**env)
parse_options = vars(options)
parse_options['excludes'].extend([failures1, failures2])
parse_options['buildpath'] = env['LLVM_TESTSUITE_BUILD']
parse_options['attributes'].extend([arch,
config['frontend_attr'],
config['backend_attr']])
parse_options['testsuite'] = True
return parse_llvm_test_report.Report(parse_options, filename=report_file)
def RunTestsuiteSteps(env, config, options):
result = 0
if not ToolchainWorkDirExists(env['TC_BUILD_LLVM']):
print('LLVM build directory %s is empty. Skipping testsuite' %
env['TC_BUILD_LLVM'])
if env['PNACL_BUILDBOT'] != 'false' or options.verbose:
print('@@@STEP_TEXT (skipped)@@@')
return result
if options.testsuite_all or options.testsuite_prereq:
result = result or TestsuitePrereq(env, options)
if options.testsuite_all or options.testsuite_clean:
result = result or TestsuiteClean(env)
if options.testsuite_all or options.testsuite_configure:
result = result or TestsuiteConfigure(env)
if options.testsuite_all or options.testsuite_run:
result = result or TestsuiteRun(env, config, options)
if options.testsuite_all or options.testsuite_report:
result = result or TestsuiteReport(env, config, options)
return result
def main(argv):
options, args = ParseCommandLine(argv[1:])
if len(args):
Fatal("Unknown arguments: " + ', '.join(args))
config = ParseConfig(options)
env = SetupEnvironment(options)
result = 0
# Run each specified test in sequence, and return on the first failure.
if options.run_llvm_regression:
result = result or RunLitTest(env['TC_BUILD_LLVM'], 'check-all',
'LLVM_REGRESSION_KNOWN_FAILURES',
env, options)
if options.run_libcxx_tests:
EnsureSdkExists(env)
result = result or RunLitTest(env['TC_BUILD_LIBCXX'], 'check-libcxx',
'LIBCXX_KNOWN_FAILURES',
env, options)
result = result or RunTestsuiteSteps(env, config, options)
return result
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause | -7,646,420,506,978,552,000 | 38.438433 | 80 | 0.619944 | false |
Ralf3/samt2 | fuzzy/gui/helpwin_ui.py | 1 | 4487 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'helpwin.ui'
#
# Created: Fri Feb 5 15:59:05 2016
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_helpwin(object):
def setupUi(self, helpwin):
helpwin.setObjectName(_fromUtf8("helpwin"))
helpwin.resize(960, 599)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8("icons/area.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
helpwin.setWindowIcon(icon)
self.centralwidget = QtGui.QWidget(helpwin)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.textBrowser = QtGui.QTextBrowser(self.centralwidget)
font = QtGui.QFont()
font.setFamily(_fromUtf8("Sans Serif"))
font.setPointSize(11)
self.textBrowser.setFont(font)
self.textBrowser.setObjectName(_fromUtf8("textBrowser"))
self.verticalLayout.addWidget(self.textBrowser)
helpwin.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(helpwin)
self.menubar.setGeometry(QtCore.QRect(0, 0, 960, 21))
self.menubar.setObjectName(_fromUtf8("menubar"))
helpwin.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(helpwin)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
helpwin.setStatusBar(self.statusbar)
self.toolbar = QtGui.QToolBar(helpwin)
self.toolbar.setObjectName(_fromUtf8("toolbar"))
helpwin.addToolBar(QtCore.Qt.TopToolBarArea, self.toolbar)
self.actionHome = QtGui.QAction(helpwin)
self.actionHome.setObjectName(_fromUtf8("actionHome"))
self.actionBack = QtGui.QAction(helpwin)
self.actionBack.setObjectName(_fromUtf8("actionBack"))
self.actionForward = QtGui.QAction(helpwin)
self.actionForward.setObjectName(_fromUtf8("actionForward"))
self.actionTop = QtGui.QAction(helpwin)
self.actionTop.setObjectName(_fromUtf8("actionTop"))
self.actionSpacer = QtGui.QAction(helpwin)
self.actionSpacer.setObjectName(_fromUtf8("actionSpacer"))
self.toolbar.addAction(self.actionHome)
self.toolbar.addAction(self.actionSpacer)
self.toolbar.addAction(self.actionBack)
self.toolbar.addAction(self.actionForward)
self.toolbar.addAction(self.actionTop)
self.toolbar.addSeparator()
self.retranslateUi(helpwin)
QtCore.QMetaObject.connectSlotsByName(helpwin)
def retranslateUi(self, helpwin):
helpwin.setWindowTitle(_translate("helpwin", "SAMT2 - Fuzzy ----- Manual", None))
self.textBrowser.setHtml(_translate("helpwin", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Sans Serif\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p></body></html>", None))
self.toolbar.setWindowTitle(_translate("helpwin", "toolBar", None))
self.actionHome.setText(_translate("helpwin", "Home", None))
self.actionBack.setText(_translate("helpwin", "Back", None))
self.actionForward.setText(_translate("helpwin", "Forward", None))
self.actionTop.setText(_translate("helpwin", "Top", None))
self.actionTop.setStatusTip(_translate("helpwin", "Back to top", None))
self.actionTop.setWhatsThis(_translate("helpwin", "Top", None))
self.actionSpacer.setText(_translate("helpwin", "Spacer", None))
self.actionSpacer.setToolTip(_translate("helpwin", "Spacer", None))
| gpl-3.0 | 2,420,758,255,669,234,700 | 48.307692 | 180 | 0.685313 | false |
tensorflow/probability | tensorflow_probability/python/distributions/vector_exponential_linear_operator.py | 1 | 13153 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Vectorized Exponential distribution class, directly using LinearOperator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import chain as chain_bijector
from tensorflow_probability.python.bijectors import scale_matvec_linear_operator
from tensorflow_probability.python.bijectors import shift as shift_bijector
from tensorflow_probability.python.bijectors import softplus as softplus_bijector
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.distributions import exponential
from tensorflow_probability.python.distributions import sample
from tensorflow_probability.python.distributions import transformed_distribution
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import distribution_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import tensorshape_util
__all__ = ['VectorExponentialLinearOperator']
_mvn_sample_note = """
`value` is a batch vector with compatible shape if `value` is a `Tensor` whose
shape can be broadcast up to either:
```python
self.batch_shape + self.event_shape
```
or
```python
[M1, ..., Mm] + self.batch_shape + self.event_shape
```
"""
class VectorExponentialLinearOperator(
distribution.AutoCompositeTensorDistribution,
transformed_distribution.TransformedDistribution):
"""The vectorization of the Exponential distribution on `R^k`.
The vector exponential distribution is defined over a subset of `R^k`, and
parameterized by a (batch of) length-`k` `loc` vector and a (batch of) `k x k`
`scale` matrix: `covariance = scale @ scale.T`, where `@` denotes
matrix-multiplication.
#### Mathematical Details
The probability density function (pdf) is
```none
pdf(y; loc, scale) = exp(-||x||_1) / Z, for y in S(loc, scale),
x = inv(scale) @ (y - loc),
Z = |det(scale)|,
```
where:
* `loc` is a vector in `R^k`,
* `scale` is a linear operator in `R^{k x k}`, `cov = scale @ scale.T`,
* `S = {loc + scale @ x : x in R^k, x_1 > 0, ..., x_k > 0}`, is an image of
the positive half-space,
* `||x||_1` denotes the `l1` norm of `x`, `sum_i |x_i|`,
* `Z` denotes the normalization constant.
The VectorExponential distribution is a member of the [location-scale
family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X = (X_1, ..., X_k), each X_i ~ Exponential(rate=1)
Y = (Y_1, ...,Y_k) = scale @ X + loc
```
#### About `VectorExponential` and `Vector` distributions in TensorFlow.
The `VectorExponential` is a non-standard distribution that has useful
properties.
The marginals `Y_1, ..., Y_k` are *not* Exponential random variables, due to
the fact that the sum of Exponential random variables is not Exponential.
Instead, `Y` is a vector whose components are linear combinations of
Exponential random variables. Thus, `Y` lives in the vector space generated
by `vectors` of Exponential distributions. This allows the user to decide the
mean and covariance (by setting `loc` and `scale`), while preserving some
properties of the Exponential distribution. In particular, the tails of `Y_i`
will be (up to polynomial factors) exponentially decaying.
To see this last statement, note that the pdf of `Y_i` is the convolution of
the pdf of `k` independent Exponential random variables. One can then show by
induction that distributions with exponential (up to polynomial factors) tails
are closed under convolution.
#### Examples
```python
tfd = tfp.distributions
# Initialize a single 2-variate VectorExponential, supported on
# {(x, y) in R^2 : x > 0, y > 0}.
mat = [[1.0, 0.1],
[0.1, 1.0]]
vex = tfd.VectorExponentialLinearOperator(
scale=tf.linalg.LinearOperatorFullMatrix(mat))
# Compute the pdf of an `R^2` observation; return a scalar.
vex.prob([1., 2.]) # shape: []
  # Initialize a 2-batch of 3-variate Vector Exponentials.
mu = [[1., 2, 3],
[1., 0, 0]] # shape: [2, 3]
scale_diag = [[1., 2, 3],
[0.5, 1, 1.5]] # shape: [2, 3]
vex = tfd.VectorExponentialLinearOperator(
loc=mu,
scale=tf.linalg.LinearOperatorDiag(scale_diag))
# Compute the pdf of two `R^3` observations; return a length-2 vector.
x = [[1.9, 2.2, 3.1],
[10., 1.0, 9.0]] # shape: [2, 3]
vex.prob(x) # shape: [2]
```
"""
def __init__(self,
loc=None,
scale=None,
validate_args=False,
allow_nan_stats=True,
name='VectorExponentialLinearOperator'):
"""Construct Vector Exponential distribution supported on a subset of `R^k`.
The `batch_shape` is the broadcast shape between `loc` and `scale`
arguments.
The `event_shape` is given by last dimension of the matrix implied by
`scale`. The last dimension of `loc` (if provided) must broadcast with this.
Recall that `covariance = scale @ scale.T`.
Additional leading dimensions (if any) will index batches.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
scale: Instance of `LinearOperator` with same `dtype` as `loc` and shape
`[B1, ..., Bb, k, k]`.
validate_args: Python `bool`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: Python `bool`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
ValueError: if `scale` is unspecified.
TypeError: if not `scale.dtype.is_floating`
"""
parameters = dict(locals())
if loc is None:
loc = 0.0 # Implicit value for backwards compatibility.
if scale is None:
raise ValueError('Missing required `scale` parameter.')
if not dtype_util.is_floating(scale.dtype):
raise TypeError('`scale` parameter must have floating-point dtype.')
with tf.name_scope(name) as name:
# Since expand_dims doesn't preserve constant-ness, we obtain the
# non-dynamic value if possible.
# TODO(b/190433277): Verify GradientTape safety and use
# `convert_nonref_to_tensor` on `loc`.
loc = loc if loc is None else tf.convert_to_tensor(
loc, name='loc', dtype=scale.dtype)
batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
loc, scale)
self._loc = loc
self._scale = scale
super(VectorExponentialLinearOperator, self).__init__(
# TODO(b/137665504): Use batch-adding meta-distribution to set the
# batch shape instead of tf.ones.
# We use `Sample` instead of `Independent` because `Independent`
# requires concatenating `batch_shape` and `event_shape`, which loses
# static `batch_shape` information when `event_shape` is not
# statically known.
distribution=sample.Sample(
exponential.Exponential(
rate=tf.ones(batch_shape, dtype=scale.dtype),
allow_nan_stats=allow_nan_stats),
event_shape),
bijector=shift_bijector.Shift(shift=loc)(
scale_matvec_linear_operator.ScaleMatvecLinearOperator(
scale=scale, validate_args=validate_args)),
validate_args=validate_args,
name=name)
self._parameters = parameters
@property
def loc(self):
"""The `loc` `Tensor` in `Y = scale @ X + loc`."""
return self._loc
@property
def scale(self):
"""The `scale` `LinearOperator` in `Y = scale @ X + loc`."""
return self._scale
experimental_is_sharded = False
@distribution_util.AppendDocstring(_mvn_sample_note)
def _log_prob(self, x):
return super(VectorExponentialLinearOperator, self)._log_prob(x)
@distribution_util.AppendDocstring(_mvn_sample_note)
def _prob(self, x):
return super(VectorExponentialLinearOperator, self)._prob(x)
def _mean(self):
# Let
# W = (w1,...,wk), with wj ~ iid Exponential(0, 1).
# Then this distribution is
# X = loc + LW,
# and then E[X] = loc + L1, where 1 is the vector of ones.
scale_x_ones = self.scale.matvec(
tf.ones(self._mode_mean_shape(), self.dtype))
if self.loc is None:
return scale_x_ones
return tf.identity(self.loc) + scale_x_ones
def _covariance(self):
# Let
# W = (w1,...,wk), with wj ~ iid Exponential(0, 1).
# Then this distribution is
# X = loc + LW,
# and then since Cov(wi, wj) = 1 if i=j, and 0 otherwise,
# Cov(X) = L Cov(W W^T) L^T = L L^T.
if distribution_util.is_diagonal_scale(self.scale):
answer = tf.linalg.diag(tf.square(self.scale.diag_part()))
else:
answer = self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)
return self._broadcast_covariance_like_with_loc(answer)
def _variance(self):
if distribution_util.is_diagonal_scale(self.scale):
answer = tf.square(self.scale.diag_part())
elif (isinstance(self.scale, tf.linalg.LinearOperatorLowRankUpdate) and
self.scale.is_self_adjoint):
answer = tf.linalg.diag_part(self.scale.matmul(self.scale.to_dense()))
else:
answer = tf.linalg.diag_part(
self.scale.matmul(self.scale.to_dense(), adjoint_arg=True))
return self._broadcast_variance_like_with_loc(answer)
def _stddev(self):
if distribution_util.is_diagonal_scale(self.scale):
answer = tf.abs(self.scale.diag_part())
elif (isinstance(self.scale, tf.linalg.LinearOperatorLowRankUpdate) and
self.scale.is_self_adjoint):
answer = tf.sqrt(
tf.linalg.diag_part(self.scale.matmul(self.scale.to_dense())))
else:
answer = tf.sqrt(
tf.linalg.diag_part(
self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)))
return self._broadcast_variance_like_with_loc(answer)
def _mode(self):
scale_x_zeros = self.scale.matvec(
tf.zeros(self._mode_mean_shape(), self.dtype))
if self.loc is None:
return scale_x_zeros
return tf.identity(self.loc) + scale_x_zeros
def _mode_mean_shape(self):
"""Shape for the mode/mean Tensors."""
shape = tensorshape_util.concatenate(self.batch_shape, self.event_shape)
has_static_shape = tensorshape_util.is_fully_defined(shape)
if not has_static_shape:
shape = tf.concat([
self.batch_shape_tensor(),
self.event_shape_tensor(),
], 0)
return shape
def _sample_control_dependencies(self, x):
assertions = []
if not self.validate_args:
return assertions
loc = 0. if self.loc is None else tf.convert_to_tensor(self.loc)
y = self.scale.solvevec(x - loc)
assertions.append(assert_util.assert_non_negative(
y, message='Sample is not contained in the support.'))
return assertions
def _default_event_space_bijector(self):
return chain_bijector.Chain([
shift_bijector.Shift(shift=self.loc, validate_args=self.validate_args),
scale_matvec_linear_operator.ScaleMatvecLinearOperator(
scale=self.scale, validate_args=self.validate_args),
softplus_bijector.Softplus(validate_args=self.validate_args)
], validate_args=self.validate_args)
def _broadcast_variance_like_with_loc(self, item):
if self.loc is None:
return item
return item + tf.zeros_like(self.loc)
def _broadcast_covariance_like_with_loc(self, item):
if self.loc is None:
return item
if tensorshape_util.rank(self.loc.shape) == 0:
# Scalar loc is irrelevant; but this check only works if that's
# known statically.
return item
event_shape = self.event_shape_tensor()
cov_shape = tf.concat(
[self.batch_shape_tensor(), event_shape, event_shape], axis=0)
return tf.broadcast_to(item, cov_shape)
| apache-2.0 | 7,902,681,406,365,660,000 | 37.014451 | 81 | 0.665095 | false |
v-iam/azure-sdk-for-python | azure-mgmt-billing/azure/mgmt/billing/models/__init__.py | 3 | 1200 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .billing_period import BillingPeriod
from .download_url import DownloadUrl
from .error_details import ErrorDetails
from .error_response import ErrorResponse, ErrorResponseException
from .invoice import Invoice
from .operation_display import OperationDisplay
from .operation import Operation
from .resource import Resource
from .billing_period_paged import BillingPeriodPaged
from .invoice_paged import InvoicePaged
from .operation_paged import OperationPaged
__all__ = [
'BillingPeriod',
'DownloadUrl',
'ErrorDetails',
'ErrorResponse', 'ErrorResponseException',
'Invoice',
'OperationDisplay',
'Operation',
'Resource',
'BillingPeriodPaged',
'InvoicePaged',
'OperationPaged',
]
| mit | 6,976,934,034,514,775,000 | 32.333333 | 76 | 0.668333 | false |
mitchins/pelican-plugins | neighbors/neighbors.py | 13 | 1936 | # -*- coding: utf-8 -*-
"""
Neighbor Articles Plugin for Pelican
====================================
This plugin adds ``next_article`` (newer) and ``prev_article`` (older)
variables to the article's context
"""
from pelican import signals
def iter3(seq):
it = iter(seq)
nxt = None
cur = next(it)
for prv in it:
yield nxt, cur, prv
nxt, cur = cur, prv
yield nxt, cur, None
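# Illustrative note (not in the original plugin): for a hypothetical input
# [a1, a2, a3] the generator above yields (None, a1, a2), (a1, a2, a3) and
# (a2, a3, None), i.e. each item together with its newer and older neighbour.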
def get_translation(article, prefered_language):
if not article:
return None
for translation in article.translations:
if translation.lang == prefered_language:
return translation
return article
def set_neighbors(articles, next_name, prev_name):
for nxt, cur, prv in iter3(articles):
exec("cur.{} = nxt".format(next_name))
exec("cur.{} = prv".format(prev_name))
for translation in cur.translations:
exec(
"translation.{} = get_translation(nxt, translation.lang)".format(
next_name))
exec(
"translation.{} = get_translation(prv, translation.lang)".format(
prev_name))
def neighbors(generator):
set_neighbors(generator.articles, 'next_article', 'prev_article')
for category, articles in generator.categories:
articles.sort(key=(lambda x: x.date), reverse=(True))
set_neighbors(
articles, 'next_article_in_category', 'prev_article_in_category')
if hasattr(generator, 'subcategories'):
for subcategory, articles in generator.subcategories:
articles.sort(key=(lambda x: x.date), reverse=(True))
index = subcategory.name.count('/')
next_name = 'next_article_in_subcategory{}'.format(index)
prev_name = 'prev_article_in_subcategory{}'.format(index)
set_neighbors(articles, next_name, prev_name)
def register():
signals.article_generator_finalized.connect(neighbors)
| agpl-3.0 | 6,031,071,882,090,224,000 | 32.37931 | 77 | 0.615186 | false |
kjiang8/Ardustat | Deprecated_Unsupported/Python_Client/ardustat_library_no_class.py | 1 | 6942 | import serial
import socket
import glob
import pickle
from time import sleep,time
import subprocess
import sys
import os
import atexit
port = ""
ser = serial.Serial()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mode = "socket"
debug = False
chatty = False
groundvalue = 0
p = None
def findPorts():
"""A commands to find possible ardustat ports with no Arguments, """
return glob.glob("/dev/tty.u*")
def auto_connect(sport):
"""This seems to work now! Enter the port to try listening. Trys to start a serial forwarder if one isn't started"""
port = ""
start_server = True
#try:
# connect(sport)
#except:
# start_server == True
if start_server and os.name == "posix":
#try os x
if len(glob.glob("/dev/tty.u*")) > 0:
port = glob.glob("/dev/tty.u*")
elif len(glob.glob("/dev/ttyUSB*")) > 0:
port = glob.glob("/dev/ttyUSB*")
else:
print "can't see any ardustats. PEACE."
sys.exit()
if len(port) > 0:
print port[0]
try:
p = subprocess.Popen(("python tcp_serial_redirect.py "+"-P "+str(sport)+" "+port[0]+" 57600").split())
print "connected!"
except:
print "probably open trying to connect"
sleep(3)
s.connect(("localhost",sport))
print "ardustat should be responding, trying a blink"
else:
print "not seeing any ardustats. PEACE."
sys.exit()
def blink():
rawwrite(" ");
def connect(port):
	global ser  # rebind the module-level serial object (not a new local)
	if mode == "serial":
ser = serial.Serial(port,57600)
ser.timeout = 1
ser.open()
return "connected to serial"
if mode == "socket":
s.connect(("localhost",port))
return "connected to socket"
def ocv():
rawwrite("-0000")
def potentiostat(potential):
"""Argument: Potential (V). Sets the potentiostat"""
#potential = potential#+groundvalue
if potential < 0: potential = str(2000+int(1023*(abs(potential)/5.0))).rjust(4,"0")
else: potential = str(int(1023*(potential/5.0))).rjust(4,"0")
if potential == "2000": potential = "0000"
rawwrite("p"+potential)
def rawwrite(command):
if mode == "serial":
ser.write(command+"\n")
if mode == "socket":
s.send(command)
sleep(.05)
if chatty:
return parsedread()
def rawread():
rawwrite("s0000")
sleep(.01)
if mode == "serial":
data = ser.readlines()
		return data[-1]
if mode == "socket":
a = ""
while 1:
a += s.recv(1024)
if a.find("ST\r\n") > 1: return a.strip()
def solve_for_r(input_r,v_in,v_out):
return input_r*((v_in/v_out)-1)
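# Worked example (hypothetical numbers): with a known 1000 ohm resistor,
# v_in = 5.0 V and v_out = 2.5 V, solve_for_r(1000, 5.0, 2.5) returns
# 1000*((5.0/2.5)-1) = 1000 ohms for the unknown leg of the voltage divider.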
r_fixed_bool = False
r_fixed = 50
def galvanostat(current):
"""Tries to pick the ideal resistance and sets a current difference"""
#V = I R -> I = delta V / R
#goal -> delta V = .2 V
R_goal = abs(.1 / current)
R_real = 10000
R_set = 0
err = 1000
for d in res_table:
this_err = abs(res_table[d][0]-R_goal)
if this_err < err:
err = this_err
R_set = d
R_real = res_table[d][0]
#Solve for real delta V
if r_fixed_bool:
R_real = r_fixed
delta_V = abs(current*R_real)
if debug: print current,delta_V,R_real,R_set
potential = str(int(1023*(delta_V/5.0))).rjust(4,"0")
if current < 0:
potential = str(int(potential)+2000)
if debug: print "gstat setting:", potential
print potential
if potential == "2000" or potential == "0000":
ocv()
else:
#Write!
rawwrite("r"+str(R_set).rjust(4,"0"))
sleep(.1)
rawwrite("g"+str(potential))
res_table = {}
def load_resistance_table(id):
	global res_table  # update the module-level table used by galvanostat() and parseline()
	res_table = pickle.load(open("unit_"+str(id)+".pickle"))
	if debug: print res_table
def calibrate(known_r,id):
"""Runs a calibration by setting the resistance against a known resistor and solving the voltage divider equation. Cycles through all possible resistances"""
#Make a Big List of Correlations
ressers = []
rawwrite("R")
sleep(.1)
rawwrite("r0000")
for i in range(0,10):
for y in range(0,255):
rawwrite("r"+str(y).rjust(4,"0"))
sleep(.05)
values = parsedread()
if values['valid']:
solved_r = solve_for_r(known_r,values['DAC0_ADC'],values['cell_ADC'])
print values['pot_step'], solved_r
ressers.append([int(values['pot_step']), solved_r])
else: print "bad read"
ocv()
#Make a Big List of Correlations
big_dict = {}
for r in ressers:
try:
big_dict[r[0]].append(r[1])
except:
big_dict[r[0]] = []
big_dict[r[0]].append(r[1])
#Find Values
final_dict = {}
for b in big_dict.keys():
final_dict[b] = [sum(big_dict[b])/len(big_dict[b]),(max(big_dict[b])-min(big_dict[b]))/2.0]
pickle.dump(final_dict,open("unit_"+str(id)+".pickle","w"))
return final_dict
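# Typical flow (a sketch; the resistor value and unit id are assumptions):
# calibrate(1000.0, 1) sweeps the digital pot against a known 1 kohm resistor
# and writes unit_1.pickle; load_resistance_table(1) reloads that table later
# so galvanostat() can map resistor steps back to real resistances.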
def parsedread():
return parseline(rawread())
def moveground():
"""Argument: Potential (V). Moves ground to allow negative potentials. Check jumper position!"""
potential = str(int(1023*(groundvalue/5.0))).rjust(4,"0")
rawwrite("d"+potential)
def refbasis(reading,ref):
"""Argument: raw ADC reading, raw ADC basis. Returns an absolute potential based on the ADC reading against the 2.5 V reference
(reading from pot as a value between 0 and 1023, reference value in V (e.g. 2.5))"""
return round((float(reading)/float(ref))*2.5,3)
def resbasis(reading,pot):
"""Argument, raw pot setting, max pOT reading. Returns the value for the givening potentiometer setting
(reading as value between 0 and 255, pot lookup variable). Wildly Inaccurate. Don't use."""
return round(10+(float(reading)/255.0)*pot,2)
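# Worked examples (hypothetical readings): refbasis(512, 1023) gives
# round((512/1023.0)*2.5, 3) = 1.251 V against the 2.5 V reference, and
# resbasis(128, 10000.0) gives round(10+(128/255.0)*10000.0, 2) = 5029.61.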
def parseline(reading):
outdict = {}
#format GO,1023,88,88,255,0,1,-0000,0,0,510,ST
#splitline[0] -> "GO"
#splitline[1] -> DAC Setting
#splitline[2] -> Cell Voltage
#splitline[3] -> DAC Measurement
#splitline[4] -> DVR Setting
#splitline[5] -> "setout" variable in firmware. Don't know what this represents
#splitline[6] -> Mode (0 = manual, 1 = OCV, 2 = potentiostat, 3 = galvanostat)
#splitline[7] -> Last command received
#splitline[8] -> GND Measurement
#splitline[9] -> Reference Electrode
#splitline[10] -> Reference Voltage
#splitline[11] -> "ST"
if reading.find("GO") == 0 and reading.find("ST") and reading.rfind("GO") == 0:
outdict['valid'] = True
outdict['raw'] = reading
outdict['time'] = time()
parts = reading.split(",")
outdict['ref'] = float(parts[len(parts)-3])
outdict['DAC0_ADC'] = refbasis(parts[3],outdict['ref'])-refbasis(parts[8],outdict['ref'])
outdict['cell_ADC'] = refbasis(parts[2],outdict['ref'])-refbasis(parts[8],outdict['ref'])
outdict['ref_ADC'] = refbasis(parts[9],outdict['ref'])-refbasis(parts[8],outdict['ref'])
outdict['pot_step'] = parts[4]
outdict['work_v_ref'] = outdict['cell_ADC'] - outdict['ref_ADC']
outdict['unit_no'] = int(parts[11])
##Try to read from the res_table, otherwise make the dangerous assumption
try:
outdict['res'] = res_table[int(outdict['pot_step'])][0]
except Exception, err:
if debug: print err
outdict['res'] = resbasis(outdict['pot_step'],10000.0)
outdict['current'] = (float(outdict['DAC0_ADC'])-float(outdict['cell_ADC']))/outdict['res']
else:
outdict['valid'] = False
return outdict
| bsd-2-clause | -3,351,243,363,049,091,000 | 27.334694 | 159 | 0.653558 | false |
Saurabh7/shogun | examples/undocumented/python_modular/kernel_comm_word_string_modular.py | 26 | 1453 | #!/usr/bin/env python
from tools.load import LoadMatrix
lm=LoadMatrix()
traindat = lm.load_dna('../data/fm_train_dna.dat')
testdat = lm.load_dna('../data/fm_test_dna.dat')
parameter_list = [[traindat,testdat,4,0,False, False],[traindat,testdat,4,0,False,False]]
def kernel_comm_word_string_modular (fm_train_dna=traindat, fm_test_dna=testdat, order=3, gap=0, reverse = False, use_sign = False):
from modshogun import CommWordStringKernel
from modshogun import StringWordFeatures, StringCharFeatures, DNA
from modshogun import SortWordString
charfeat=StringCharFeatures(DNA)
charfeat.set_features(fm_train_dna)
feats_train=StringWordFeatures(charfeat.get_alphabet())
feats_train.obtain_from_char(charfeat, order-1, order, gap, reverse)
preproc=SortWordString()
preproc.init(feats_train)
feats_train.add_preprocessor(preproc)
feats_train.apply_preprocessor()
charfeat=StringCharFeatures(DNA)
charfeat.set_features(fm_test_dna)
feats_test=StringWordFeatures(charfeat.get_alphabet())
feats_test.obtain_from_char(charfeat, order-1, order, gap, reverse)
feats_test.add_preprocessor(preproc)
feats_test.apply_preprocessor()
kernel=CommWordStringKernel(feats_train, feats_train, use_sign)
km_train=kernel.get_kernel_matrix()
kernel.init(feats_train, feats_test)
km_test=kernel.get_kernel_matrix()
return km_train,km_test,kernel
if __name__=='__main__':
print('CommWordString')
kernel_comm_word_string_modular(*parameter_list[0])
| mit | 4,664,420,409,205,243,000 | 35.325 | 132 | 0.770819 | false |
miketwo/pylacuna | pylacuna/enhancedbody.py | 1 | 4727 | #!/usr/bin/env python
import pylacuna.enhancedbuilding as enhancedbuilding
# from pylacuna.core.body import Body
# from caching import GLOBAL_CACHE
class EnhancedBody(object):
def __init__(self, body, cache):
'''
body -- A body object on which to perform calcs
cache -- a cache object. Must support methods:
get()
store()
keys()
'''
self.cache = cache
self._body = body
self.buildings = self.get_buildings()
def __str__(self):
try:
desc = ("{name} ({id}) at <{x},{y}>\n"
"Size {size} {type} in orbit {orbit} around {star_name} ({star_id})\n"
"".format(**self._body))
desc += "RESOURCES:\n" + self.get_resources()
if self.is_owned():
desc += "PRODUCTION:\n" + self.get_production()
except KeyError:
return str(self.__dict__)
return desc
def repair_list(self, building_ids):
return self._body.repair_list(building_ids)
def rearrange_buildings(self, arrangement):
return self._body.rearrange_buildings(arrangement)
def get_buildable(self, x, y, tag):
return self._body.get_buildable(x, y, tag)
def rename(self, name):
        return self._body.rename(name)
def abandon(self):
return self._body.abandon()
def view_laws(self):
return self._body.view_laws()
def build(self, building_name, x, y):
        return self._body.build(building_name, x, y)
# --------------------------------------
def value(self):
print self
self.get_buildings()
FOOD_FACTOR = 1.0
WATER_FACTOR = 1.0
ORE_FACTOR = 1.0
ENERGY_FACTOR = 1.0
HAPPINESS_FACTOR = 1.0
OPEN_PLOTS_FACTOR = 1.0
WASTE_FACTOR = 1.0
OPEN_PLOTS_FACTOR = 1.0
open_plots = int(self['plots_available']) - int(self['building_count'])
CAPACITY_TARGET = 0.5
resources = ['food', 'water', 'ore', 'energy', 'waste']
cap_values = []
return int(self['food_hour']) + int(self['water_hour']) + int(self['ore_hour']) + int(self['energy_hour']) + int(self['happiness_hour']) + open_plots - int(self['waste_hour'])
def is_owned(self):
return True if 'building_count' in self else False
def evaluate_upgrades(self, list_of_buildings):
for bldg in list_of_buildings:
if int(bldg['upgrade']['can']) != 1:
print "Building {}({}) is not upgradeable".format(bldg['name'], bldg.id)
continue
tmp = bldg.get_stats_for_level(int(bldg['level']) + 1)
buildingup = enhancedbuilding.EnhancedBuilding.from_building(tmp)
buildingnow = enhancedbuilding.EnhancedBuilding.from_building(bldg)
diff = buildingup - buildingnow
print diff
def get_production_buildings(self):
print "CURRENT CACHE: {}\n{}".format(len(self.cache.keys()), self.cache.keys())
self.get_buildings()
prod = []
for bldg in self.buildings:
if "ore_hour" not in bldg:
print "Skipping"
continue
if (int(bldg["ore_hour"]) > 0 or
int(bldg["water_hour"]) > 0 or
int(bldg["food_hour"]) > 0 or
int(bldg["energy_hour"]) > 0):
print bldg
prod.append(bldg)
return prod
def get_buildings(self):
self.buildings = self._body.get_buildings()
for bldg in self.buildings:
bldg = enhancedbuilding.EnhancedBuilding(self.cache, bldg)
bldg.view()
return self.buildings
def get_production(self):
desc = " Water: {}/{} at {}/hr\n".format(self['water_stored'], self['water_capacity'], self['water_hour'])
desc += " Energy: {}/{} at {}/hr\n".format(self['energy_stored'], self['energy_capacity'], self['energy_hour'])
desc += " Food: {}/{} at {}/hr\n".format(self['food_stored'], self['food_capacity'], self['food_hour'])
desc += " Ore: {}/{} at {}/hr\n".format(self['ore_stored'], self['ore_capacity'], self['ore_hour'])
desc += " Waste: {}/{} at {}/hr\n".format(self['waste_stored'], self['waste_capacity'], self['waste_hour'])
return desc
def get_resources(self):
return " Water - {}\n{}".format(self['water'], self.get_ore_pretty())
def get_ore_pretty(self):
desc = ""
for material in self['ore']:
amount = int(self['ore'][material])
if amount > 1:
desc += " {} - {}\n".format(material, amount)
return desc
| mit | 1,330,059,170,199,518,000 | 36.220472 | 183 | 0.541781 | false |
pongem/python-bot-project | appengine/standard/botapp/env/lib/python2.7/site-packages/django/contrib/admin/filters.py | 49 | 18042 | """
This encapsulates the logic for displaying filters in the Django admin.
Filters are specified in models with the "list_filter" option.
Each filter subclass knows how to display a filter for a field that passes a
certain test -- e.g. being a DateField or ForeignKey.
"""
import datetime
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.utils import (
get_model_from_relation, prepare_lookup_value, reverse_field_path,
)
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.db import models
from django.utils import timezone
from django.utils.encoding import force_text, smart_text
from django.utils.translation import ugettext_lazy as _
class ListFilter(object):
title = None # Human-readable title to appear in the right sidebar.
template = 'admin/filter.html'
def __init__(self, request, params, model, model_admin):
# This dictionary will eventually contain the request's query string
# parameters actually used by this filter.
self.used_parameters = {}
if self.title is None:
raise ImproperlyConfigured(
"The list filter '%s' does not specify "
"a 'title'." % self.__class__.__name__)
def has_output(self):
"""
Returns True if some choices would be output for this filter.
"""
raise NotImplementedError('subclasses of ListFilter must provide a has_output() method')
def choices(self, changelist):
"""
Returns choices ready to be output in the template.
`changelist` is the ChangeList to be displayed.
"""
raise NotImplementedError('subclasses of ListFilter must provide a choices() method')
def queryset(self, request, queryset):
"""
Returns the filtered queryset.
"""
raise NotImplementedError('subclasses of ListFilter must provide a queryset() method')
def expected_parameters(self):
"""
Returns the list of parameter names that are expected from the
request's query string and that will be used by this filter.
"""
raise NotImplementedError('subclasses of ListFilter must provide an expected_parameters() method')
class SimpleListFilter(ListFilter):
# The parameter that should be used in the query string for that filter.
parameter_name = None
def __init__(self, request, params, model, model_admin):
super(SimpleListFilter, self).__init__(
request, params, model, model_admin)
if self.parameter_name is None:
raise ImproperlyConfigured(
"The list filter '%s' does not specify "
"a 'parameter_name'." % self.__class__.__name__)
if self.parameter_name in params:
value = params.pop(self.parameter_name)
self.used_parameters[self.parameter_name] = value
lookup_choices = self.lookups(request, model_admin)
if lookup_choices is None:
lookup_choices = ()
self.lookup_choices = list(lookup_choices)
def has_output(self):
return len(self.lookup_choices) > 0
def value(self):
"""
Returns the value (in string format) provided in the request's
query string for this filter, if any. If the value wasn't provided then
returns None.
"""
return self.used_parameters.get(self.parameter_name)
def lookups(self, request, model_admin):
"""
Must be overridden to return a list of tuples (value, verbose value)
"""
raise NotImplementedError(
'The SimpleListFilter.lookups() method must be overridden to '
'return a list of tuples (value, verbose value)')
def expected_parameters(self):
return [self.parameter_name]
def choices(self, changelist):
yield {
'selected': self.value() is None,
'query_string': changelist.get_query_string({}, [self.parameter_name]),
'display': _('All'),
}
for lookup, title in self.lookup_choices:
yield {
'selected': self.value() == force_text(lookup),
'query_string': changelist.get_query_string({self.parameter_name: lookup}, []),
'display': title,
}
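# Illustrative sketch (not part of Django itself; the model and field names are
# hypothetical): a minimal SimpleListFilter subclass looks like
#
#   class DecadeBornListFilter(SimpleListFilter):
#       title = 'decade born'
#       parameter_name = 'decade'
#
#       def lookups(self, request, model_admin):
#           return (('80s', 'in the eighties'), ('90s', 'in the nineties'))
#
#       def queryset(self, request, queryset):
#           if self.value() == '80s':
#               return queryset.filter(birthday__year__gte=1980, birthday__year__lte=1989)
#           if self.value() == '90s':
#               return queryset.filter(birthday__year__gte=1990, birthday__year__lte=1999)
#           return queryset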
class FieldListFilter(ListFilter):
_field_list_filters = []
_take_priority_index = 0
def __init__(self, field, request, params, model, model_admin, field_path):
self.field = field
self.field_path = field_path
self.title = getattr(field, 'verbose_name', field_path)
super(FieldListFilter, self).__init__(
request, params, model, model_admin)
for p in self.expected_parameters():
if p in params:
value = params.pop(p)
self.used_parameters[p] = prepare_lookup_value(p, value)
def has_output(self):
return True
def queryset(self, request, queryset):
try:
return queryset.filter(**self.used_parameters)
except ValidationError as e:
raise IncorrectLookupParameters(e)
@classmethod
def register(cls, test, list_filter_class, take_priority=False):
if take_priority:
# This is to allow overriding the default filters for certain types
# of fields with some custom filters. The first found in the list
# is used in priority.
cls._field_list_filters.insert(
cls._take_priority_index, (test, list_filter_class))
cls._take_priority_index += 1
else:
cls._field_list_filters.append((test, list_filter_class))
@classmethod
def create(cls, field, request, params, model, model_admin, field_path):
for test, list_filter_class in cls._field_list_filters:
if not test(field):
continue
return list_filter_class(field, request, params, model, model_admin, field_path=field_path)
class RelatedFieldListFilter(FieldListFilter):
def __init__(self, field, request, params, model, model_admin, field_path):
other_model = get_model_from_relation(field)
self.lookup_kwarg = '%s__%s__exact' % (field_path, field.target_field.name)
self.lookup_kwarg_isnull = '%s__isnull' % field_path
self.lookup_val = request.GET.get(self.lookup_kwarg)
self.lookup_val_isnull = request.GET.get(self.lookup_kwarg_isnull)
super(RelatedFieldListFilter, self).__init__(
field, request, params, model, model_admin, field_path)
self.lookup_choices = self.field_choices(field, request, model_admin)
if hasattr(field, 'verbose_name'):
self.lookup_title = field.verbose_name
else:
self.lookup_title = other_model._meta.verbose_name
self.title = self.lookup_title
self.empty_value_display = model_admin.get_empty_value_display()
@property
def include_empty_choice(self):
"""
Return True if a "(None)" choice should be included, which filters
out everything except empty relationships.
"""
return self.field.null or (self.field.is_relation and self.field.many_to_many)
def has_output(self):
if self.include_empty_choice:
extra = 1
else:
extra = 0
return len(self.lookup_choices) + extra > 1
def expected_parameters(self):
return [self.lookup_kwarg, self.lookup_kwarg_isnull]
def field_choices(self, field, request, model_admin):
return field.get_choices(include_blank=False)
def choices(self, changelist):
yield {
'selected': self.lookup_val is None and not self.lookup_val_isnull,
'query_string': changelist.get_query_string(
{},
[self.lookup_kwarg, self.lookup_kwarg_isnull]
),
'display': _('All'),
}
for pk_val, val in self.lookup_choices:
yield {
'selected': self.lookup_val == smart_text(pk_val),
'query_string': changelist.get_query_string({
self.lookup_kwarg: pk_val,
}, [self.lookup_kwarg_isnull]),
'display': val,
}
if self.include_empty_choice:
yield {
'selected': bool(self.lookup_val_isnull),
'query_string': changelist.get_query_string({
self.lookup_kwarg_isnull: 'True',
}, [self.lookup_kwarg]),
'display': self.empty_value_display,
}
FieldListFilter.register(lambda f: f.remote_field, RelatedFieldListFilter)
class BooleanFieldListFilter(FieldListFilter):
def __init__(self, field, request, params, model, model_admin, field_path):
self.lookup_kwarg = '%s__exact' % field_path
self.lookup_kwarg2 = '%s__isnull' % field_path
self.lookup_val = request.GET.get(self.lookup_kwarg)
self.lookup_val2 = request.GET.get(self.lookup_kwarg2)
super(BooleanFieldListFilter, self).__init__(field, request, params, model, model_admin, field_path)
if (self.used_parameters and self.lookup_kwarg in self.used_parameters and
self.used_parameters[self.lookup_kwarg] in ('1', '0')):
self.used_parameters[self.lookup_kwarg] = bool(int(self.used_parameters[self.lookup_kwarg]))
def expected_parameters(self):
return [self.lookup_kwarg, self.lookup_kwarg2]
def choices(self, changelist):
for lookup, title in (
(None, _('All')),
('1', _('Yes')),
('0', _('No'))):
yield {
'selected': self.lookup_val == lookup and not self.lookup_val2,
'query_string': changelist.get_query_string({
self.lookup_kwarg: lookup,
}, [self.lookup_kwarg2]),
'display': title,
}
if isinstance(self.field, models.NullBooleanField):
yield {
'selected': self.lookup_val2 == 'True',
'query_string': changelist.get_query_string({
self.lookup_kwarg2: 'True',
}, [self.lookup_kwarg]),
'display': _('Unknown'),
}
FieldListFilter.register(
lambda f: isinstance(f, (models.BooleanField, models.NullBooleanField)),
BooleanFieldListFilter
)
class ChoicesFieldListFilter(FieldListFilter):
def __init__(self, field, request, params, model, model_admin, field_path):
self.lookup_kwarg = '%s__exact' % field_path
self.lookup_kwarg_isnull = '%s__isnull' % field_path
self.lookup_val = request.GET.get(self.lookup_kwarg)
self.lookup_val_isnull = request.GET.get(self.lookup_kwarg_isnull)
super(ChoicesFieldListFilter, self).__init__(
field, request, params, model, model_admin, field_path)
def expected_parameters(self):
return [self.lookup_kwarg, self.lookup_kwarg_isnull]
def choices(self, changelist):
yield {
'selected': self.lookup_val is None,
'query_string': changelist.get_query_string(
{}, [self.lookup_kwarg, self.lookup_kwarg_isnull]
),
'display': _('All')
}
none_title = ''
for lookup, title in self.field.flatchoices:
if lookup is None:
none_title = title
continue
yield {
'selected': smart_text(lookup) == self.lookup_val,
'query_string': changelist.get_query_string(
{self.lookup_kwarg: lookup}, [self.lookup_kwarg_isnull]
),
'display': title,
}
if none_title:
yield {
'selected': bool(self.lookup_val_isnull),
'query_string': changelist.get_query_string({
self.lookup_kwarg_isnull: 'True',
}, [self.lookup_kwarg]),
'display': none_title,
}
FieldListFilter.register(lambda f: bool(f.choices), ChoicesFieldListFilter)
class DateFieldListFilter(FieldListFilter):
def __init__(self, field, request, params, model, model_admin, field_path):
self.field_generic = '%s__' % field_path
self.date_params = {k: v for k, v in params.items() if k.startswith(self.field_generic)}
now = timezone.now()
# When time zone support is enabled, convert "now" to the user's time
# zone so Django's definition of "Today" matches what the user expects.
if timezone.is_aware(now):
now = timezone.localtime(now)
if isinstance(field, models.DateTimeField):
today = now.replace(hour=0, minute=0, second=0, microsecond=0)
else: # field is a models.DateField
today = now.date()
tomorrow = today + datetime.timedelta(days=1)
if today.month == 12:
next_month = today.replace(year=today.year + 1, month=1, day=1)
else:
next_month = today.replace(month=today.month + 1, day=1)
next_year = today.replace(year=today.year + 1, month=1, day=1)
self.lookup_kwarg_since = '%s__gte' % field_path
self.lookup_kwarg_until = '%s__lt' % field_path
self.links = (
(_('Any date'), {}),
(_('Today'), {
self.lookup_kwarg_since: str(today),
self.lookup_kwarg_until: str(tomorrow),
}),
(_('Past 7 days'), {
self.lookup_kwarg_since: str(today - datetime.timedelta(days=7)),
self.lookup_kwarg_until: str(tomorrow),
}),
(_('This month'), {
self.lookup_kwarg_since: str(today.replace(day=1)),
self.lookup_kwarg_until: str(next_month),
}),
(_('This year'), {
self.lookup_kwarg_since: str(today.replace(month=1, day=1)),
self.lookup_kwarg_until: str(next_year),
}),
)
if field.null:
self.lookup_kwarg_isnull = '%s__isnull' % field_path
self.links += (
(_('No date'), {self.field_generic + 'isnull': 'True'}),
(_('Has date'), {self.field_generic + 'isnull': 'False'}),
)
super(DateFieldListFilter, self).__init__(
field, request, params, model, model_admin, field_path)
def expected_parameters(self):
params = [self.lookup_kwarg_since, self.lookup_kwarg_until]
if self.field.null:
params.append(self.lookup_kwarg_isnull)
return params
def choices(self, changelist):
for title, param_dict in self.links:
yield {
'selected': self.date_params == param_dict,
'query_string': changelist.get_query_string(param_dict, [self.field_generic]),
'display': title,
}
FieldListFilter.register(
lambda f: isinstance(f, models.DateField), DateFieldListFilter)
# This should be registered last, because it's a last resort. For example,
# if a field is eligible to use the BooleanFieldListFilter, that'd be much
# more appropriate, and the AllValuesFieldListFilter won't get used for it.
class AllValuesFieldListFilter(FieldListFilter):
def __init__(self, field, request, params, model, model_admin, field_path):
self.lookup_kwarg = field_path
self.lookup_kwarg_isnull = '%s__isnull' % field_path
self.lookup_val = request.GET.get(self.lookup_kwarg)
self.lookup_val_isnull = request.GET.get(self.lookup_kwarg_isnull)
self.empty_value_display = model_admin.get_empty_value_display()
parent_model, reverse_path = reverse_field_path(model, field_path)
# Obey parent ModelAdmin queryset when deciding which options to show
if model == parent_model:
queryset = model_admin.get_queryset(request)
else:
queryset = parent_model._default_manager.all()
self.lookup_choices = (queryset
.distinct()
.order_by(field.name)
.values_list(field.name, flat=True))
super(AllValuesFieldListFilter, self).__init__(
field, request, params, model, model_admin, field_path)
def expected_parameters(self):
return [self.lookup_kwarg, self.lookup_kwarg_isnull]
def choices(self, changelist):
yield {
'selected': self.lookup_val is None and self.lookup_val_isnull is None,
'query_string': changelist.get_query_string({}, [self.lookup_kwarg, self.lookup_kwarg_isnull]),
'display': _('All'),
}
include_none = False
for val in self.lookup_choices:
if val is None:
include_none = True
continue
val = smart_text(val)
yield {
'selected': self.lookup_val == val,
'query_string': changelist.get_query_string({
self.lookup_kwarg: val,
}, [self.lookup_kwarg_isnull]),
'display': val,
}
if include_none:
yield {
'selected': bool(self.lookup_val_isnull),
'query_string': changelist.get_query_string({
self.lookup_kwarg_isnull: 'True',
}, [self.lookup_kwarg]),
'display': self.empty_value_display,
}
FieldListFilter.register(lambda f: True, AllValuesFieldListFilter)
class RelatedOnlyFieldListFilter(RelatedFieldListFilter):
def field_choices(self, field, request, model_admin):
pk_qs = model_admin.get_queryset(request).distinct().values_list('%s__pk' % self.field_path, flat=True)
return field.get_choices(include_blank=False, limit_choices_to={'pk__in': pk_qs})
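# Usage sketch (model and field names are hypothetical): these classes are
# normally picked through ModelAdmin.list_filter, e.g.
#
#     class BookAdmin(admin.ModelAdmin):
#         list_filter = (
#             'published',                             # filter chosen by field type
#             ('author', RelatedOnlyFieldListFilter),  # filter class named explicitly
#         )
#
# A bare field name goes through FieldListFilter.create() above, while the
# two-tuple form selects the filter class directly.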
| apache-2.0 | -2,297,045,167,604,673,000 | 39.726862 | 111 | 0.591952 | false |
xiaohaidao007/pandoraBox-SDK-mt7620 | staging_dir/host/lib/scons-2.5.0/SCons/Debug.py | 3 | 7481 | """SCons.Debug
Code for debugging SCons internal things. Shouldn't be
needed by most users. Quick shortcuts:
from SCons.Debug import caller_trace
caller_trace()
"""
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Debug.py rel_2.5.0:3543:937e55cd78f7 2016/04/09 11:29:54 bdbaddog"
import os
import sys
import time
import weakref
import inspect
# Global variable that gets set to 'True' by the Main script,
# when the creation of class instances should get tracked.
track_instances = False
# List of currently tracked classes
tracked_classes = {}
def logInstanceCreation(instance, name=None):
if name is None:
name = instance.__class__.__name__
if name not in tracked_classes:
tracked_classes[name] = []
if hasattr(instance, '__dict__'):
tracked_classes[name].append(weakref.ref(instance))
else:
# weakref doesn't seem to work when the instance
# contains only slots...
tracked_classes[name].append(instance)
def string_to_classes(s):
if s == '*':
return sorted(tracked_classes.keys())
else:
return s.split()
def fetchLoggedInstances(classes="*"):
classnames = string_to_classes(classes)
return [(cn, len(tracked_classes[cn])) for cn in classnames]
def countLoggedInstances(classes, file=sys.stdout):
for classname in string_to_classes(classes):
file.write("%s: %d\n" % (classname, len(tracked_classes[classname])))
def listLoggedInstances(classes, file=sys.stdout):
for classname in string_to_classes(classes):
file.write('\n%s:\n' % classname)
for ref in tracked_classes[classname]:
if inspect.isclass(ref):
obj = ref()
else:
obj = ref
if obj is not None:
file.write(' %s\n' % repr(obj))
def dumpLoggedInstances(classes, file=sys.stdout):
for classname in string_to_classes(classes):
file.write('\n%s:\n' % classname)
for ref in tracked_classes[classname]:
obj = ref()
if obj is not None:
file.write(' %s:\n' % obj)
for key, value in obj.__dict__.items():
file.write(' %20s : %s\n' % (key, value))
if sys.platform[:5] == "linux":
# Linux doesn't actually support memory usage stats from getrusage().
def memory():
mstr = open('/proc/self/stat').read()
mstr = mstr.split()[22]
return int(mstr)
elif sys.platform[:6] == 'darwin':
#TODO really get memory stats for OS X
def memory():
return 0
else:
try:
import resource
except ImportError:
try:
import win32process
import win32api
except ImportError:
def memory():
return 0
else:
def memory():
process_handle = win32api.GetCurrentProcess()
memory_info = win32process.GetProcessMemoryInfo( process_handle )
return memory_info['PeakWorkingSetSize']
else:
def memory():
res = resource.getrusage(resource.RUSAGE_SELF)
return res[4]
# returns caller's stack
def caller_stack():
import traceback
tb = traceback.extract_stack()
# strip itself and the caller from the output
tb = tb[:-2]
result = []
for back in tb:
# (filename, line number, function name, text)
key = back[:3]
result.append('%s:%d(%s)' % func_shorten(key))
return result
caller_bases = {}
caller_dicts = {}
def caller_trace(back=0):
"""
Trace caller stack and save info into global dicts, which
are printed automatically at the end of SCons execution.
"""
global caller_bases, caller_dicts
import traceback
tb = traceback.extract_stack(limit=3+back)
tb.reverse()
callee = tb[1][:3]
caller_bases[callee] = caller_bases.get(callee, 0) + 1
for caller in tb[2:]:
caller = callee + caller[:3]
try:
entry = caller_dicts[callee]
except KeyError:
caller_dicts[callee] = entry = {}
entry[caller] = entry.get(caller, 0) + 1
callee = caller
# print a single caller and its callers, if any
def _dump_one_caller(key, file, level=0):
leader = ' '*level
for v,c in sorted([(-v,c) for c,v in caller_dicts[key].items()]):
file.write("%s %6d %s:%d(%s)\n" % ((leader,-v) + func_shorten(c[-3:])))
if c in caller_dicts:
_dump_one_caller(c, file, level+1)
# print each call tree
def dump_caller_counts(file=sys.stdout):
for k in sorted(caller_bases.keys()):
file.write("Callers of %s:%d(%s), %d calls:\n"
% (func_shorten(k) + (caller_bases[k],)))
_dump_one_caller(k, file)
shorten_list = [
( '/scons/SCons/', 1),
( '/src/engine/SCons/', 1),
( '/usr/lib/python', 0),
]
if os.sep != '/':
shorten_list = [(t[0].replace('/', os.sep), t[1]) for t in shorten_list]
def func_shorten(func_tuple):
f = func_tuple[0]
for t in shorten_list:
i = f.find(t[0])
if i >= 0:
if t[1]:
i = i + len(t[0])
return (f[i:],)+func_tuple[1:]
return func_tuple
TraceFP = {}
if sys.platform == 'win32':
TraceDefault = 'con'
else:
TraceDefault = '/dev/tty'
TimeStampDefault = None
StartTime = time.time()
PreviousTime = StartTime
def Trace(msg, file=None, mode='w', tstamp=None):
"""Write a trace message to a file. Whenever a file is specified,
it becomes the default for the next call to Trace()."""
global TraceDefault
global TimeStampDefault
global PreviousTime
if file is None:
file = TraceDefault
else:
TraceDefault = file
if tstamp is None:
tstamp = TimeStampDefault
else:
TimeStampDefault = tstamp
try:
fp = TraceFP[file]
except KeyError:
try:
fp = TraceFP[file] = open(file, mode)
except TypeError:
# Assume we were passed an open file pointer.
fp = file
if tstamp:
now = time.time()
fp.write('%8.4f %8.4f: ' % (now - StartTime, now - PreviousTime))
PreviousTime = now
fp.write(msg)
fp.flush()
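# Usage sketch (the log path is hypothetical): the first call that names a
# file makes it the default for later calls, and tstamp=1 prefixes each line
# with seconds since module start and since the previous trace.
#
#     from SCons.Debug import Trace
#     Trace('entering builder\n', '/tmp/scons-trace.log', tstamp=1)
#     Trace('still in builder\n')    # same log file, still timestamped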
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 | -6,272,530,359,477,291,000 | 30.041494 | 99 | 0.616361 | false |
caktus/rapidsms | rapidsms/backends/http/forms.py | 7 | 1876 | from django import forms
from rapidsms.router import lookup_connections
class BaseHttpForm(forms.Form):
"""Helper form for validating incoming messages.
:param backend_name: (Optional) name of the backend
"""
def __init__(self, *args, **kwargs):
"""Save backend name to form for use later"""
self.backend_name = kwargs.pop('backend_name')
super(BaseHttpForm, self).__init__(*args, **kwargs)
def lookup_connections(self, identities):
"""Simple wrapper to ease connection lookup on child forms."""
return lookup_connections(self.backend_name, identities)
def get_incoming_data(self):
"""
Return a dictionary containing the connection and text
for this message, based on the field
names passed to __init__().
Must be implemented by subclasses.
"""
raise NotImplementedError()
class GenericHttpForm(BaseHttpForm):
def __init__(self, *args, **kwargs):
"""
        Saves the identity (phone number) and text field names on self, calls
super(), and then adds the required fields.
"""
# defaults to "text" and "identity"
self.text_name = kwargs.pop('text_name', 'text')
self.identity_name = kwargs.pop('identity_name', 'identity')
super(GenericHttpForm, self).__init__(*args, **kwargs)
self.fields[self.text_name] = forms.CharField()
self.fields[self.identity_name] = forms.CharField()
def get_incoming_data(self):
"""
Returns the connection and text for this message, based on the field
names passed to __init__().
"""
identity = self.cleaned_data[self.identity_name]
connections = self.lookup_connections([identity])
return {'connection': connections[0],
'text': self.cleaned_data[self.text_name]}
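# Illustrative sketch (not part of RapidSMS): a backend-specific form only has
# to declare its incoming fields and implement get_incoming_data(), much like
# GenericHttpForm above. The vendor field names used here are hypothetical.
class ExampleVendorHttpForm(BaseHttpForm):
    sender = forms.CharField()
    message = forms.CharField()
    def get_incoming_data(self):
        """Map this vendor's field names onto a connection and text."""
        connections = self.lookup_connections([self.cleaned_data['sender']])
        return {'connection': connections[0],
                'text': self.cleaned_data['message']}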
| bsd-3-clause | 6,591,994,300,506,421,000 | 33.740741 | 77 | 0.627932 | false |
klahnakoski/ActiveData-ETL | vendor/pyLibrary/testing/elasticsearch.py | 4 | 3437 | # encoding: utf-8
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski ([email protected])
#
from __future__ import absolute_import, division, unicode_literals
from jx_python import jx
from mo_dots import Data, Null, is_list, unwrap, wrap
from mo_files import File
import mo_json
from mo_kwargs import override
from mo_logs import Log
from jx_elasticsearch.elasticsearch import Cluster
@override
def make_test_instance(name, filename=None, kwargs=None):
if filename != None:
File(filename).delete()
return open_test_instance(kwargs)
@override
def open_test_instance(name, filename=None, es=None, kwargs=None):
if filename != None:
Log.note(
"Using {{filename}} as {{type}}",
filename=filename,
type=name
)
return FakeES(filename=filename)
else:
Log.note(
"Using ES cluster at {{host}} as {{type}}",
host=es.host,
type=name
)
cluster = Cluster(es)
try:
old_index = cluster.get_index(es)
cluster.delete_index(old_index.settings.index)
except Exception as e:
if "Can not find index" not in e:
Log.error("unexpected", cause=e)
output = cluster.create_index(limit_replicas=True, limit_replicas_warning=False, kwargs=es)
output.delete_all_but_self()
output.add_alias(es.index)
return output
class FakeES():
@override
def __init__(self, filename, host="fake", index="fake", kwargs=None):
self.settings = kwargs
self.file = File(filename)
        self.cluster = Null
try:
self.data = mo_json.json2value(self.file.read())
except Exception as e:
self.data = Data()
def search(self, query):
query = wrap(query)
f = jx.get(query.query.filtered.filter)
filtered = wrap([{"_id": i, "_source": d} for i, d in self.data.items() if f(d)])
if query.fields:
return wrap({"hits": {"total": len(filtered), "hits": [{"_id": d._id, "fields": unwrap(jx.select([unwrap(d._source)], query.fields)[0])} for d in filtered]}})
else:
return wrap({"hits": {"total": len(filtered), "hits": filtered}})
def extend(self, records):
"""
JUST SO WE MODEL A Queue
"""
records = {
v["id"]: v["value"] if "value" in v else mo_json.json2value(v['json'])
for v in records
}
for r in records.values():
try:
del r['etl']
except Exception:
pass
unwrap(self.data).update(records)
self.refresh()
Log.note("{{num}} documents added", num=len(records))
def add(self, record):
if is_list(record):
Log.error("no longer accepting lists, use extend()")
return self.extend([record])
def delete_record(self, filter):
f = esfilter2where(filter)
self.data = wrap({k: v for k, v in self.data.items() if not f(v)})
def refresh(self, *args, **kwargs):
data_as_json = mo_json.value2json(self.data, pretty=True)
self.file.write(data_as_json)
def set_refresh_interval(self, seconds):
pass
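# Usage sketch (file path, record shape and filter are hypothetical; search()
# above expects the query to carry query.filtered.filter in a form that
# jx.get() understands):
#
#     es = FakeES(filename="/tmp/fake_index.json")
#     es.extend([{"id": "1", "value": {"name": "a"}}])
#     hits = es.search({"query": {"filtered": {"filter": some_jx_filter}}})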
| mpl-2.0 | 5,477,488,911,563,290,000 | 29.963964 | 170 | 0.584812 | false |
xiaoguoai/ec-dev-swift | test/unit/proxy/controllers/test_base.py | 3 | 24089 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from mock import patch
from swift.proxy.controllers.base import headers_to_container_info, \
headers_to_account_info, headers_to_object_info, get_container_info, \
get_container_memcache_key, get_account_info, get_account_memcache_key, \
get_object_env_key, _get_cache_key, get_info, get_object_info, \
Controller, GetOrHeadHandler
from swift.common.swob import Request, HTTPException, HeaderKeyDict
from swift.common.utils import split_path
from test.unit import fake_http_connect, FakeRing, FakeMemcache
from swift.proxy import server as proxy_server
from swift.common.request_helpers import get_sys_meta_prefix
FakeResponse_status_int = 201
class FakeResponse(object):
def __init__(self, headers, env, account, container, obj):
self.headers = headers
self.status_int = FakeResponse_status_int
self.environ = env
if obj:
env_key = get_object_env_key(account, container, obj)
else:
cache_key, env_key = _get_cache_key(account, container)
if account and container and obj:
info = headers_to_object_info(headers, FakeResponse_status_int)
elif account and container:
info = headers_to_container_info(headers, FakeResponse_status_int)
else:
info = headers_to_account_info(headers, FakeResponse_status_int)
env[env_key] = info
class FakeRequest(object):
def __init__(self, env, path, swift_source=None):
self.environ = env
(version, account, container, obj) = split_path(path, 2, 4, True)
self.account = account
self.container = container
self.obj = obj
if obj:
stype = 'object'
self.headers = {'content-length': 5555,
'content-type': 'text/plain'}
else:
stype = container and 'container' or 'account'
self.headers = {'x-%s-object-count' % (stype): 1000,
'x-%s-bytes-used' % (stype): 6666}
if swift_source:
meta = 'x-%s-meta-fakerequest-swift-source' % stype
self.headers[meta] = swift_source
def get_response(self, app):
return FakeResponse(self.headers, self.environ, self.account,
self.container, self.obj)
class FakeCache(object):
def __init__(self, val):
self.val = val
def get(self, *args):
return self.val
class TestFuncs(unittest.TestCase):
def setUp(self):
self.app = proxy_server.Application(None, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing(),
object_ring=FakeRing)
def test_GETorHEAD_base(self):
base = Controller(self.app)
req = Request.blank('/v1/a/c/o/with/slashes')
with patch('swift.proxy.controllers.base.'
'http_connect', fake_http_connect(200)):
resp = base.GETorHEAD_base(req, 'object', FakeRing(), 'part',
'/a/c/o/with/slashes')
self.assertTrue('swift.object/a/c/o/with/slashes' in resp.environ)
self.assertEqual(
resp.environ['swift.object/a/c/o/with/slashes']['status'], 200)
req = Request.blank('/v1/a/c/o')
with patch('swift.proxy.controllers.base.'
'http_connect', fake_http_connect(200)):
resp = base.GETorHEAD_base(req, 'object', FakeRing(), 'part',
'/a/c/o')
self.assertTrue('swift.object/a/c/o' in resp.environ)
self.assertEqual(resp.environ['swift.object/a/c/o']['status'], 200)
req = Request.blank('/v1/a/c')
with patch('swift.proxy.controllers.base.'
'http_connect', fake_http_connect(200)):
resp = base.GETorHEAD_base(req, 'container', FakeRing(), 'part',
'/a/c')
self.assertTrue('swift.container/a/c' in resp.environ)
self.assertEqual(resp.environ['swift.container/a/c']['status'], 200)
req = Request.blank('/v1/a')
with patch('swift.proxy.controllers.base.'
'http_connect', fake_http_connect(200)):
resp = base.GETorHEAD_base(req, 'account', FakeRing(), 'part',
'/a')
self.assertTrue('swift.account/a' in resp.environ)
self.assertEqual(resp.environ['swift.account/a']['status'], 200)
def test_get_info(self):
global FakeResponse_status_int
# Do a non cached call to account
env = {}
with patch('swift.proxy.controllers.base.'
'_prepare_pre_auth_info_request', FakeRequest):
info_a = get_info(None, env, 'a')
# Check that you got proper info
self.assertEquals(info_a['status'], 201)
self.assertEquals(info_a['bytes'], 6666)
self.assertEquals(info_a['total_object_count'], 1000)
# Make sure the env cache is set
self.assertEquals(env.get('swift.account/a'), info_a)
# Do an env cached call to account
info_a = get_info(None, env, 'a')
# Check that you got proper info
self.assertEquals(info_a['status'], 201)
self.assertEquals(info_a['bytes'], 6666)
self.assertEquals(info_a['total_object_count'], 1000)
# Make sure the env cache is set
self.assertEquals(env.get('swift.account/a'), info_a)
# This time do env cached call to account and non cached to container
with patch('swift.proxy.controllers.base.'
'_prepare_pre_auth_info_request', FakeRequest):
info_c = get_info(None, env, 'a', 'c')
# Check that you got proper info
self.assertEquals(info_a['status'], 201)
self.assertEquals(info_c['bytes'], 6666)
self.assertEquals(info_c['object_count'], 1000)
# Make sure the env cache is set
self.assertEquals(env.get('swift.account/a'), info_a)
self.assertEquals(env.get('swift.container/a/c'), info_c)
# This time do a non cached call to account than non cached to
# container
env = {} # abandon previous call to env
with patch('swift.proxy.controllers.base.'
'_prepare_pre_auth_info_request', FakeRequest):
info_c = get_info(None, env, 'a', 'c')
# Check that you got proper info
self.assertEquals(info_a['status'], 201)
self.assertEquals(info_c['bytes'], 6666)
self.assertEquals(info_c['object_count'], 1000)
# Make sure the env cache is set
self.assertEquals(env.get('swift.account/a'), info_a)
self.assertEquals(env.get('swift.container/a/c'), info_c)
# This time do an env cached call to container while account is not
# cached
del(env['swift.account/a'])
info_c = get_info(None, env, 'a', 'c')
# Check that you got proper info
self.assertEquals(info_a['status'], 201)
self.assertEquals(info_c['bytes'], 6666)
self.assertEquals(info_c['object_count'], 1000)
# Make sure the env cache is set and account still not cached
self.assertEquals(env.get('swift.container/a/c'), info_c)
# Do a non cached call to account not found with ret_not_found
env = {}
with patch('swift.proxy.controllers.base.'
'_prepare_pre_auth_info_request', FakeRequest):
try:
FakeResponse_status_int = 404
info_a = get_info(None, env, 'a', ret_not_found=True)
finally:
FakeResponse_status_int = 201
# Check that you got proper info
self.assertEquals(info_a['status'], 404)
self.assertEquals(info_a['bytes'], 6666)
self.assertEquals(info_a['total_object_count'], 1000)
# Make sure the env cache is set
self.assertEquals(env.get('swift.account/a'), info_a)
# Do a cached call to account not found with ret_not_found
info_a = get_info(None, env, 'a', ret_not_found=True)
# Check that you got proper info
self.assertEquals(info_a['status'], 404)
self.assertEquals(info_a['bytes'], 6666)
self.assertEquals(info_a['total_object_count'], 1000)
# Make sure the env cache is set
self.assertEquals(env.get('swift.account/a'), info_a)
# Do a non cached call to account not found without ret_not_found
env = {}
with patch('swift.proxy.controllers.base.'
'_prepare_pre_auth_info_request', FakeRequest):
try:
FakeResponse_status_int = 404
info_a = get_info(None, env, 'a')
finally:
FakeResponse_status_int = 201
# Check that you got proper info
self.assertEquals(info_a, None)
self.assertEquals(env['swift.account/a']['status'], 404)
# Do a cached call to account not found without ret_not_found
info_a = get_info(None, env, 'a')
# Check that you got proper info
self.assertEquals(info_a, None)
self.assertEquals(env['swift.account/a']['status'], 404)
def test_get_container_info_swift_source(self):
req = Request.blank("/v1/a/c", environ={'swift.cache': FakeCache({})})
with patch('swift.proxy.controllers.base.'
'_prepare_pre_auth_info_request', FakeRequest):
resp = get_container_info(req.environ, 'app', swift_source='MC')
self.assertEquals(resp['meta']['fakerequest-swift-source'], 'MC')
def test_get_object_info_swift_source(self):
req = Request.blank("/v1/a/c/o",
environ={'swift.cache': FakeCache({})})
with patch('swift.proxy.controllers.base.'
'_prepare_pre_auth_info_request', FakeRequest):
resp = get_object_info(req.environ, 'app', swift_source='LU')
self.assertEquals(resp['meta']['fakerequest-swift-source'], 'LU')
def test_get_container_info_no_cache(self):
req = Request.blank("/v1/AUTH_account/cont",
environ={'swift.cache': FakeCache({})})
with patch('swift.proxy.controllers.base.'
'_prepare_pre_auth_info_request', FakeRequest):
resp = get_container_info(req.environ, 'xxx')
self.assertEquals(resp['bytes'], 6666)
self.assertEquals(resp['object_count'], 1000)
def test_get_container_info_cache(self):
cached = {'status': 404,
'bytes': 3333,
'object_count': 10,
# simplejson sometimes hands back strings, sometimes unicodes
'versions': u"\u1F4A9"}
req = Request.blank("/v1/account/cont",
environ={'swift.cache': FakeCache(cached)})
with patch('swift.proxy.controllers.base.'
'_prepare_pre_auth_info_request', FakeRequest):
resp = get_container_info(req.environ, 'xxx')
self.assertEquals(resp['bytes'], 3333)
self.assertEquals(resp['object_count'], 10)
self.assertEquals(resp['status'], 404)
self.assertEquals(resp['versions'], "\xe1\xbd\x8a\x39")
def test_get_container_info_env(self):
cache_key = get_container_memcache_key("account", "cont")
env_key = 'swift.%s' % cache_key
req = Request.blank("/v1/account/cont",
environ={env_key: {'bytes': 3867},
'swift.cache': FakeCache({})})
resp = get_container_info(req.environ, 'xxx')
self.assertEquals(resp['bytes'], 3867)
def test_get_account_info_swift_source(self):
req = Request.blank("/v1/a", environ={'swift.cache': FakeCache({})})
with patch('swift.proxy.controllers.base.'
'_prepare_pre_auth_info_request', FakeRequest):
resp = get_account_info(req.environ, 'a', swift_source='MC')
self.assertEquals(resp['meta']['fakerequest-swift-source'], 'MC')
def test_get_account_info_no_cache(self):
req = Request.blank("/v1/AUTH_account",
environ={'swift.cache': FakeCache({})})
with patch('swift.proxy.controllers.base.'
'_prepare_pre_auth_info_request', FakeRequest):
resp = get_account_info(req.environ, 'xxx')
self.assertEquals(resp['bytes'], 6666)
self.assertEquals(resp['total_object_count'], 1000)
def test_get_account_info_cache(self):
# The original test that we prefer to preserve
cached = {'status': 404,
'bytes': 3333,
'total_object_count': 10}
req = Request.blank("/v1/account/cont",
environ={'swift.cache': FakeCache(cached)})
with patch('swift.proxy.controllers.base.'
'_prepare_pre_auth_info_request', FakeRequest):
resp = get_account_info(req.environ, 'xxx')
self.assertEquals(resp['bytes'], 3333)
self.assertEquals(resp['total_object_count'], 10)
self.assertEquals(resp['status'], 404)
# Here is a more realistic test
cached = {'status': 404,
'bytes': '3333',
'container_count': '234',
'total_object_count': '10',
'meta': {}}
req = Request.blank("/v1/account/cont",
environ={'swift.cache': FakeCache(cached)})
with patch('swift.proxy.controllers.base.'
'_prepare_pre_auth_info_request', FakeRequest):
resp = get_account_info(req.environ, 'xxx')
self.assertEquals(resp['status'], 404)
self.assertEquals(resp['bytes'], '3333')
self.assertEquals(resp['container_count'], 234)
self.assertEquals(resp['meta'], {})
self.assertEquals(resp['total_object_count'], '10')
def test_get_account_info_env(self):
cache_key = get_account_memcache_key("account")
env_key = 'swift.%s' % cache_key
req = Request.blank("/v1/account",
environ={env_key: {'bytes': 3867},
'swift.cache': FakeCache({})})
resp = get_account_info(req.environ, 'xxx')
self.assertEquals(resp['bytes'], 3867)
def test_get_object_info_env(self):
cached = {'status': 200,
'length': 3333,
'type': 'application/json',
'meta': {}}
env_key = get_object_env_key("account", "cont", "obj")
req = Request.blank("/v1/account/cont/obj",
environ={env_key: cached,
'swift.cache': FakeCache({})})
resp = get_object_info(req.environ, 'xxx')
self.assertEquals(resp['length'], 3333)
self.assertEquals(resp['type'], 'application/json')
def test_get_object_info_no_env(self):
req = Request.blank("/v1/account/cont/obj",
environ={'swift.cache': FakeCache({})})
with patch('swift.proxy.controllers.base.'
'_prepare_pre_auth_info_request', FakeRequest):
resp = get_object_info(req.environ, 'xxx')
self.assertEquals(resp['length'], 5555)
self.assertEquals(resp['type'], 'text/plain')
def test_headers_to_container_info_missing(self):
resp = headers_to_container_info({}, 404)
self.assertEquals(resp['status'], 404)
self.assertEquals(resp['read_acl'], None)
self.assertEquals(resp['write_acl'], None)
def test_headers_to_container_info_meta(self):
headers = {'X-Container-Meta-Whatevs': 14,
'x-container-meta-somethingelse': 0}
resp = headers_to_container_info(headers.items(), 200)
self.assertEquals(len(resp['meta']), 2)
self.assertEquals(resp['meta']['whatevs'], 14)
self.assertEquals(resp['meta']['somethingelse'], 0)
def test_headers_to_container_info_sys_meta(self):
prefix = get_sys_meta_prefix('container')
headers = {'%sWhatevs' % prefix: 14,
'%ssomethingelse' % prefix: 0}
resp = headers_to_container_info(headers.items(), 200)
self.assertEquals(len(resp['sysmeta']), 2)
self.assertEquals(resp['sysmeta']['whatevs'], 14)
self.assertEquals(resp['sysmeta']['somethingelse'], 0)
def test_headers_to_container_info_values(self):
headers = {
'x-container-read': 'readvalue',
'x-container-write': 'writevalue',
'x-container-sync-key': 'keyvalue',
'x-container-meta-access-control-allow-origin': 'here',
}
resp = headers_to_container_info(headers.items(), 200)
self.assertEquals(resp['read_acl'], 'readvalue')
self.assertEquals(resp['write_acl'], 'writevalue')
self.assertEquals(resp['cors']['allow_origin'], 'here')
headers['x-unused-header'] = 'blahblahblah'
self.assertEquals(
resp,
headers_to_container_info(headers.items(), 200))
def test_headers_to_account_info_missing(self):
resp = headers_to_account_info({}, 404)
self.assertEquals(resp['status'], 404)
self.assertEquals(resp['bytes'], None)
self.assertEquals(resp['container_count'], None)
def test_headers_to_account_info_meta(self):
headers = {'X-Account-Meta-Whatevs': 14,
'x-account-meta-somethingelse': 0}
resp = headers_to_account_info(headers.items(), 200)
self.assertEquals(len(resp['meta']), 2)
self.assertEquals(resp['meta']['whatevs'], 14)
self.assertEquals(resp['meta']['somethingelse'], 0)
def test_headers_to_account_info_sys_meta(self):
prefix = get_sys_meta_prefix('account')
headers = {'%sWhatevs' % prefix: 14,
'%ssomethingelse' % prefix: 0}
resp = headers_to_account_info(headers.items(), 200)
self.assertEquals(len(resp['sysmeta']), 2)
self.assertEquals(resp['sysmeta']['whatevs'], 14)
self.assertEquals(resp['sysmeta']['somethingelse'], 0)
def test_headers_to_account_info_values(self):
headers = {
'x-account-object-count': '10',
'x-account-container-count': '20',
}
resp = headers_to_account_info(headers.items(), 200)
self.assertEquals(resp['total_object_count'], '10')
self.assertEquals(resp['container_count'], '20')
headers['x-unused-header'] = 'blahblahblah'
self.assertEquals(
resp,
headers_to_account_info(headers.items(), 200))
def test_headers_to_object_info_missing(self):
resp = headers_to_object_info({}, 404)
self.assertEquals(resp['status'], 404)
self.assertEquals(resp['length'], None)
self.assertEquals(resp['etag'], None)
def test_headers_to_object_info_meta(self):
headers = {'X-Object-Meta-Whatevs': 14,
'x-object-meta-somethingelse': 0}
resp = headers_to_object_info(headers.items(), 200)
self.assertEquals(len(resp['meta']), 2)
self.assertEquals(resp['meta']['whatevs'], 14)
self.assertEquals(resp['meta']['somethingelse'], 0)
def test_headers_to_object_info_values(self):
headers = {
'content-length': '1024',
'content-type': 'application/json',
}
resp = headers_to_object_info(headers.items(), 200)
self.assertEquals(resp['length'], '1024')
self.assertEquals(resp['type'], 'application/json')
headers['x-unused-header'] = 'blahblahblah'
self.assertEquals(
resp,
headers_to_object_info(headers.items(), 200))
def test_have_quorum(self):
base = Controller(self.app)
# just throw a bunch of test cases at it
self.assertEqual(base.have_quorum([201, 404], 3), False)
self.assertEqual(base.have_quorum([201, 201], 4), False)
self.assertEqual(base.have_quorum([201, 201, 404, 404], 4), False)
self.assertEqual(base.have_quorum([201, 503, 503, 201], 4), False)
self.assertEqual(base.have_quorum([201, 201], 3), True)
self.assertEqual(base.have_quorum([404, 404], 3), True)
self.assertEqual(base.have_quorum([201, 201], 2), True)
self.assertEqual(base.have_quorum([404, 404], 2), True)
self.assertEqual(base.have_quorum([201, 404, 201, 201], 4), True)
def test_range_fast_forward(self):
req = Request.blank('/')
handler = GetOrHeadHandler(None, req, None, None, None, None, {})
handler.fast_forward(50)
self.assertEquals(handler.backend_headers['Range'], 'bytes=50-')
handler = GetOrHeadHandler(None, req, None, None, None, None,
{'Range': 'bytes=23-50'})
handler.fast_forward(20)
self.assertEquals(handler.backend_headers['Range'], 'bytes=43-50')
self.assertRaises(HTTPException,
handler.fast_forward, 80)
handler = GetOrHeadHandler(None, req, None, None, None, None,
{'Range': 'bytes=23-'})
handler.fast_forward(20)
self.assertEquals(handler.backend_headers['Range'], 'bytes=43-')
handler = GetOrHeadHandler(None, req, None, None, None, None,
{'Range': 'bytes=-100'})
handler.fast_forward(20)
self.assertEquals(handler.backend_headers['Range'], 'bytes=-80')
def test_transfer_headers_with_sysmeta(self):
base = Controller(self.app)
good_hdrs = {'x-base-sysmeta-foo': 'ok',
'X-Base-sysmeta-Bar': 'also ok'}
bad_hdrs = {'x-base-sysmeta-': 'too short'}
hdrs = dict(good_hdrs)
hdrs.update(bad_hdrs)
dst_hdrs = HeaderKeyDict()
base.transfer_headers(hdrs, dst_hdrs)
self.assertEqual(HeaderKeyDict(good_hdrs), dst_hdrs)
def test_generate_request_headers(self):
base = Controller(self.app)
src_headers = {'x-remove-base-meta-owner': 'x',
'x-base-meta-size': '151M',
'new-owner': 'Kun'}
req = Request.blank('/v1/a/c/o', headers=src_headers)
dst_headers = base.generate_request_headers(req, transfer=True)
expected_headers = {'x-base-meta-owner': '',
'x-base-meta-size': '151M'}
for k, v in expected_headers.iteritems():
self.assertTrue(k in dst_headers)
self.assertEqual(v, dst_headers[k])
self.assertFalse('new-owner' in dst_headers)
def test_generate_request_headers_with_sysmeta(self):
base = Controller(self.app)
good_hdrs = {'x-base-sysmeta-foo': 'ok',
'X-Base-sysmeta-Bar': 'also ok'}
bad_hdrs = {'x-base-sysmeta-': 'too short'}
hdrs = dict(good_hdrs)
hdrs.update(bad_hdrs)
req = Request.blank('/v1/a/c/o', headers=hdrs)
dst_headers = base.generate_request_headers(req, transfer=True)
for k, v in good_hdrs.iteritems():
self.assertTrue(k.lower() in dst_headers)
self.assertEqual(v, dst_headers[k.lower()])
for k, v in bad_hdrs.iteritems():
self.assertFalse(k.lower() in dst_headers)
| apache-2.0 | -8,547,977,783,461,098,000 | 44.110487 | 79 | 0.58205 | false |
yorkerlin/shogun | applications/asp/model.py | 31 | 4100 | #
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Written (W) 2006-2008 Soeren Sonnenburg
# Written (W) 2007 Gunnar Raetsch
# Copyright (C) 2006-2008 Fraunhofer Institute FIRST and Max-Planck-Society
#
import sys
from numpy import mat,array,inf,any,reshape,int32
class model(object):
#acceptor
acc_splice_b=None
acc_splice_order=None
acc_splice_window_left=None
acc_splice_window_right=None
acc_splice_alphas=None
acc_splice_svs=None
#donor
don_splice_b=None
don_splice_order=None
don_splice_window_left=None
don_splice_window_right=None
don_splice_alphas=None
don_splice_svs=None
def parse_file(file):
m=model()
l=file.readline();
if l != '%asplicer definition file version: 1.0\n':
sys.stdout.write("\nfile not a asplicer definition file\n")
return None
while l:
if not ( l.startswith('%') or l.startswith('\n') ): # comment
#acceptor
if m.acc_splice_b is None: m.acc_splice_b=parse_value(l, 'acc_splice_b')
if m.acc_splice_order is None: m.acc_splice_order=parse_value(l, 'acc_splice_order')
if m.acc_splice_window_left is None: m.acc_splice_window_left=parse_value(l, 'acc_splice_window_left')
if m.acc_splice_window_right is None: m.acc_splice_window_right=parse_value(l, 'acc_splice_window_right')
if m.acc_splice_alphas is None: m.acc_splice_alphas=parse_vector(l, file, 'acc_splice_alphas')
if m.acc_splice_svs is None: m.acc_splice_svs=parse_string(l, file, 'acc_splice_svs')
#donor
if m.don_splice_b is None: m.don_splice_b=parse_value(l, 'don_splice_b')
if m.don_splice_order is None: m.don_splice_order=parse_value(l, 'don_splice_order')
if m.don_splice_window_left is None: m.don_splice_window_left=parse_value(l, 'don_splice_window_left')
if m.don_splice_window_right is None: m.don_splice_window_right=parse_value(l, 'don_splice_window_right')
if m.don_splice_alphas is None: m.don_splice_alphas=parse_vector(l, file, 'don_splice_alphas')
if m.don_splice_svs is None: m.don_splice_svs=parse_string(l, file, 'don_splice_svs')
l=file.readline()
sys.stdout.write('done\n')
return m
def parse_value(line, name):
if (line.startswith(name)):
sys.stdout.write('.'); sys.stdout.flush()
return float(line[line.find('=')+1:-1])
else:
return None
def parse_vector(line, file, name):
mat = parse_matrix(line, file, name)
if mat is None:
return mat
else:
mat = array(mat).flatten()
return mat
def parse_matrix(line, file, name):
if (line.startswith(name)):
sys.stdout.write('.'); sys.stdout.flush()
if line.find(']') < 0:
l=''
while l is not None and l.find(']') < 0:
line+=l
l=file.readline()
if l is not None and l.find(']') >= 0:
line+=l
if line.find(']') < 0:
sys.stdout.write("matrix `" + name + "' ended without ']'\n")
return None
else:
mm = mat(line[line.find('['):line.find(']')+1])
if len(mm.shape)==1:
mm = reshape(mm.shape[0],1)
return mm
else:
return None
def parse_string(line, file, name):
if (line.startswith(name)):
sys.stdout.write('.'); sys.stdout.flush()
l=''
lines=[]
while l is not None and l.find(']') < 0:
if l:
lines.append(l[:-1])
l=file.readline()
if l.find(']') < 0:
sys.stdout.write("string ended without ']'\n")
return None
else:
return lines
else:
return None
if __name__ == '__main__':
import bz2
import sys
import hotshot, hotshot.stats
def load():
f=file('data/asp_test.dat');
m=parse_file(f);
print m.acc_splice_b is None
print m.acc_splice_order is None
print m.acc_splice_window_left is None
print m.acc_splice_window_right is None
print m.acc_splice_alphas is None
print m.acc_splice_svs is None
print m.don_splice_b is None
print m.don_splice_order is None
print m.don_splice_window_left is None
print m.don_splice_window_right is None
print m.don_splice_alphas is None
print m.don_splice_svs is None
load()
| gpl-3.0 | 9,219,667,901,655,523,000 | 27.671329 | 108 | 0.679756 | false |
austinzheng/swift | utils/cmpcodesize/cmpcodesize/compare.py | 18 | 12566 | # ====--- compare.py - Compare built products' sizes -*- coding: utf-8 -*-===//
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
from __future__ import print_function
import collections
import os
import re
import subprocess
from operator import itemgetter
categories = [
# Cpp
["CPP", re.compile('^(__Z|_+swift)')],
# Objective-C
["ObjC", re.compile(r'^[+-]\[')],
# Swift
["Partial Apply", re.compile('^__(TPA|T0.*T[aA]$)')],
["Protocol Witness", re.compile('^__(TTW|T0.*TW$)')],
["Value Witness", re.compile('^__(Tw|T0.*w..$)')],
["Type Metadata", re.compile('^__(TM|T0.*(N|M.)$)')],
# Function signature specialization of a generic specialization.
["FuncSigGen Spec", re.compile(
'^__(TTSf.*__TTSg|T0.*T[gGpP]q?[0-9].*Tfq?[0-9])')],
["Generic Spec", re.compile('^__(TTSg|T0.*T[gG]q?[0-9])')],
["Partial Spec", re.compile('^__(T0.*T[pP]q?[0-9])')],
["FuncSig Spec", re.compile('^__(TTSf|T0.*Tfq?[0-9])')],
["Generic Function", re.compile(
'__(T[^0].*q(x|d?[0-9]*_)|T0.*q(z|d?[0-9]*_))')],
["Static Func", re.compile('^__(TZF|T0.*FZ)')],
["Swift @objc Func", re.compile('^__(TTo|T0.*To$)')],
["Accessor", re.compile('^__(TW[atTlI]|T0.*W[atTlI]$)')],
["Getter/Setter", re.compile('^__(T[Fvi][gsmwWl]|T0.*f[gGsmwWal]$)')],
["Swift Function", re.compile('^__(TF|T0.*(F|f.|f[AuU][0-9]*_)$)')],
["Unknown", re.compile('')]
]
def add_function(sizes, function, start_addr, end_addr, group_by_prefix):
if not function or start_addr is None or end_addr is None:
return
size = end_addr - start_addr
if group_by_prefix:
if function.endswith('_merged'):
function = function[:-7]
for cat in categories:
cat_name = cat[0]
pattern = cat[1]
if pattern.match(function):
sizes[cat_name] += size
return
assert False, "function name not matching any pattern"
else:
sizes[function] += size
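# Illustrative helper (not used by the tool itself): resolve one symbol name
# to its category with the same first-match rule add_function() applies above.
# The sample symbols in the comments are hypothetical.
def _example_category_for(function):
    for cat_name, pattern in categories:
        if pattern.match(function):
            return cat_name
# _example_category_for('-[MyView drawRect:]')  -> 'ObjC'
# _example_category_for('__TMSomeType')         -> 'Type Metadata'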
def flatten(*args):
for x in args:
if hasattr(x, '__iter__'):
for y in flatten(*x):
yield y
else:
yield x
def read_sizes(sizes, file_name, function_details, group_by_prefix):
# Check if multiple architectures are supported by the object file.
# Prefer arm64 if available.
architectures = subprocess.check_output(
["otool", "-V", "-f", file_name]).split("\n")
arch = None
arch_pattern = re.compile(r'architecture ([\S]+)')
for architecture in architectures:
arch_match = arch_pattern.match(architecture)
if arch_match:
if arch is None:
arch = arch_match.group(1)
if "arm64" in arch:
arch = "arm64"
if arch is not None:
arch_params = ["-arch", arch]
else:
arch_params = []
if function_details:
content = subprocess.check_output(
flatten([
"otool",
arch_params,
"-l",
"-v",
"-t",
file_name]
)).split("\n")
content += subprocess.check_output(flatten(
["otool", arch_params, "-v", "-s", "__TEXT", "__textcoal_nt",
file_name])).split("\n")
else:
content = subprocess.check_output(
flatten(["otool", arch_params, "-l", file_name])).split("\n")
sect_name = None
curr_func = None
start_addr = None
end_addr = None
section_pattern = re.compile(r' +sectname ([\S]+)')
size_pattern = re.compile(r' +size ([\da-fx]+)')
asmline_pattern = re.compile(r'^([0-9a-fA-F]+)\s')
label_pattern = re.compile(r'^((\-*\[[^\]]*\])|[^\/\s]+):$')
for line in content:
asmline_match = asmline_pattern.match(line)
if asmline_match:
addr = int(asmline_match.group(1), 16)
if start_addr is None:
start_addr = addr
end_addr = addr
elif line == "Section":
sect_name = None
else:
label_match = label_pattern.match(line)
size_match = size_pattern.match(line)
section_match = section_pattern.match(line)
if label_match:
func_name = label_match.group(1)
add_function(sizes, curr_func, start_addr,
end_addr, group_by_prefix)
curr_func = func_name
start_addr = None
end_addr = None
elif size_match and sect_name and group_by_prefix:
size = int(size_match.group(1), 16)
sizes[sect_name] += size
elif section_match:
sect_name = section_match.group(1)
if sect_name == "__textcoal_nt":
sect_name = "__text"
add_function(sizes, curr_func, start_addr, end_addr, group_by_prefix)
def compare_sizes(old_sizes, new_sizes, name_key, title, total_size_key="",
csv=None):
old_size = old_sizes[name_key]
new_size = new_sizes[name_key]
if total_size_key:
old_total_size = old_sizes[total_size_key]
new_total_size = new_sizes[total_size_key]
if old_size is not None and new_size is not None:
if old_size != 0:
perc = "%.1f%%" % (
(float(new_size) / float(old_size) - 1.0) * 100.0)
else:
perc = "- "
if total_size_key:
if csv:
csv.writerow([title, name_key,
old_size, old_size * 100.0 / old_total_size,
new_size, new_size * 100.0 / new_total_size,
perc])
else:
print("%-26s%16s: %8d (%2d%%) %8d (%2d%%) %7s" %
(title, name_key,
old_size, old_size * 100.0 / old_total_size,
new_size, new_size * 100.0 / new_total_size,
perc))
else:
if csv:
csv.writerow([title, name_key,
old_size, "",
new_size, "",
perc])
else:
print("%-26s%16s: %14d %14d %7s" %
(title, name_key, old_size, new_size, perc))
def compare_sizes_of_file(old_files, new_files, all_sections, list_categories,
csv=None):
old_sizes = collections.defaultdict(int)
new_sizes = collections.defaultdict(int)
for old_file in old_files:
read_sizes(old_sizes, old_file, list_categories, True)
for new_file in new_files:
read_sizes(new_sizes, new_file, list_categories, True)
if len(old_files) == 1 and len(new_files) == 1:
old_base = os.path.basename(old_files[0])
new_base = os.path.basename(new_files[0])
title = old_base
if old_base != new_base:
title += "-" + new_base
else:
title = "old-new"
compare_sizes(old_sizes, new_sizes, "__text", title, "", csv=csv)
if list_categories:
for cat in categories:
cat_name = cat[0]
compare_sizes(old_sizes, new_sizes, cat_name, "", "__text",
csv=csv)
if all_sections:
section_title = " section"
compare_sizes(old_sizes, new_sizes, "__textcoal_nt", section_title,
csv=csv)
compare_sizes(old_sizes, new_sizes, "__stubs", section_title, csv=csv)
compare_sizes(old_sizes, new_sizes, "__const", section_title, csv=csv)
compare_sizes(old_sizes, new_sizes, "__cstring", section_title,
csv=csv)
compare_sizes(old_sizes, new_sizes, "__objc_methname", section_title,
csv=csv)
compare_sizes(old_sizes, new_sizes, "__const", section_title, csv=csv)
compare_sizes(old_sizes, new_sizes, "__objc_const", section_title,
csv=csv)
compare_sizes(old_sizes, new_sizes, "__data", section_title, csv=csv)
compare_sizes(old_sizes, new_sizes, "__swift5_proto", section_title,
csv=csv)
compare_sizes(old_sizes, new_sizes, "__common", section_title, csv=csv)
compare_sizes(old_sizes, new_sizes, "__bss", section_title, csv=csv)
def list_function_sizes(size_array):
for pair in sorted(size_array, key=itemgetter(1)):
name = pair[0]
size = pair[1]
yield "%8d %s" % (size, name)
def compare_function_sizes(old_files, new_files, csv=None):
old_sizes = collections.defaultdict(int)
new_sizes = collections.defaultdict(int)
for name in old_files:
read_sizes(old_sizes, name, True, False)
for name in new_files:
read_sizes(new_sizes, name, True, False)
only_in_file1 = []
only_in_file2 = []
in_both = []
only_in_file1size = 0
only_in_file2size = 0
in_both_size = 0
for func, old_size in old_sizes.items():
new_size = new_sizes[func]
if new_size != 0:
in_both.append((func, old_size, new_size))
else:
only_in_file1.append((func, old_size))
only_in_file1size += old_size
for func, new_size in new_sizes.items():
old_size = old_sizes[func]
if old_size == 0:
only_in_file2.append((func, new_size))
only_in_file2size += new_size
if only_in_file1:
if csv:
csv.writerow(["Only in old", "", "", ""])
for name, size in sorted(only_in_file1, key=itemgetter(1)):
csv.writerow([size, name, "", ""])
csv.writerow(["Total size only in old", only_in_file1size, "", ""])
else:
print("Only in old file(s)")
print(os.linesep.join(list_function_sizes(only_in_file1)))
print("Total size of functions only in old file: {}".format(
only_in_file1size))
print()
if only_in_file2:
if csv:
csv.writerow(["Only in new", "", "", ""])
for name, size in sorted(only_in_file2, key=itemgetter(1)):
csv.writerow([size, name, "", ""])
csv.writerow(["Total size only in new", only_in_file2size, "", ""])
else:
print("Only in new files(s)")
print(os.linesep.join(list_function_sizes(only_in_file2)))
print("Total size of functions only in new file: {}".format(
only_in_file2size))
print()
if in_both:
size_increase = 0
size_decrease = 0
header = ("old", "new", "diff")
if csv:
csv.writerow(list(header) + ["function"])
else:
print("%8s %8s %8s" % header)
for triple in sorted(
in_both,
key=lambda tup: (tup[2] - tup[1], tup[1])):
func = triple[0]
old_size = triple[1]
new_size = triple[2]
diff = new_size - old_size
if diff > 0:
size_increase += diff
else:
size_decrease -= diff
if diff == 0:
in_both_size += new_size
if csv:
csv.writerow([old_size, new_size, new_size - old_size, func])
else:
print("%8d %8d %8d %s" %
(old_size, new_size, new_size - old_size, func))
if csv:
csv.writerow(["Total size in both", "Total size smaller",
"Total size bigger", "Total size change in both"])
csv.writerow([in_both_size, size_decrease, size_increase,
(size_increase - size_decrease)])
else:
print("Total size of functions " +
"with the same size in both files: {}".format(in_both_size))
print("Total size of functions " +
"that got smaller: {}".format(size_decrease))
print("Total size of functions " +
"that got bigger: {}".format(size_increase))
print("Total size change of functions present " +
"in both files: {}".format(size_increase - size_decrease))
| apache-2.0 | -8,552,913,735,851,363,000 | 35.109195 | 79 | 0.517587 | false |
alexbruy/QGIS | python/plugins/processing/gui/MultipleInputDialog.py | 3 | 3895 | # -*- coding: utf-8 -*-
"""
***************************************************************************
MultipleInputDialog.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt import uic
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtWidgets import QDialog, QAbstractItemView, QPushButton, QDialogButtonBox
from qgis.PyQt.QtGui import QStandardItemModel, QStandardItem
pluginPath = os.path.split(os.path.dirname(__file__))[0]
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'DlgMultipleSelection.ui'))
class MultipleInputDialog(BASE, WIDGET):
def __init__(self, options, selectedoptions=None):
super(MultipleInputDialog, self).__init__(None)
self.setupUi(self)
self.lstLayers.setSelectionMode(QAbstractItemView.NoSelection)
self.options = options
self.selectedoptions = selectedoptions or []
# Additional buttons
self.btnSelectAll = QPushButton(self.tr('Select all'))
self.buttonBox.addButton(self.btnSelectAll,
QDialogButtonBox.ActionRole)
self.btnClearSelection = QPushButton(self.tr('Clear selection'))
self.buttonBox.addButton(self.btnClearSelection,
QDialogButtonBox.ActionRole)
self.btnToggleSelection = QPushButton(self.tr('Toggle selection'))
self.buttonBox.addButton(self.btnToggleSelection,
QDialogButtonBox.ActionRole)
self.btnSelectAll.clicked.connect(lambda: self.selectAll(True))
self.btnClearSelection.clicked.connect(lambda: self.selectAll(False))
self.btnToggleSelection.clicked.connect(self.toggleSelection)
self.populateList()
def populateList(self):
model = QStandardItemModel()
for i, option in enumerate(self.options):
item = QStandardItem(option)
item.setCheckState(Qt.Checked if i in self.selectedoptions else Qt.Unchecked)
item.setCheckable(True)
model.appendRow(item)
self.lstLayers.setModel(model)
def accept(self):
self.selectedoptions = []
model = self.lstLayers.model()
for i in xrange(model.rowCount()):
item = model.item(i)
if item.checkState() == Qt.Checked:
self.selectedoptions.append(i)
QDialog.accept(self)
def reject(self):
self.selectedoptions = None
QDialog.reject(self)
def selectAll(self, value):
model = self.lstLayers.model()
for i in xrange(model.rowCount()):
item = model.item(i)
item.setCheckState(Qt.Checked if value else Qt.Unchecked)
def toggleSelection(self):
model = self.lstLayers.model()
for i in xrange(model.rowCount()):
item = model.item(i)
checked = item.checkState() == Qt.Checked
item.setCheckState(Qt.Unchecked if checked else Qt.Checked)
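# Usage sketch (illustrative; the Processing framework normally drives this
# dialog): options is a list of display strings, selectedoptions a list of
# indexes into it.
#
#     dlg = MultipleInputDialog(['Layer A', 'Layer B'], selectedoptions=[0])
#     if dlg.exec_():
#         chosen = dlg.selectedoptions   # indexes of checked rows
#     # after a cancel/reject, dlg.selectedoptions is None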
| gpl-2.0 | -8,104,855,217,735,489,000 | 37.186275 | 89 | 0.570988 | false |
gtko/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/worldstarhiphop.py | 13 | 1885 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
class WorldStarHipHopIE(InfoExtractor):
_VALID_URL = r'https?://(?:www|m)\.worldstar(?:candy|hiphop)\.com/videos/video\.php\?v=(?P<id>.*)'
_TEST = {
"url": "http://www.worldstarhiphop.com/videos/video.php?v=wshh6a7q1ny0G34ZwuIO",
"md5": "9d04de741161603bf7071bbf4e883186",
"info_dict": {
"id": "wshh6a7q1ny0G34ZwuIO",
"ext": "mp4",
"title": "Video: KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick!"
}
}
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
video_id = m.group('id')
webpage_src = self._download_webpage(url, video_id)
m_vevo_id = re.search(r'videoId=(.*?)&?',
webpage_src)
if m_vevo_id is not None:
return self.url_result('vevo:%s' % m_vevo_id.group(1), ie='Vevo')
video_url = self._search_regex(
r'so\.addVariable\("file","(.*?)"\)', webpage_src, 'video URL')
if 'youtube' in video_url:
return self.url_result(video_url, ie='Youtube')
video_title = self._html_search_regex(
r"<title>(.*)</title>", webpage_src, 'title')
        # Get the thumbnail; if there is no thumbnail, set the correct
        # title for WSHH candy videos.
thumbnail = self._html_search_regex(
r'rel="image_src" href="(.*)" />', webpage_src, 'thumbnail',
fatal=False)
if not thumbnail:
_title = r"""candytitles.*>(.*)</span>"""
mobj = re.search(_title, webpage_src)
if mobj is not None:
video_title = mobj.group(1)
return {
'id': video_id,
'url': video_url,
'title': video_title,
'thumbnail': thumbnail,
}
| gpl-3.0 | 9,005,061,584,498,543,000 | 32.660714 | 102 | 0.540584 | false |
scavarda/mysql-dbcompare | mysql-utilities-1.6.0/mysql/utilities/common/server.py | 1 | 80854 | #
# Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
This module contains an abstraction of a MySQL server object used
by multiple utilities. It also contains helper methods for common
server operations used in multiple utilities.
"""
import os
import re
import socket
import string
import subprocess
import tempfile
import threading
import mysql.connector
from mysql.connector.constants import ClientFlag
from mysql.connector.errorcode import CR_SERVER_LOST
from mysql.utilities.exception import (ConnectionValuesError, UtilError,
UtilDBError, UtilRplError)
from mysql.utilities.common.user import User
from mysql.utilities.common.tools import (delete_directory, execute_script,
ping_host)
from mysql.utilities.common.ip_parser import (parse_connection, hostname_is_ip,
clean_IPv6, format_IPv6)
_FOREIGN_KEY_SET = "SET foreign_key_checks = {0}"
_AUTOCOMMIT_SET = "SET AUTOCOMMIT = {0}"
_GTID_ERROR = ("The server %s:%s does not comply to the latest GTID "
"feature support. Errors:")
def tostr(value):
"""Cast value to str except when None
value[in] Value to be cast to str
Returns value as str instance or None.
"""
return None if value is None else str(value)
class MySQLUtilsCursorRaw(mysql.connector.cursor.MySQLCursorRaw):
"""
Cursor for Connector/Python v2.0, returning str instead of bytearray
"""
def fetchone(self):
row = self._fetch_row()
if row:
return tuple([tostr(v) for v in row])
return None
def fetchall(self):
rows = []
all_rows = super(MySQLUtilsCursorRaw, self).fetchall()
for row in all_rows:
rows.append(tuple([tostr(v) for v in row]))
return rows
class MySQLUtilsCursorBufferedRaw(
mysql.connector.cursor.MySQLCursorBufferedRaw):
"""
Cursor for Connector/Python v2.0, returning str instead of bytearray
"""
def fetchone(self):
row = self._fetch_row()
if row:
return tuple([tostr(v) for v in row])
return None
def fetchall(self):
if self._rows is None:
raise mysql.connector.InterfaceError(
"No result set to fetch from."
)
rows = []
all_rows = [r for r in self._rows[self._next_row:]]
for row in all_rows:
rows.append(tuple([tostr(v) for v in row]))
return rows
def get_connection_dictionary(conn_info, ssl_dict=None):
"""Get the connection dictionary.
The method accepts one of the following types for conn_info:
- dictionary containing connection information including:
(user, passwd, host, port, socket)
- connection string in the form: user:pass@host:port:socket or
login-path:port:socket
- an instance of the Server class
conn_info[in] Connection information
ssl_dict[in] A dictionary with the ssl certificates
(ssl_ca, ssl_cert and ssl_key).
Returns dict - dictionary for connection (user, passwd, host, port, socket)
"""
if conn_info is None:
return conn_info
conn_val = {}
if isinstance(conn_info, dict) and 'host' in conn_info:
# Not update conn_info if already has any ssl certificate.
if (ssl_dict is not None and
not (conn_info.get("ssl_ca", None) or
conn_info.get("ssl_cert", None) or
conn_info.get("ssl_key", None))):
conn_info.update(ssl_dict)
conn_val = conn_info
elif isinstance(conn_info, Server):
# get server's dictionary
conn_val = conn_info.get_connection_values()
elif isinstance(conn_info, basestring):
# parse the string
conn_val = parse_connection(conn_info, options=ssl_dict)
else:
raise ConnectionValuesError("Cannot determine connection information"
" type.")
return conn_val
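# Added usage sketch (not in the original module): get_connection_dictionary()
# accepts a dict, a connection string or a Server instance, as described in the
# docstring above. The credentials below are placeholder assumptions used only
# to show the expected shape of the returned dictionary.
def _example_connection_dictionary():
    conn_dict = get_connection_dictionary("root:secret@localhost:3306")
    # The returned dict is expected to include keys such as "user", "host"
    # and "port", suitable for building a Server instance.
    return conn_dict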
def _print_connection(prefix, conn_info):
"""Print connection information
The method accepts one of the following types for conn_info:
- dictionary containing connection information including:
(user, passwd, host, port, socket)
- connection string in the form: user:pass@host:port:socket or
login-path:port:socket
- an instance of the Server class
conn_info[in] Connection information
"""
conn_val = get_connection_dictionary(conn_info)
print "# %s on %s: ..." % (prefix, conn_val["host"]),
def get_local_servers(all_proc=False, start=3306, end=3333,
datadir_prefix=None):
"""Check to see if there are any servers running on the local host.
This method attempts to locate all running servers. If provided, it will
    also limit the search to specific ports or data directory prefixes.
This method uses ps for posix systems and netstat for Windows machines
to determine the list of running servers.
For posix, it matches on the datadir and if datadir is the path for the
test directory, the server will be added to the list.
For nt, it matches on the port in the range starting_port,
starting_port + 10.
all_proc[in] If True, find all processes else only user processes
start[in] For Windows/NT systems: Starting port value to search.
Default = 3306
end[in] For Windows/NT systems: Ending port value to search.
Default = 3333
datadir_prefix[in] For posix systems, if not None, find only those servers
whose datadir starts with this prefix.
Returns list - tuples of the form: (process_id, [datadir|port])
"""
processes = []
if os.name == "posix":
tmp_file = tempfile.TemporaryFile()
if all_proc:
subprocess.call(["ps", "-A"], stdout=tmp_file)
else:
subprocess.call(["ps", "-f"], stdout=tmp_file)
tmp_file.seek(0)
for line in tmp_file.readlines():
mysqld_safe = False
mysqld = False
datadir = False
grep = False
datadir_arg = ""
proginfo = string.split(line)
for arg in proginfo:
if "datadir" in arg:
datadir = True
datadir_arg = arg
if "mysqld" in arg:
mysqld = True
if "mysqld_safe" in arg:
mysqld_safe = True
if "grep" in arg:
grep = True
# Check to see if this is a mysqld server and not mysqld_safe proc
if ((mysqld and datadir) or (mysqld and not grep)) and \
not mysqld_safe:
# If provided, check datadir prefix
if all_proc:
proc_id = proginfo[0]
else:
proc_id = proginfo[1]
if datadir_prefix is not None:
if datadir_prefix in datadir_arg:
processes.append((proc_id, datadir_arg[10:]))
else:
processes.append((proc_id, datadir_arg[10:]))
elif os.name == "nt":
f_out = open("portlist", 'w+')
execute_script("netstat -anop tcp", "portlist")
f_out = open("portlist", 'r')
for line in f_out.readlines():
proginfo = string.split(line)
if proginfo:
# Look for port on either local or foreign address
port = proginfo[1][proginfo[1].find(":") + 1:]
if proginfo[1][0] == '0' and port.isdigit():
if int(port) >= start and int(port) <= end:
processes.append((proginfo[4], port))
break
if len(proginfo) > 2:
port = proginfo[2][proginfo[2].find(":") + 1:]
if port.isdigit() and \
int(port) >= int(start) and int(port) <= int(end):
processes.append((proginfo[4], port))
break
f_out.close()
os.unlink("portlist")
return processes
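# Added usage sketch (not in the original module): restrict the search for
# running local servers to data directories under an assumed /tmp prefix on
# posix systems, as described in the docstring above.
def _example_local_servers():
    # Returns tuples of (process_id, datadir or port) for matching servers.
    return get_local_servers(all_proc=False, datadir_prefix="/tmp")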
def get_server(name, values, quiet, verbose=False):
"""Connect to a server and return Server instance
If the name is 'master' or 'slave', the connection will be made via the
Master or Slave class else a normal Server class shall be used.
name[in] Name of the server.
values[in] Dictionary of connection values.
quiet[in] If True, do not print messages.
verbose[in] Verbose value used by the returned server instances.
By default False.
Returns Server class instance
"""
from mysql.utilities.common.replication import Master, Slave
server_conn = None
# Try to connect to the MySQL database server.
if not quiet:
_print_connection(name, values)
server_options = {
'conn_info': values,
'role': name,
'verbose': verbose,
}
if name.lower() == 'master':
server_conn = Master(server_options)
elif name.lower() == 'slave':
server_conn = Slave(server_options)
else:
server_conn = Server(server_options)
try:
server_conn.connect()
except:
if not quiet:
print("")
raise
return server_conn
def _require_version(server, version):
"""Check version of server
server[in] Server instance
version[in] minimal version of the server required
Returns boolean - True = version Ok, False = version < required
"""
if version is not None and server is not None:
major, minor, rel = version.split(".")
if not server.check_version_compat(major, minor, rel):
return False
return True
def get_server_state(server, host, pingtime=3, verbose=False):
"""Return the state of the server.
This method returns one of the following states based on the
criteria shown.
UP - server is connected
WARN - server is not connected but can be pinged
DOWN - server cannot be pinged nor is connected
server[in] Server class instance
host[in] host name to ping if server is not connected
pingtime[in] timeout in seconds for ping operation
Default = 3 seconds
verbose[in] if True, show ping status messages
Default = False
Returns string - state
"""
if verbose:
print "# Attempting to contact %s ..." % host,
if server is not None and server.is_alive():
if verbose:
print "Success"
return "UP"
elif ping_host(host, pingtime):
if verbose:
print "Server is reachable"
return "WARN"
if verbose:
print "FAIL"
return "DOWN"
def connect_servers(src_val, dest_val, options=None):
"""Connect to a source and destination server.
This method takes two groups of --server=user:password@host:port:socket
values and attempts to connect one as a source connection and the other
as the destination connection. If the source and destination are the
same server and the unique parameter is False, destination is set to None.
The method accepts one of the following types for the src_val and dest_val:
- dictionary containing connection information including:
(user, passwd, host, port, socket)
- connection string in the form: user:pass@host:port:socket or
login-path:port:socket or
config-path[group]
- an instance of the Server class
src_val[in] source connection information
dest_val[in] destination connection information
options[in] options to control behavior:
quiet do not print any information during the operation
(default is False)
version if specified (default is None), perform version
checking and fail if server version is < version
specified - an exception is raised
src_name name to use for source server
(default is "Source")
dest_name name to use for destination server
(default is "Destination")
unique if True, servers must be different when dest_val is
not None (default is False)
verbose Verbose value used by the returned server instances
(default is False).
Returns tuple (source, destination) where
source = connection to source server
destination = connection to destination server (set to None)
if source and destination are same server
if error, returns (None, None)
"""
if options is None:
options = {}
quiet = options.get("quiet", False)
src_name = options.get("src_name", "Source")
dest_name = options.get("dest_name", "Destination")
version = options.get("version", None)
charset = options.get("charset", None)
verbose = options.get('verbose', False)
ssl_dict = {}
if options.get("ssl_cert", None) is not None:
ssl_dict['ssl_cert'] = options.get("ssl_cert")
if options.get("ssl_ca", None) is not None:
ssl_dict['ssl_ca'] = options.get("ssl_ca", None)
if options.get("ssl_key", None) is not None:
ssl_dict['ssl_key'] = options.get("ssl_key", None)
source = None
destination = None
# Get connection dictionaries
src_dict = get_connection_dictionary(src_val, ssl_dict)
if "]" in src_dict['host']:
src_dict['host'] = clean_IPv6(src_dict['host'])
dest_dict = get_connection_dictionary(dest_val)
if dest_dict and "]" in dest_dict['host']:
dest_dict['host'] = clean_IPv6(dest_dict['host'])
# Add character set
if src_dict and charset:
src_dict["charset"] = charset
if dest_dict and charset:
dest_dict["charset"] = charset
# Check for uniqueness - dictionary
if options.get("unique", False) and dest_dict is not None:
dupes = False
if "unix_socket" in src_dict and "unix_socket" in dest_dict:
dupes = (src_dict["unix_socket"] == dest_dict["unix_socket"])
else:
dupes = (src_dict["port"] == dest_dict["port"]) and \
(src_dict["host"] == dest_dict["host"])
if dupes:
raise UtilError("You must specify two different servers "
"for the operation.")
    # If we're cloning, use the same server for a faster copy
cloning = dest_dict is None or (src_dict == dest_dict)
# Connect to the source server and check version
if isinstance(src_val, Server):
source = src_val
else:
source = get_server(src_name, src_dict, quiet, verbose=verbose)
if not quiet:
print "connected."
if not _require_version(source, version):
raise UtilError("The %s version is incompatible. Utility "
"requires version %s or higher." %
(src_name, version))
# If not cloning, connect to the destination server and check version
if not cloning:
if isinstance(dest_val, Server):
destination = dest_val
else:
destination = get_server(dest_name, dest_dict, quiet,
verbose=verbose)
if not quiet:
print "connected."
if not _require_version(destination, version):
raise UtilError("The %s version is incompatible. Utility "
"requires version %s or higher." %
(dest_name, version))
elif not quiet and dest_dict is not None and \
not isinstance(dest_val, Server):
try:
_print_connection(dest_name, src_dict)
print "connected."
except:
print("")
raise
return (source, destination)
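# Added usage sketch (not in the original module): connect to a single server
# by passing None as the destination, as allowed by the docstring above. The
# connection string and option values are placeholder assumptions.
def _example_connect_servers():
    options = {"quiet": True, "src_name": "Source", "dest_name": None}
    source, destination = connect_servers("root:secret@localhost:3306",
                                           None, options)
    # destination is None because no destination connection was requested.
    return source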
def test_connect(conn_info, throw_errors=False, ssl_dict=None):
"""Test connection to a server.
The method accepts one of the following types for conn_info:
- dictionary containing connection information including:
(user, passwd, host, port, socket)
- connection string in the form: user:pass@host:port:socket or
login-path:port:socket or
config-path[group]
- an instance of the Server class
conn_info[in] Connection information
throw_errors throw any errors found during the test,
false by default.
ssl_dict[in] A dictionary with the ssl certificates
(ssl_ca, ssl_cert and ssl_key).
Returns True if connection success, False if error
"""
# Parse source connection values
try:
src_val = get_connection_dictionary(conn_info, ssl_dict)
except Exception as err:
raise ConnectionValuesError("Server connection values invalid: {0}."
"".format(err))
try:
conn_options = {
'quiet': True,
'src_name': "test",
'dest_name': None,
}
s = connect_servers(src_val, None, conn_options)
s[0].disconnect()
except UtilError:
if throw_errors:
raise
return False
return True
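# Added usage sketch (not in the original module): test_connect() returns a
# boolean, so it can be used to guard later work without handling exceptions.
# The connection string below is a placeholder assumption.
def _example_test_connect():
    return test_connect("root:secret@localhost:3306", throw_errors=False)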
def check_hostname_alias(server1_vals, server2_vals):
"""Check to see if the servers are the same machine by host name.
server1_vals[in] connection dictionary for server1
server2_vals[in] connection dictionary for server2
Returns bool - true = server1 and server2 are the same host
"""
server1 = Server({'conn_info': server1_vals})
server2 = Server({'conn_info': server2_vals})
return (server1.is_alias(server2.host) and
int(server1.port) == int(server2.port))
def stop_running_server(server, wait=10, drop=True):
"""Stop a running server.
This method will stop a server using the mysqladmin utility to
shutdown the server. It also destroys the datadir.
server[in] Server instance to clone
wait[in] Number of wait cycles for shutdown
default = 10
drop[in] If True, drop datadir
Returns - True = server shutdown, False - unknown state or error
"""
# Nothing to do if server is None
if server is None:
return True
# Build the shutdown command
cmd = ""
res = server.show_server_variable("basedir")
mysqladmin_client = "mysqladmin"
if not os.name == "posix":
mysqladmin_client = "mysqladmin.exe"
mysqladmin_path = os.path.normpath(os.path.join(res[0][1], "bin",
mysqladmin_client))
if not os.path.exists(mysqladmin_path):
mysqladmin_path = os.path.normpath(os.path.join(res[0][1], "client",
mysqladmin_client))
if not os.path.exists(mysqladmin_path) and not os.name == 'posix':
mysqladmin_path = os.path.normpath(os.path.join(res[0][1],
"client/debug",
mysqladmin_client))
if not os.path.exists(mysqladmin_path) and not os.name == 'posix':
mysqladmin_path = os.path.normpath(os.path.join(res[0][1],
"client/release",
mysqladmin_client))
cmd += mysqladmin_path
if server.socket is None and server.host == 'localhost':
server.host = '127.0.0.1'
cmd += " shutdown --user={0} --host={1} ".format(server.user, server.host)
if server.passwd:
cmd = "{0} --password={1} ".format(cmd, server.passwd)
# Use of server socket only works with 'localhost' (not with 127.0.0.1).
if server.socket and server.host == 'localhost':
cmd = "{0} --socket={1} ".format(cmd, server.socket)
else:
cmd = "{0} --port={1} ".format(cmd, server.port)
if server.has_ssl:
if server.ssl_cert is not None:
cmd = "{0} --ssl-cert={1} ".format(cmd, server.ssl_cert)
if server.ssl_ca is not None:
cmd = "{0} --ssl-ca={1} ".format(cmd, server.ssl_ca)
if server.ssl_key is not None:
cmd = "{0} --ssl-key={1} ".format(cmd, server.ssl_key)
res = server.show_server_variable("datadir")
datadir = res[0][1]
# Kill all connections so shutdown will work correctly
res = server.exec_query("SHOW PROCESSLIST")
for row in res:
if not row[7] or not row[7].upper().startswith("SHOW PROCESS"):
try:
server.exec_query("KILL CONNECTION %s" % row[0])
except UtilDBError: # Ok to ignore KILL failures
pass
# disconnect user
server.disconnect()
# Stop the server
f_null = os.devnull
f_out = open(f_null, 'w')
proc = subprocess.Popen(cmd, shell=True,
stdout=f_out, stderr=f_out)
ret_val = proc.wait()
f_out.close()
# if shutdown doesn't work, exit.
if int(ret_val) != 0:
return False
# If datadir exists, delete it
if drop:
delete_directory(datadir)
if os.path.exists("cmd.txt"):
try:
os.unlink("cmd.txt")
except:
pass
return True
class Server(object):
"""The Server class can be used to connect to a running MySQL server.
The following utilities are provided:
- Connect to the server
- Retrieve a server variable
- Execute a query
- Return list of all databases
- Return a list of specific objects for a database
- Return list of a specific objects for a database
- Return list of all indexes for a table
- Read SQL statements from a file and execute
"""
def __init__(self, options=None):
"""Constructor
The method accepts one of the following types for options['conn_info']:
- dictionary containing connection information including:
(user, passwd, host, port, socket)
- connection string in the form: user:pass@host:port:socket or
login-path:port:socket
- an instance of the Server class
options[in] options for controlling behavior:
conn_info a dictionary containing connection information
(user, passwd, host, port, socket)
role Name or role of server (e.g., server, master)
verbose print extra data during operations (optional)
default value = False
charset Default character set for the connection.
(default None)
"""
if options is None:
options = {}
assert not options.get("conn_info") is None
self.verbose = options.get("verbose", False)
self.db_conn = None
self.host = None
self.role = options.get("role", "Server")
self.has_ssl = False
conn_values = get_connection_dictionary(options.get("conn_info"))
try:
self.host = conn_values["host"]
self.user = conn_values["user"]
self.passwd = conn_values["passwd"] \
if "passwd" in conn_values else None
self.socket = conn_values["unix_socket"] \
if "unix_socket" in conn_values else None
self.port = 3306
if conn_values["port"] is not None:
self.port = int(conn_values["port"])
self.charset = options.get("charset",
conn_values.get("charset", None))
# Optional values
self.ssl_ca = conn_values.get('ssl_ca', None)
self.ssl_cert = conn_values.get('ssl_cert', None)
self.ssl_key = conn_values.get('ssl_key', None)
if self.ssl_cert or self.ssl_ca or self.ssl_key:
self.has_ssl = True
except KeyError:
raise UtilError("Dictionary format not recognized.")
self.connect_error = None
# Set to TRUE when foreign key checks are ON. Check with
# foreign_key_checks_enabled.
self.fkeys = None
self.autocommit = None
self.read_only = False
self.aliases = set()
self.grants_enabled = None
self._version = None
@classmethod
def fromServer(cls, server, conn_info=None):
""" Create a new server instance from an existing one
Factory method that will allow the creation of a new server instance
from an existing server.
server[in] instance object that must be instance of the Server
class or a subclass.
conn_info[in] A dictionary with the connection information to
connect to the server
Returns an instance of the calling class as a result.
"""
if isinstance(server, Server):
options = {"role": server.role,
"verbose": server.verbose,
"charset": server.charset}
if conn_info is not None and isinstance(conn_info, dict):
options["conn_info"] = conn_info
else:
options["conn_info"] = server.get_connection_values()
return cls(options)
else:
raise TypeError("The server argument's type is neither Server nor "
"a subclass of Server")
def is_alive(self):
"""Determine if connection to server is still alive.
Returns bool - True = alive, False = error or cannot connect.
"""
res = True
try:
if self.db_conn is None:
res = False
else:
# ping and is_connected only work partially, try exec_query
# to make sure connection is really alive
retval = self.db_conn.is_connected()
if retval:
self.exec_query("SHOW DATABASES")
else:
res = False
except:
res = False
return res
def _update_alias(self, ip_or_hostname, suffix_list):
"""Update list of aliases for the given IP or hostname.
Gets the list of aliases for host *ip_or_hostname*. If any
of them matches one of the server's aliases, then update
the list of aliases (self.aliases). It also receives a list (tuple)
of suffixes that can be ignored when checking if two hostnames are
the same.
ip_or_hostname[in] IP or hostname to test.
suffix_list[in] Tuple with list of suffixes that can be ignored.
Returns True if ip_or_hostname is a server alias, otherwise False.
"""
host_or_ip_aliases = self._get_aliases(ip_or_hostname)
host_or_ip_aliases.add(ip_or_hostname)
# Check if any of aliases matches with one the servers's aliases
common_alias = self.aliases.intersection(host_or_ip_aliases)
if common_alias: # There are common aliases, host is the same
self.aliases.update(host_or_ip_aliases)
return True
else: # Check with and without suffixes
no_suffix_server_aliases = set()
no_suffix_host_aliases = set()
for suffix in suffix_list:
# Add alias with and without suffix from self.aliases
for alias in self.aliases:
if alias.endswith(suffix):
                        host, _ = alias.rsplit('.', 1)
no_suffix_server_aliases.add(host)
no_suffix_server_aliases.add(alias)
# Add alias with and without suffix from host_aliases
for alias in host_or_ip_aliases:
if alias.endswith(suffix):
                        host, _ = alias.rsplit('.', 1)
no_suffix_host_aliases.add(host)
no_suffix_host_aliases.add(alias)
# Check if there is any alias in common
common_alias = no_suffix_host_aliases.intersection(
no_suffix_server_aliases)
if common_alias: # Same host, so update self.aliases
self.aliases.update(
no_suffix_host_aliases.union(no_suffix_server_aliases)
)
return True
return False
def _get_aliases(self, host):
"""Gets the aliases for the given host
"""
aliases = set([clean_IPv6(host)])
if hostname_is_ip(clean_IPv6(host)): # IP address
try:
my_host = socket.gethostbyaddr(clean_IPv6(host))
aliases.add(my_host[0])
# socket.gethostbyname_ex() does not work with ipv6
if (not my_host[0].count(":") < 1 or
not my_host[0] == "ip6-localhost"):
host_ip = socket.gethostbyname_ex(my_host[0])
else:
addrinfo = socket.getaddrinfo(my_host[0], None)
host_ip = ([socket.gethostbyaddr(addrinfo[0][4][0])],
[fiveple[4][0] for fiveple in addrinfo],
[addrinfo[0][4][0]])
except (socket.gaierror, socket.herror,
socket.error) as err:
host_ip = ([], [], [])
if self.verbose:
print("WARNING: IP lookup by address failed for {0},"
"reason: {1}".format(host, err.strerror))
else:
try:
# server may not really exist.
host_ip = socket.gethostbyname_ex(host)
except (socket.gaierror, socket.herror,
socket.error) as err:
if self.verbose:
print("WARNING: hostname: {0} may not be reachable, "
"reason: {1}".format(host, err.strerror))
return aliases
aliases.add(host_ip[0])
addrinfo = socket.getaddrinfo(host, None)
local_ip = None
error = None
for addr in addrinfo:
try:
local_ip = socket.gethostbyaddr(addr[4][0])
break
except (socket.gaierror, socket.herror,
socket.error) as err:
error = err
if local_ip:
host_ip = ([local_ip[0]],
[fiveple[4][0] for fiveple in addrinfo],
[addrinfo[0][4][0]])
else:
host_ip = ([], [], [])
if self.verbose:
print("WARNING: IP lookup by name failed for {0},"
"reason: {1}".format(host, error.strerror))
aliases.update(set(host_ip[1]))
aliases.update(set(host_ip[2]))
return aliases
def is_alias(self, host_or_ip):
"""Determine if host_or_ip is an alias for this host
host_or_ip[in] host or IP number to check
Returns bool - True = host_or_ip is an alias
"""
# List of possible suffixes
suffixes = ('.local', '.lan', '.localdomain')
host_or_ip = clean_IPv6(host_or_ip.lower())
# for quickness, verify in the existing aliases, if they exist.
if self.aliases:
if host_or_ip.lower() in self.aliases:
return True
else:
# get the alias for the given host_or_ip
return self._update_alias(host_or_ip, suffixes)
# no previous aliases information
# First, get the local information
hostname_ = socket.gethostname()
try:
local_info = socket.gethostbyname_ex(hostname_)
local_aliases = set([local_info[0].lower()])
# if dotted host name, take first part and use as an alias
try:
local_aliases.add(local_info[0].split('.')[0])
except:
pass
local_aliases.update(['127.0.0.1', 'localhost', '::1', '[::1]'])
local_aliases.update(local_info[1])
local_aliases.update(local_info[2])
local_aliases.update(self._get_aliases(hostname_))
except (socket.herror, socket.gaierror, socket.error) as err:
if self.verbose:
print("WARNING: Unable to find aliases for hostname"
" '{0}' reason: {1}".format(hostname_, str(err)))
# Try with the basic local aliases.
local_aliases = set(['127.0.0.1', 'localhost', '::1', '[::1]'])
# Get the aliases for this server host
self.aliases = self._get_aliases(self.host)
# Check if this server is local
for host in self.aliases.copy():
if host in local_aliases:
# Is local then save the local aliases for future.
self.aliases.update(local_aliases)
break
# Handle special suffixes in hostnames.
for suffix in suffixes:
if host.endswith(suffix):
# Remove special suffix and attempt to match with local
# aliases.
host, _ = host.rsplit('.', 1)
if host in local_aliases:
# Is local then save the local aliases for future.
self.aliases.update(local_aliases)
break
# Check if the given host_or_ip is alias of the server host.
if host_or_ip in self.aliases:
return True
# Check if any of the aliases of ip_or_host is also an alias of the
# host server.
return self._update_alias(host_or_ip, suffixes)
def user_host_exists(self, user, host_or_ip):
"""Check to see if a user, host exists
This method attempts to see if a user name matches the users on the
server and that any user, host pair can match the host or IP address
specified. This attempts to resolve wildcard matches.
user[in] user name
host_or_ip[in] host or IP address
Returns string - host from server that matches the host_or_ip or
None if no match.
"""
res = self.exec_query("SELECT host FROM mysql.user WHERE user = '%s' "
"AND '%s' LIKE host " % (user, host_or_ip))
if res:
return res[0][0]
return None
def get_connection_values(self):
"""Return a dictionary of connection values for the server.
Returns dictionary
"""
conn_vals = {
"user": self.user,
"host": self.host
}
if self.passwd:
conn_vals["passwd"] = self.passwd
if self.socket:
conn_vals["socket"] = self.socket
if self.port:
conn_vals["port"] = self.port
if self.ssl_ca:
conn_vals["ssl_ca"] = self.ssl_ca
if self.ssl_cert:
conn_vals["ssl_cert"] = self.ssl_cert
if self.ssl_key:
conn_vals["ssl_key"] = self.ssl_key
return conn_vals
def connect(self):
"""Connect to server
Attempts to connect to the server as specified by the connection
parameters.
Note: This method must be called before executing queries.
Raises UtilError if error during connect
"""
try:
self.db_conn = self.get_connection()
# If no charset provided, get it from the "character_set_client"
# server variable.
if not self.charset:
res = self.show_server_variable('character_set_client')
self.db_conn.set_charset_collation(charset=res[0][1])
self.charset = res[0][1]
except UtilError:
# Reset any previous value if the connection cannot be established,
# before raising an exception. This prevents the use of a broken
# database connection.
self.db_conn = None
raise
self.connect_error = None
self.read_only = self.show_server_variable("READ_ONLY")[0][1]
def get_connection(self):
"""Return a new connection to the server.
Attempts to connect to the server as specified by the connection
parameters and returns a connection object.
Return the resulting MySQL connection object or raises an UtilError if
an error occurred during the server connection process.
"""
try:
parameters = {
'user': self.user,
'host': self.host,
'port': self.port,
}
if self.socket and os.name == "posix":
parameters['unix_socket'] = self.socket
if self.passwd and self.passwd != "":
parameters['passwd'] = self.passwd
if self.charset:
parameters['charset'] = self.charset
parameters['host'] = parameters['host'].replace("[", "")
parameters['host'] = parameters['host'].replace("]", "")
# Add SSL parameters ONLY if they are not None
if self.ssl_ca is not None:
parameters['ssl_ca'] = self.ssl_ca
if self.ssl_cert is not None:
parameters['ssl_cert'] = self.ssl_cert
if self.ssl_key is not None:
parameters['ssl_key'] = self.ssl_key
if self.has_ssl:
cpy_flags = [ClientFlag.SSL, ClientFlag.SSL_VERIFY_SERVER_CERT]
parameters['client_flags'] = cpy_flags
db_conn = mysql.connector.connect(**parameters)
# Return MySQL connection object.
return db_conn
except mysql.connector.Error as err:
raise UtilError(err.msg, err.errno)
except AttributeError as err:
raise UtilError(str(err))
def disconnect(self):
"""Disconnect from the server.
"""
try:
self.db_conn.disconnect()
except:
pass
def get_version(self):
"""Return version number of the server.
Get the server version. The respective instance variable is set with
the result after querying the server the first time. The version is
immediately returned when already known, avoiding querying the server
at each time.
Returns string - version string or None if error
"""
# Return the local version value if already known.
if self._version:
return self._version
# Query the server for its version.
try:
res = self.show_server_variable("VERSION")
if res:
self._version = res[0][1]
except UtilError:
# Ignore errors and return _version, initialized with None.
pass
return self._version
def check_version_compat(self, t_major, t_minor, t_rel):
"""Checks version of the server against requested version.
This method can be used to check for version compatibility.
t_major[in] target server version (major)
t_minor[in] target server version (minor)
t_rel[in] target server version (release)
Returns bool True if server version is GE (>=) version specified,
False if server version is LT (<) version specified
"""
version_str = self.get_version()
if version_str is not None:
match = re.match(r'^(\d+\.\d+(\.\d+)*).*$', version_str.strip())
if match:
version = [int(x) for x in match.group(1).split('.')]
version = (version + [0])[:3] # Ensure a 3 elements list
return version >= [int(t_major), int(t_minor), int(t_rel)]
else:
return False
return True
def exec_query(self, query_str, options=None, exec_timeout=0):
"""Execute a query and return result set
This is the singular method to execute queries. It should be the only
method used as it contains critical error code to catch the issue
with mysql.connector throwing an error on an empty result set.
Note: will handle exception and print error if query fails
        Note: if fetch is False, the method returns the cursor instance
query_str[in] The query to execute
options[in] Options to control behavior:
params Parameters for query
columns Add column headings as first row
(default is False)
fetch Execute the fetch as part of the operation and
use a buffered cursor
(default is True)
raw If True, use a buffered raw cursor
(default is True)
commit Perform a commit (if needed) automatically at the
end (default: True).
exec_timeout[in] Timeout value in seconds to kill the query execution
if exceeded. Value must be greater than zero for
this feature to be enabled. By default 0, meaning
that the query will not be killed.
Returns result set or cursor
"""
if options is None:
options = {}
params = options.get('params', ())
columns = options.get('columns', False)
fetch = options.get('fetch', True)
raw = options.get('raw', True)
do_commit = options.get('commit', True)
# Guard for connect() prerequisite
assert self.db_conn, "You must call connect before executing a query."
# If we are fetching all, we need to use a buffered
if fetch:
if raw:
if mysql.connector.__version_info__ < (2, 0):
cur = self.db_conn.cursor(buffered=True, raw=True)
else:
cur = self.db_conn.cursor(
cursor_class=MySQLUtilsCursorBufferedRaw)
else:
cur = self.db_conn.cursor(buffered=True)
else:
if mysql.connector.__version_info__ < (2, 0):
cur = self.db_conn.cursor(raw=True)
else:
cur = self.db_conn.cursor(cursor_class=MySQLUtilsCursorRaw)
# Execute query, handling parameters.
q_killer = None
try:
if exec_timeout > 0:
# Spawn thread to kill query if timeout is reached.
# Note: set it as daemon to avoid waiting for it on exit.
q_killer = QueryKillerThread(self, query_str, exec_timeout)
q_killer.daemon = True
q_killer.start()
# Execute query.
if params == ():
cur.execute(query_str)
else:
cur.execute(query_str, params)
except mysql.connector.Error as err:
cur.close()
print(query_str + "\n")
print(str(err.errno) + "\n")
if err.errno == CR_SERVER_LOST and exec_timeout > 0:
print(str(exec_timeout) + "\n")
# If the connection is killed (because the execution timeout is
# reached), then it attempts to re-establish it (to execute
# further queries) and raise a specific exception to track this
# event.
# CR_SERVER_LOST = Errno 2013 Lost connection to MySQL server
# during query.
self.db_conn.reconnect()
raise UtilError("Timeout executing query", err.errno)
else:
raise UtilDBError("Query failed. {0}".format(err))
except Exception:
cur.close()
raise UtilError("Unknown error. Command: {0}".format(query_str))
finally:
# Stop query killer thread if alive.
if q_killer and q_killer.is_alive():
q_killer.stop()
# Fetch rows (only if available or fetch = True).
if cur.with_rows:
if fetch or columns:
try:
results = cur.fetchall()
if columns:
col_headings = cur.column_names
col_names = []
for col in col_headings:
col_names.append(col)
results = col_names, results
except mysql.connector.Error as err:
raise UtilDBError("Error fetching all query data: "
"{0}".format(err))
finally:
cur.close()
return results
else:
# Return cursor to fetch rows elsewhere (fetch = false).
return cur
else:
# No results (not a SELECT)
try:
if do_commit:
self.db_conn.commit()
except mysql.connector.Error as err:
raise UtilDBError("Error performing commit: {0}".format(err))
finally:
cur.close()
return cur
def commit(self):
"""Perform a COMMIT.
"""
# Guard for connect() prerequisite
assert self.db_conn, "You must call connect before executing a query."
self.db_conn.commit()
def rollback(self):
"""Perform a ROLLBACK.
"""
# Guard for connect() prerequisite
assert self.db_conn, "You must call connect before executing a query."
self.db_conn.rollback()
def show_server_variable(self, variable):
"""Returns one or more rows from the SHOW VARIABLES command.
variable[in] The variable or wildcard string
Returns result set
"""
return self.exec_query("SHOW VARIABLES LIKE '%s'" % variable)
def select_variable(self, var_name, var_type=None):
"""Get server system variable value using SELECT statement.
This function displays the value of system variables using the SELECT
statement. This can be used as a workaround for variables with very
long values, as SHOW VARIABLES is subject to a version-dependent
display-width limit.
Note: Some variables may not be available using SELECT @@var_name, in
such cases use SHOW VARIABLES LIKE 'var_name'.
var_name[in] Name of the variable to display.
var_type[in] Type of the variable ('session' or 'global'). By
default no type is used, meaning that the session
value is returned if it exists and the global value
otherwise.
Return the value for the given server system variable.
"""
if var_type is None:
var_type = ''
elif var_type.lower() in ('global', 'session', ''):
var_type = '{0}.'.format(var_type) # Add dot (.)
else:
raise UtilDBError("Invalid variable type: {0}. Supported types: "
"'global' and 'session'.".format(var_type))
# Execute SELECT @@[var_type.]var_name.
# Note: An error is issued if the given variable is not known.
res = self.exec_query("SELECT @@{0}{1}".format(var_type, var_name))
return res[0][0]
def flush_logs(self, log_type=None):
"""Execute the FLUSH [log_type] LOGS statement.
Reload internal logs cache and closes and reopens all log files, or
only of the specified log_type.
Note: The log_type option is available from MySQL 5.5.3.
log_type[in] Type of the log files to be flushed. Supported values:
BINARY, ENGINE, ERROR, GENERAL, RELAY, SLOW.
"""
if log_type:
self.exec_query("FLUSH {0} LOGS".format(log_type))
else:
self.exec_query("FLUSH LOGS")
def get_uuid(self):
"""Return the uuid for this server if it is GTID aware.
Returns uuid or None if server is not GTID aware.
"""
if self.supports_gtid() != "NO":
res = self.show_server_variable("server_uuid")
return res[0][1]
return None
def supports_gtid(self):
"""Determine if server supports GTIDs
Returns string - 'ON' = gtid supported and turned on,
'OFF' = supported but not enabled,
'NO' = not supported
"""
# Check servers for GTID support
version_ok = self.check_version_compat(5, 6, 5)
if not version_ok:
return "NO"
try:
res = self.exec_query("SELECT @@GLOBAL.GTID_MODE")
except:
return "NO"
return res[0][0]
def check_gtid_version(self):
"""Determine if server supports latest GTID changes
This method checks the server to ensure it contains the latest
changes to the GTID variables (from version 5.6.9).
Raises UtilRplError when errors occur.
"""
errors = []
if not self.supports_gtid() == "ON":
errors.append(" GTID is not enabled.")
if not self.check_version_compat(5, 6, 9):
errors.append(" Server version must be 5.6.9 or greater.")
res = self.exec_query("SHOW VARIABLES LIKE 'gtid_executed'")
if res == [] or not res[0][0] == "gtid_executed":
errors.append(" Missing gtid_executed system variable.")
if errors:
errors = "\n".join(errors)
errors = "\n".join([_GTID_ERROR % (self.host, self.port), errors])
raise UtilRplError(errors)
def check_gtid_executed(self, operation="copy"):
"""Check to see if the gtid_executed variable is clear
If the value is not clear, raise an error with appropriate instructions
for the user to correct the issue.
operation[in] Name of the operation (copy, import, etc.)
default = copy
"""
res = self.exec_query("SHOW GLOBAL VARIABLES LIKE 'gtid_executed'")[0]
if res[1].strip() == '':
return
err = ("The {0} operation contains GTID statements "
"that require the global gtid_executed system variable on the "
"target to be empty (no value). The gtid_executed value must "
"be reset by issuing a RESET MASTER command on the target "
"prior to attempting the {0} operation. "
"Once the global gtid_executed value is cleared, you may "
"retry the {0}.").format(operation)
raise UtilRplError(err)
def get_gtid_executed(self, skip_gtid_check=True):
"""Get the executed GTID set of the server.
This function retrieves the (current) GTID_EXECUTED set of the server.
skip_gtid_check[in] Flag indicating if the check for GTID support
will be skipped or not. By default 'True'
(check is skipped).
Returns a string with the GTID_EXECUTED set for this server.
"""
if not skip_gtid_check:
# Check server for GTID support.
            gtid_support = self.supports_gtid()
if gtid_support == 'NO':
raise UtilRplError("Global Transaction IDs are not supported.")
elif gtid_support == 'OFF':
raise UtilError("Global Transaction IDs are not enabled.")
# Get GTID_EXECUTED.
try:
return self.exec_query("SELECT @@GLOBAL.GTID_EXECUTED")[0][0]
except UtilError:
if skip_gtid_check:
# Query likely failed because GTIDs are not supported,
# therefore skip error in this case.
return ""
else:
# If GTID check is not skipped re-raise exception.
raise
except IndexError:
# If no rows are returned by query then return an empty string.
return ''
def gtid_subtract(self, gtid_set, gtid_subset):
"""Subtract given GTID sets.
This function invokes GTID_SUBTRACT function on the server to retrieve
the GTIDs from the given gtid_set that are not in the specified
gtid_subset.
gtid_set[in] Base GTID set to subtract the subset from.
gtid_subset[in] GTID subset to be subtracted from the base set.
Return a string with the GTID set resulting from the subtraction of the
specified gtid_subset from the gtid_set.
"""
try:
return self.exec_query(
"SELECT GTID_SUBTRACT('{0}', '{1}')".format(gtid_set,
gtid_subset)
)[0][0]
except IndexError:
# If no rows are returned by query then return an empty string.
return ''
def gtid_subtract_executed(self, gtid_set):
"""Subtract GTID_EXECUTED to the given GTID set.
This function invokes GTID_SUBTRACT function on the server to retrieve
the GTIDs from the given gtid_set that are not in the GTID_EXECUTED
set.
gtid_set[in] Base GTID set to subtract the GTID_EXECUTED.
Return a string with the GTID set resulting from the subtraction of the
GTID_EXECUTED set from the specified gtid_set.
"""
from mysql.utilities.common.topology import _GTID_SUBTRACT_TO_EXECUTED
try:
return self.exec_query(
_GTID_SUBTRACT_TO_EXECUTED.format(gtid_set)
)[0][0]
except IndexError:
# If no rows are returned by query then return an empty string.
return ''
def checksum_table(self, tbl_name, exec_timeout=0):
"""Compute checksum of specified table (CHECKSUM TABLE tbl_name).
This function executes the CHECKSUM TABLE statement for the specified
table and returns the result. The CHECKSUM is aborted (query killed)
if a timeout value (greater than zero) is specified and the execution
takes longer than the specified time.
tbl_name[in] Name of the table to perform the checksum.
exec_timeout[in] Maximum execution time (in seconds) of the query
after which it will be killed. By default 0, no
timeout.
Returns a tuple with the checksum result for the target table. The
first tuple element contains the result from the CHECKSUM TABLE query
or None if an error occurred (e.g. execution timeout reached). The
second element holds any error message or None if the operation was
successful.
"""
try:
return self.exec_query(
"CHECKSUM TABLE {0}".format(tbl_name),
exec_timeout=exec_timeout
)[0], None
except IndexError:
# If no rows are returned by query then return None.
return None, "No data returned by CHECKSUM TABLE"
except UtilError as err:
# Return None if the query is killed (exec_timeout reached).
return None, err.errmsg
def get_gtid_status(self):
"""Get the GTID information for the server.
This method attempts to retrieve the GTID lists. If the server
does not have GTID turned on or does not support GTID, the method
        will throw an exception.
Returns [list, list, list]
"""
# Check servers for GTID support
if self.supports_gtid() == "NO":
raise UtilError("Global Transaction IDs are not supported.")
res = self.exec_query("SELECT @@GLOBAL.GTID_MODE")
if res[0][0].upper() == 'OFF':
raise UtilError("Global Transaction IDs are not enabled.")
gtid_data = [self.exec_query("SELECT @@GLOBAL.GTID_EXECUTED")[0],
self.exec_query("SELECT @@GLOBAL.GTID_PURGED")[0],
self.exec_query("SELECT @@GLOBAL.GTID_OWNED")[0]]
return gtid_data
def check_rpl_user(self, user, host):
"""Check replication user exists and has the correct privileges.
user[in] user name of rpl_user
host[in] host name of rpl_user
Returns [] - no exceptions, list if exceptions found
"""
errors = []
ipv6 = False
if "]" in host:
ipv6 = True
host = clean_IPv6(host)
result = self.user_host_exists(user, host)
if ipv6:
result = format_IPv6(result)
if result is None or result == []:
errors.append("The replication user %s@%s was not found "
"on %s:%s." % (user, host, self.host, self.port))
else:
rpl_user = User(self, "%s@" % user + result)
if not rpl_user.has_privilege('*', '*',
'REPLICATION SLAVE'):
errors.append("Replication user does not have the "
"correct privilege. She needs "
"'REPLICATION SLAVE' on all replicated "
"databases.")
return errors
def supports_plugin(self, plugin):
"""Check if the given plugin is supported.
Check to see if the server supports a plugin. Return True if
plugin installed and active.
plugin[in] Name of plugin to check
Returns True if plugin is supported, and False otherwise.
"""
_PLUGIN_QUERY = ("SELECT * FROM INFORMATION_SCHEMA.PLUGINS "
"WHERE PLUGIN_NAME ")
res = self.exec_query("".join([_PLUGIN_QUERY, "LIKE ",
"'%s" % plugin, "%'"]))
if not res:
return False
# Now see if it is active.
elif res[0][2] != 'ACTIVE':
return False
return True
def get_all_databases(self, ignore_internal_dbs=True):
"""Return a result set containing all databases on the server
except for internal databases (mysql, INFORMATION_SCHEMA,
PERFORMANCE_SCHEMA)
Returns result set
"""
if ignore_internal_dbs:
_GET_DATABASES = """
SELECT SCHEMA_NAME
FROM INFORMATION_SCHEMA.SCHEMATA
WHERE SCHEMA_NAME != 'INFORMATION_SCHEMA'
AND SCHEMA_NAME != 'PERFORMANCE_SCHEMA'
AND SCHEMA_NAME != 'mysql'
"""
else:
_GET_DATABASES = """
SELECT SCHEMA_NAME
FROM INFORMATION_SCHEMA.SCHEMATA
"""
return self.exec_query(_GET_DATABASES)
def get_storage_engines(self):
"""Return list of storage engines on this server.
Returns (list) (engine, support, comment)
"""
_QUERY = """
SELECT UPPER(engine), UPPER(support)
FROM INFORMATION_SCHEMA.ENGINES
ORDER BY engine
"""
return self.exec_query(_QUERY)
def check_storage_engines(self, other_list):
"""Compare storage engines from another server.
This method compares the list of storage engines for the current
server against a list supplied as **other_list**. It returns two
lists - one for the storage engines on this server not on the other
list, and another for the storage engines on the other list not on this
server.
Note: type case sensitive - make sure list is in uppercase
other_list[in] A list from another server in the form
(engine, support) - same output as
get_storage_engines()
Returns (list, list)
"""
# Guard for connect() prerequisite
assert self.db_conn, "You must call connect before check engine lists."
def _convert_set_to_list(set_items):
"""Convert a set to list
"""
if len(set_items) > 0:
item_list = []
for item in set_items:
item_list.append(item)
else:
item_list = None
return item_list
# trivial, but guard against misuse
this_list = self.get_storage_engines()
if other_list is None:
return (this_list, None)
same = set(this_list) & set(other_list)
master_extra = _convert_set_to_list(set(this_list) - same)
slave_extra = _convert_set_to_list(set(other_list) - same)
return (master_extra, slave_extra)
def has_storage_engine(self, target):
"""Check to see if an engine exists and is supported.
target[in] name of engine to find
Returns bool True - engine exists and is active, false = does not
exist or is not supported/not active/disabled
"""
if len(target) == 0:
return True # This says we will use default engine on the server.
if target is not None:
engines = self.get_storage_engines()
for engine in engines:
if engine[0].upper() == target.upper() and \
engine[1].upper() in ['YES', 'DEFAULT']:
return True
return False
def substitute_engine(self, tbl_name, create_str,
new_engine, def_engine, quiet=False):
"""Replace storage engine in CREATE TABLE
This method will replace the storage engine in the CREATE statement
under the following conditions:
- If new_engine is specified and it exists on destination, use it.
        - Else if the existing engine does not exist and def_engine is specified
and it exists on destination, use it. Also, don't substitute if
the existing engine will not be changed.
tbl_name[in] table name
create_str[in] CREATE statement
new_engine[in] name of storage engine to substitute (convert to)
def_engine[in] name of storage engine to use if existing engines
does not exist
Returns string CREATE string with replacements if found, else return
original string
"""
res = [create_str]
exist_engine = ''
is_create_like = False
replace_msg = "# Replacing ENGINE=%s with ENGINE=%s for table %s."
add_msg = "# Adding missing ENGINE=%s clause for table %s."
if new_engine is not None or def_engine is not None:
i = create_str.find("ENGINE=")
if i > 0:
j = create_str.find(" ", i)
exist_engine = create_str[i + 7:j]
else:
## Check if it is a CREATE TABLE LIKE statement
is_create_like = (create_str.find("CREATE TABLE {0} LIKE"
"".format(tbl_name)) == 0)
# Set default engine
#
# If a default engine is specified and is not the same as the
# engine specified in the table CREATE statement (existing engine) if
# specified, and both engines exist on the server, replace the existing
# engine with the default engine.
#
if def_engine is not None and \
exist_engine.upper() != def_engine.upper() and \
self.has_storage_engine(def_engine) and \
self.has_storage_engine(exist_engine):
# If no ENGINE= clause present, add it
if len(exist_engine) == 0:
if is_create_like:
alter_str = "ALTER TABLE {0} ENGINE={1}".format(tbl_name,
def_engine)
res = [create_str, alter_str]
else:
i = create_str.find(";")
i = len(create_str) if i == -1 else i
create_str = "{0} ENGINE={1};".format(create_str[0:i],
def_engine)
res = [create_str]
# replace the existing storage engine
else:
create_str.replace("ENGINE=%s" % exist_engine,
"ENGINE=%s" % def_engine)
if not quiet:
if len(exist_engine) > 0:
print replace_msg % (exist_engine, def_engine, tbl_name)
else:
print add_msg % (def_engine, tbl_name)
exist_engine = def_engine
# Use new engine
if (new_engine is not None and
exist_engine.upper() != new_engine.upper() and
self.has_storage_engine(new_engine)):
if len(exist_engine) == 0:
if is_create_like:
alter_str = "ALTER TABLE {0} ENGINE={1}".format(tbl_name,
new_engine)
res = [create_str, alter_str]
else:
i = create_str.find(";")
i = len(create_str) if i == -1 else i
create_str = "{0} ENGINE={1};".format(create_str[0:i],
new_engine)
res = [create_str]
else:
create_str = create_str.replace("ENGINE=%s" % exist_engine,
"ENGINE=%s" % new_engine)
res = [create_str]
if not quiet:
if len(exist_engine) > 0:
print replace_msg % (exist_engine, new_engine, tbl_name)
else:
print add_msg % (new_engine, tbl_name)
return res
def get_innodb_stats(self):
"""Return type of InnoDB engine and its version information.
This method returns a tuple containing the type of InnoDB storage
engine (builtin or plugin) and the version number reported.
Returns (tuple) (type = 'builtin' or 'plugin', version_number,
have_innodb = True or False)
"""
# Guard for connect() prerequisite
assert self.db_conn, "You must call connect before get innodb stats."
_BUILTIN = """
SELECT (support='YES' OR support='DEFAULT' OR support='ENABLED')
AS `exists` FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb';
"""
_PLUGIN = """
SELECT (plugin_library LIKE 'ha_innodb_plugin%') AS `exists`
FROM INFORMATION_SCHEMA.PLUGINS
WHERE LOWER(plugin_name) = 'innodb' AND
LOWER(plugin_status) = 'active';
"""
_VERSION = """
SELECT plugin_version, plugin_type_version
FROM INFORMATION_SCHEMA.PLUGINS
WHERE LOWER(plugin_name) = 'innodb';
"""
inno_type = None
results = self.exec_query(_BUILTIN)
if results is not None and results != () and results[0][0] is not None:
inno_type = "builtin"
results = self.exec_query(_PLUGIN)
if results is not None and results != () and \
results != [] and results[0][0] is not None:
inno_type = "plugin "
results = self.exec_query(_VERSION)
version = []
if results is not None:
version.append(results[0][0])
version.append(results[0][1])
else:
version.append(None)
version.append(None)
results = self.show_server_variable("have_innodb")
if results is not None and results != [] and \
results[0][1].lower() == "yes":
have_innodb = True
else:
have_innodb = False
return (inno_type, version[0], version[1], have_innodb)
def read_and_exec_SQL(self, input_file, verbose=False):
"""Read an input file containing SQL statements and execute them.
input_file[in] The full path to the file
verbose[in] Print the command read
Default = False
Returns True = success, False = error
TODO : Make method read multi-line queries.
"""
f_input = open(input_file)
res = True
while True:
cmd = f_input.readline()
if not cmd:
break
res = None
if len(cmd) > 1:
if cmd[0] != '#':
if verbose:
print cmd
query_options = {
'fetch': False
}
res = self.exec_query(cmd, query_options)
f_input.close()
return res
def binlog_enabled(self):
"""Check binary logging status for the client.
Returns bool - True - binary logging is ON, False = OFF
"""
res = self.show_server_variable("log_bin")
if not res:
raise UtilRplError("Cannot retrieve status of log_bin variable.")
if res[0][1] in ("OFF", "0"):
return False
return True
def toggle_binlog(self, action="disable"):
"""Enable or disable binary logging for the client.
Note: user must have SUPER privilege
action[in] if 'disable', turn off the binary log
elif 'enable' turn binary log on
do nothing if action != 'enable' or 'disable'
"""
if action.lower() == 'disable':
self.exec_query("SET SQL_LOG_BIN=0")
elif action.lower() == 'enable':
self.exec_query("SET SQL_LOG_BIN=1")
def foreign_key_checks_enabled(self, force=False):
"""Check foreign key status for the connection.
force[in] if True, returns the value directly from the server
instead of returning the cached fkey value
Returns bool - True - foreign keys are enabled
"""
if self.fkeys is None or force:
res = self.show_server_variable("foreign_key_checks")
self.fkeys = (res is not None) and (res[0][1] == "ON")
return self.fkeys
def disable_foreign_key_checks(self, disable=True):
"""Enable or disable foreign key checks for the connection.
disable[in] if True, turn off foreign key checks
elif False turn foreign key checks on.
"""
if self.fkeys is None:
self.foreign_key_checks_enabled()
# Only do something if foreign keys are OFF and shouldn't be disabled
# or if they are ON and should be disabled
if self.fkeys == disable:
val = "OFF" if disable else "ON"
self.exec_query(_FOREIGN_KEY_SET.format(val),
{'fetch': False, 'commit': False})
self.fkeys = not self.fkeys
def autocommit_set(self):
"""Check autocommit status for the connection.
Returns bool - True if autocommit is enabled and False otherwise.
"""
if self.autocommit is None:
res = self.show_server_variable('autocommit')
self.autocommit = (res and res[0][1] == '1')
return self.autocommit
def toggle_autocommit(self, enable=None):
"""Enable or disable autocommit for the connection.
        This method switches the autocommit value or enables/disables it according
to the given parameter.
enable[in] if True, turn on autocommit (set to 1)
else if False turn autocommit off (set to 0).
"""
if enable is None:
# Switch autocommit value.
if self.autocommit is None:
# Get autocommit value if unknown
self.autocommit_set()
if self.autocommit:
value = '0'
self.autocommit = False
else:
value = '1'
self.autocommit = True
else:
# Set AUTOCOMMIT according to provided value.
if enable:
value = '1'
self.autocommit = True
else:
value = '0'
self.autocommit = False
# Change autocommit value.
self.exec_query(_AUTOCOMMIT_SET.format(value), {'fetch': 'false'})
def get_server_id(self):
"""Retrieve the server id.
Returns int - server id.
"""
try:
res = self.show_server_variable("server_id")
except:
raise UtilRplError("Cannot retrieve server id from "
"%s." % self.role)
return int(res[0][1])
def get_server_uuid(self):
"""Retrieve the server uuid.
Returns string - server uuid.
"""
try:
res = self.show_server_variable("server_uuid")
if res is None or res == []:
return None
except:
raise UtilRplError("Cannot retrieve server_uuid from "
"%s." % self.role)
return res[0][1]
def get_lctn(self):
"""Get lower_case_table_name setting.
Returns lctn value or None if cannot get value
"""
res = self.show_server_variable("lower_case_table_names")
if res != []:
return res[0][1]
return None
def get_binary_logs(self, options=None):
"""Return a list of the binary logs.
options[in] query options
Returns list - binlogs or None if binary logging turned off
"""
if options is None:
options = {}
if self.binlog_enabled():
return self.exec_query("SHOW BINARY LOGS", options)
return None
def set_read_only(self, on=False):
"""Turn read only mode on/off
on[in] if True, turn read_only ON
Default is False
"""
# Only turn on|off read only if it were off at connect()
if not self.read_only:
return self.exec_query("SET @@GLOBAL.READ_ONLY = %s" %
"ON" if on else "OFF")
return None
def grant_tables_enabled(self):
"""Check to see if grant tables are enabled
Returns bool - True = grant tables are enabled, False = disabled
"""
if self.grants_enabled is None:
try:
self.exec_query("SHOW GRANTS FOR 'snuffles'@'host'")
self.grants_enabled = True
except UtilError as error:
if "--skip-grant-tables" in error.errmsg:
self.grants_enabled = False
# Ignore other errors as they are not pertinent to the check
else:
self.grants_enabled = True
return self.grants_enabled
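# Added usage sketch (not in the original module): a minimal end-to-end use of
# the Server class above. The connection values are placeholder assumptions;
# exec_query() is used exactly as documented, with default options.
def _example_server_usage():
    server = Server({'conn_info': {'user': 'root', 'passwd': 'secret',
                                   'host': 'localhost', 'port': 3306},
                     'role': 'example'})
    server.connect()
    rows = server.exec_query("SHOW DATABASES")
    server.disconnect()
    return rows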
class QueryKillerThread(threading.Thread):
"""Class to run a thread to kill an executing query.
    This class is used to spawn a thread that will kill the execution
(connection) of a query upon reaching a given timeout.
"""
def __init__(self, server, query, timeout):
"""Constructor.
server[in] Server instance where the target query is executed.
query[in] Target query to kill.
timeout[in] Timeout value in seconds used to kill the query when
reached.
"""
threading.Thread.__init__(self)
self._stop_event = threading.Event()
self._query = query
self._timeout = timeout
self._server = server
self._connection = server.get_connection()
server.get_version()
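        # Typical usage (a sketch; ``server`` and ``query`` stand for an
        # existing Server instance and the SQL text about to be executed):
        #
        #   killer = QueryKillerThread(server, query, timeout=30)
        #   killer.start()
        #   try:
        #       server.exec_query(query)
        #   finally:
        #       killer.stop()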
def run(self):
"""Main execution of the query killer thread.
Stop the thread if instructed as such
"""
connector_error = None
# Kill the query connection upon reaching the given execution timeout.
while not self._stop_event.is_set():
# Wait during the defined time.
self._stop_event.wait(self._timeout)
# If the thread was asked to stop during wait, it does not try to
# kill the query.
if not self._stop_event.is_set():
                cur = None
                try:
if mysql.connector.__version_info__ < (2, 0):
cur = self._connection.cursor(raw=True)
else:
cur = self._connection.cursor(
cursor_class=MySQLUtilsCursorRaw)
# Get process information from threads table when available
# (for versions > 5.6.1), since it does not require a mutex
# and has minimal impact on server performance.
if self._server.check_version_compat(5, 6, 1):
cur.execute(
"SELECT processlist_id "
"FROM performance_schema.threads"
" WHERE processlist_command='Query'"
" AND processlist_info='{0}'".format(self._query))
else:
cur.execute(
"SELECT id FROM information_schema.processlist"
" WHERE command='Query'"
" AND info='{0}'".format(self._query))
result = cur.fetchall()
try:
process_id = result[0][0]
except IndexError:
# No rows are returned if the query ended in the
# meantime.
process_id = None
                    # Kill the connection associated with the process id.
                    # Note: killing the query will not work with
                    # connector-python, since it will hang waiting for the
# query to return.
if process_id:
cur.execute("KILL {0}".format(process_id))
except mysql.connector.Error as err:
# Hold error to raise at the end.
connector_error = err
finally:
# Close cursor if available.
if cur:
cur.close()
# Stop this thread.
self.stop()
# Close connection.
try:
self._connection.disconnect()
except mysql.connector.Error:
# Only raise error if no previous error has occurred.
if not connector_error:
raise
finally:
# Raise any previous error that already occurred.
if connector_error is not None:
# pylint: disable=E0702
raise connector_error
def stop(self):
"""Stop the thread.
Set the event flag for the thread to stop as soon as possible.
"""
self._stop_event.set()
| apache-2.0 | 423,251,853,561,488,700 | 37.520248 | 79 | 0.547716 | false |
davebridges/Lab-Website | papers/api.py | 2 | 11043 | '''This package controls API access to the :mod:`papers` app.
Overview
--------
The API for the :mod:`papers` application provides data on publications. The data can be provided as either a group of publications or as a single publication. Only GET requests are accepted.
These urls are served at the endpoint **/api/v1/publications/**, and depends on your server url. For these examples we will presume that you can reach this endpoint at **http://yourserver.org/api/v1/publications/**. Currently for all requests, no authentication is required. The entire API schema is available at::
http://yourserver.org/api/v1/publications/schema/?format=xml
http://yourserver.org/api/v1/publications/schema/?format=json
Sample Code
-----------
Either group requests or single publication requests can be served, depending on whether the primary key is provided. The request URI has several parts, including the server name, the API version (currently v1) and then the item type (publications). There must be a trailing slash before the request parameters (which follow a **?** sign and are separated by **&** signs).
For a collection of publications
````````````````````````````````
For a collection of publications you can request::
http://yourserver.org/api/v1/publications/?format=json
This would return all publications in the database as a JSON response with two top-level objects, meta and objects.
The meta object contains fields for the limit, next, offset, previous and total_count for the series of objects requested. The objects portion is an array of the returned publications. Note the id field of a publication. This is used for retrieving a single publication. Collections can also be filtered based on type or year::
http://yourserver.org/api/v1/publications/?format=json&year=2012
http://yourserver.org/api/v1/publications/?format=json&type=journal-article
http://yourserver.org/api/v1/publications/?format=json&type=journal-article&year=2012
http://yourserver.org/api/v1/publications/set/1;3/?format=json
The last example requests the publications with id numbers 1 and 3.
For a single publication
````````````````````````
To retrieve a single publication you need to know the primary key of the object. This can be found from the id parameter of a collection (see above) or from the actual object page. You can retrieve details about a single article with a call such as::
http://yourserver.org/api/v1/publications/2/?format=json
In this case **2** is the primary key (or id field) of the publication in question.
Reference
---------
Request Parameters
``````````````````
The following are the potential request variables. You must supply a format, but can also filter based on other parameters. By default 20 items are returned but you can increase this to all by setting limit=0.
+------------------+-----------------------------------------+
| Parameter | Potential Values |
+==================+=========================================+
| format | **json** or **xml** |
+------------------+-----------------------------------------+
| year | **2008** |
+------------------+-----------------------------------------+
| type | **journal-article** or **book-section** |
+------------------+-----------------------------------------+
| laboratory_paper | **true** or **false** |
+------------------+-----------------------------------------+
| limit | **0** for all, any other number |
+------------------+-----------------------------------------+
Response Values
```````````````
The response (in either json or xml) provides the following fields for each object (or for the only object in the case of a single object request).
+--------------------+-----------------------------------------------------+-------------------------------------------------------------+
| Field | Explanation | Sample Value |
+====================+=====================================================+=============================================================+
| absolute_url | the url of the page on this site | /papers/tc10-is-regulated-by-caveolin-in-3t3-l1-adipocytes/ |
+--------------------+-----------------------------------------------------+-------------------------------------------------------------+
| abstract | abstract or summary | some text... |
+--------------------+-----------------------------------------------------+-------------------------------------------------------------+
| date_added | data added to this database | 2012-08-18 |
+--------------------+-----------------------------------------------------+-------------------------------------------------------------+
| date_last_modified | last modified in database | 2012-08-25 |
+--------------------+-----------------------------------------------------+-------------------------------------------------------------+
| doi | digital object identifier | 10.1371/journal.pone.0042451 |
+--------------------+-----------------------------------------------------+-------------------------------------------------------------+
| id | the database id number | 1 |
+--------------------+-----------------------------------------------------+-------------------------------------------------------------+
| interesting_paper | whether the paper is marked as an interesting paper | false |
+--------------------+-----------------------------------------------------+-------------------------------------------------------------+
| issue | the issue of the journal | 8 |
+--------------------+-----------------------------------------------------+-------------------------------------------------------------+
| journal | the name of the journal | PLOS One |
+--------------------+-----------------------------------------------------+-------------------------------------------------------------+
| laboratory_paper | whether the paper is from this laboratory | true |
+--------------------+-----------------------------------------------------+-------------------------------------------------------------+
| mendeley_id | the mendeley id number for the paper | null |
+--------------------+-----------------------------------------------------+-------------------------------------------------------------+
| mendeley_url | the mendeley url for the paper | |
+--------------------+-----------------------------------------------------+-------------------------------------------------------------+
| pages | page range for the paper | e42451 |
+--------------------+-----------------------------------------------------+-------------------------------------------------------------+
| pmcid | PubMed Central id number | null |
+--------------------+-----------------------------------------------------+-------------------------------------------------------------+
| pmid | PubMed id number | 22900022 |
+--------------------+-----------------------------------------------------+-------------------------------------------------------------+
| resource_uri | a link to the api for this publication | /api/v1/publications/1/ |
+--------------------+-----------------------------------------------------+-------------------------------------------------------------+
| title | the title of the paper | TC10 Is Regulated by Caveolin in 3T3-L1 Adipocytes. |
+--------------------+-----------------------------------------------------+-------------------------------------------------------------+
| title_slug | slugified title of the paper | tc10-is-regulated-by-caveolin-in-3t3-l1-adipocytes |
+--------------------+-----------------------------------------------------+-------------------------------------------------------------+
| type | type of publication | journal-article |
+--------------------+-----------------------------------------------------+-------------------------------------------------------------+
| volume | volume of the article in a journal | 7 |
+--------------------+-----------------------------------------------------+-------------------------------------------------------------+
| year | publication year | 2012 |
+--------------------+-----------------------------------------------------+-------------------------------------------------------------+
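
To exercise the API from a script, something like the following works (a
sketch using the third-party ``requests`` package; the server URL is the
placeholder used throughout this document)::

    import requests

    response = requests.get('http://yourserver.org/api/v1/publications/',
                            params={'format': 'json', 'year': 2012})
    data = response.json()
    for publication in data['objects']:
        print(publication['title'])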
'''
from tastypie.resources import ModelResource
from papers.models import Publication
class PublicationResource(ModelResource):
'''This generates the API resource for :class:`~papers.models.Publication` objects.
It returns all publications in the database.
Authors are currently not linked, as that would require an API to the :mod:`personnel` app.
'''
class Meta:
'''The API serves all :class:`~papers.models.Publication` objects in the database..'''
queryset = Publication.objects.all()
resource_name = 'publications'
list_allowed_methods = ['get']
detail_allowed_methods = ['get']
include_absolute_url = True
filtering = {
"year": 'exact',
"type": ('exact', 'contains',),
"laboratory_paper": 'exact',
}
| mit | -652,082,548,811,331,100 | 76.230769 | 362 | 0.350448 | false |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/prompt_toolkit/input.py | 20 | 3214 | """
Abstraction of CLI Input.
"""
from __future__ import unicode_literals
from .utils import DummyContext, is_windows
from abc import ABCMeta, abstractmethod
from six import with_metaclass
import io
import os
import sys
if is_windows():
from .terminal.win32_input import raw_mode, cooked_mode
else:
from .terminal.vt100_input import raw_mode, cooked_mode
__all__ = (
'Input',
'StdinInput',
'PipeInput',
)
class Input(with_metaclass(ABCMeta, object)):
"""
Abstraction for any input.
An instance of this class can be given to the constructor of a
:class:`~prompt_toolkit.interface.CommandLineInterface` and will also be
passed to the :class:`~prompt_toolkit.eventloop.base.EventLoop`.
"""
@abstractmethod
def fileno(self):
"""
Fileno for putting this in an event loop.
"""
@abstractmethod
def read(self):
"""
Return text from the input.
"""
@abstractmethod
def raw_mode(self):
"""
Context manager that turns the input into raw mode.
"""
@abstractmethod
def cooked_mode(self):
"""
Context manager that turns the input into cooked mode.
"""
class StdinInput(Input):
"""
Simple wrapper around stdin.
"""
def __init__(self, stdin=None):
self.stdin = stdin or sys.stdin
# The input object should be a TTY.
assert self.stdin.isatty()
# Test whether the given input object has a file descriptor.
# (Idle reports stdin to be a TTY, but fileno() is not implemented.)
try:
# This should not raise, but can return 0.
self.stdin.fileno()
except io.UnsupportedOperation:
if 'idlelib.run' in sys.modules:
raise io.UnsupportedOperation(
'Stdin is not a terminal. Running from Idle is not supported.')
else:
raise io.UnsupportedOperation('Stdin is not a terminal.')
def __repr__(self):
return 'StdinInput(stdin=%r)' % (self.stdin,)
def raw_mode(self):
return raw_mode(self.stdin.fileno())
def cooked_mode(self):
return cooked_mode(self.stdin.fileno())
def fileno(self):
return self.stdin.fileno()
def read(self):
return self.stdin.read()
class PipeInput(Input):
"""
    Input that is sent through a pipe.
    This is useful if we want to send the input programmatically into the
interface, but still use the eventloop.
Usage::
input = PipeInput()
input.send('inputdata')
"""
def __init__(self):
self._r, self._w = os.pipe()
def fileno(self):
return self._r
def read(self):
        return os.read(self._r, 1024)
def send_text(self, data):
" Send text to the input. "
os.write(self._w, data.encode('utf-8'))
# Deprecated alias for `send_text`.
send = send_text
def raw_mode(self):
return DummyContext()
def cooked_mode(self):
return DummyContext()
def close(self):
" Close pipe fds. "
os.close(self._r)
os.close(self._w)
self._r = None
self._w = None
| gpl-3.0 | 6,210,073,192,508,431,000 | 22.807407 | 83 | 0.595831 | false |
brianwoo/django-tutorial | build/Django/build/lib.linux-x86_64-2.7/django/conf/locale/pt/formats.py | 115 | 1717 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r'j \d\e F \d\e Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = r'j \d\e F \d\e Y à\s H:i'
YEAR_MONTH_FORMAT = r'F \d\e Y'
MONTH_DAY_FORMAT = r'j \d\e F'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y H:i'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%d/%m/%Y', '%d/%m/%y', # '2006-10-25', '25/10/2006', '25/10/06'
# '%d de %b de %Y', '%d de %b, %Y', # '25 de Out de 2006', '25 Out, 2006'
# '%d de %B de %Y', '%d de %B, %Y', # '25 de Outubro de 2006', '25 de Outubro, 2006'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| gpl-3.0 | 5,912,890,879,756,242,000 | 40.853659 | 90 | 0.538462 | false |
tidalmelon/twisted-intro | twisted-server-1/fastpoetry.py | 11 | 1796 | # This is the Twisted Fast Poetry Server, version 1.0
import optparse, os
from twisted.internet.protocol import ServerFactory, Protocol
def parse_args():
usage = """usage: %prog [options] poetry-file
This is the Fast Poetry Server, Twisted edition.
Run it like this:
python fastpoetry.py <path-to-poetry-file>
If you are in the base directory of the twisted-intro package,
you could run it like this:
python twisted-server-1/fastpoetry.py poetry/ecstasy.txt
to serve up John Donne's Ecstasy, which I know you want to do.
"""
parser = optparse.OptionParser(usage)
help = "The port to listen on. Default to a random available port."
parser.add_option('--port', type='int', help=help)
help = "The interface to listen on. Default is localhost."
parser.add_option('--iface', help=help, default='localhost')
options, args = parser.parse_args()
if len(args) != 1:
parser.error('Provide exactly one poetry file.')
poetry_file = args[0]
if not os.path.exists(args[0]):
parser.error('No such file: %s' % poetry_file)
return options, poetry_file
class PoetryProtocol(Protocol):
def connectionMade(self):
self.transport.write(self.factory.poem)
self.transport.loseConnection()
class PoetryFactory(ServerFactory):
protocol = PoetryProtocol
def __init__(self, poem):
self.poem = poem
def main():
options, poetry_file = parse_args()
poem = open(poetry_file).read()
factory = PoetryFactory(poem)
from twisted.internet import reactor
port = reactor.listenTCP(options.port or 0, factory,
interface=options.iface)
print 'Serving %s on %s.' % (poetry_file, port.getHost())
reactor.run()
if __name__ == '__main__':
main()
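# Quick manual test (a sketch; pick any free port):
#
#   python twisted-server-1/fastpoetry.py poetry/ecstasy.txt --port 10000
#   telnet localhost 10000
#
# The server writes the whole poem to each connection and then closes it.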
| mit | -1,949,318,157,078,893,800 | 22.025641 | 71 | 0.666481 | false |
xingjian-f/Leetcode-solution | 173. Binary Search Tree Iterator.py | 1 | 1262 | # Definition for a binary tree node
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class BSTIterator(object):
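    # Approach: a controlled in-order traversal. The stack keeps the
    # ancestors whose left subtrees are still being visited, so hasNext()
    # runs in O(1) and next() in amortized O(1) time, using O(h) memory,
    # where h is the tree height.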
def move_left(self, node, stack):
while node.left is not None:
stack.append(node)
node = node.left
return node
def __init__(self, root):
"""
:type root: TreeNode
"""
if root is None:
self.exit_next = False
return
self.exit_next = True
self.path_stack = []
self.now = self.move_left(root, self.path_stack)
def hasNext(self):
"""
:rtype: bool
"""
return self.exit_next
def next(self):
"""
:rtype: int
"""
ret = self.now.val
node = self.now
if node.right is None:
if len(self.path_stack) == 0:
self.exit_next = False
else:
node = self.path_stack.pop()
else:
node = self.move_left(node.right, self.path_stack)
self.now = node
return ret
# Your BSTIterator will be called like this:
# i, v = BSTIterator(root), []
# while i.hasNext(): v.append(i.next())
| mit | 2,159,593,557,568,160,500 | 22.830189 | 62 | 0.497623 | false |
gwu-business/salad-system-py | software/add_menu_item.py | 1 | 2164 | # python software/populate_db.py
# ... source: https://github.com/PyMySQL/PyMySQL#example
import pymysql.cursors
import os
try:
DB_ROOT_PASSWORD = os.environ["MYSQL_ROOT_PASSWORD"] # if your root user has a password, assign it to the "MYSQL_ROOT_PASSWORD" environment variable
except KeyError as e:
DB_ROOT_PASSWORD = "" # most students' root user doesn't have a password
# ESTABLISH CONNECTION WITH SALAD DATABASE
connection = pymysql.connect(
host='localhost',
port=3306,
user='root',
passwd= DB_ROOT_PASSWORD,
db='salad_db',
#charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor
)
try:
with connection.cursor() as cursor:
# CREATE A NEW MENU ITEM RECORD
sql = "INSERT INTO `menu_items` (`category`,`title`,`calories`,`gluten_free`,`vegan_safe`,`description`) VALUES (%s, %s, %s, %s, %s, %s)"
cursor.execute(sql, ('SignatureSalad', 'TEST SALAD', 1111, 0, 1, 'a salad to use when testing the web application.') )
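        # Note: passing the values as a separate tuple lets the driver escape
        # them, which is safer than interpolating them into the SQL string.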
connection.commit() # connection is not autocommit by default. So you must commit to save your changes.
with connection.cursor() as cursor:
# EXECUTE CUSTOM QUERY TO FIND THE LATEST MENU ITEM RECORD
sql = "SELECT * FROM menu_items ORDER BY id DESC LIMIT 1"
cursor.execute(sql)
#result = cursor.fetchone()
for row in cursor.fetchall():
print(row)
finally:
connection.close() # always close the connection when finished.
##import pymysql # https://github.com/PyMySQL/PyMySQL
##
### OPEN THE DB CONNECTION
##
##connection = pymysql.connect(host='localhost', port=3306, user='root', passwd='y0l0') # , db='mysql'
##cursor = connection.cursor()
##
### EXECUTE A QUERY
##
##cursor.execute("SELECT * FROM mysql.user;")
##
### LOG QUERY RESULTS
##
##print type(cursor)
##print cursor
##print cursor.description
##print cursor.fetchall()
##num_fields = len(cursor.description)
##print num_fields
##
##field_names = [i[0] for i in cursor.description]
##print field_names
##
##for row in cursor.fetchall():
## print(row)
##
### CLOSE THE DB CONNECTION
##
##cursor.close()
##connection.close()
| mit | -1,822,069,577,736,512,500 | 22.021277 | 152 | 0.667283 | false |
AnotherIvan/calibre | src/calibre/gui2/store/stores/smashwords_plugin.py | 15 | 3729 | # -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import, print_function)
store_version = 2 # Needed for dynamic plugin loading
__license__ = 'GPL 3'
__copyright__ = '2011, John Schember <[email protected]>'
__docformat__ = 'restructuredtext en'
import random
import re
import urllib2
from contextlib import closing
from lxml import html
from PyQt5.Qt import QUrl
from calibre import browser, url_slash_cleaner
from calibre.gui2 import open_url
from calibre.gui2.store import StorePlugin
from calibre.gui2.store.basic_config import BasicStoreConfig
from calibre.gui2.store.search_result import SearchResult
from calibre.gui2.store.web_store_dialog import WebStoreDialog
class SmashwordsStore(BasicStoreConfig, StorePlugin):
def open(self, parent=None, detail_item=None, external=False):
url = 'http://www.smashwords.com/'
aff_id = '?ref=usernone'
# Use Kovid's affiliate id 30% of the time.
if random.randint(1, 10) in (1, 2, 3):
aff_id = '?ref=kovidgoyal'
detail_url = None
if detail_item:
detail_url = url + detail_item + aff_id
url = url + aff_id
if external or self.config.get('open_external', False):
open_url(QUrl(url_slash_cleaner(detail_url if detail_url else url)))
else:
d = WebStoreDialog(self.gui, url, parent, detail_url)
d.setWindowTitle(self.name)
d.set_tags(self.config.get('tags', ''))
d.exec_()
def search(self, query, max_results=10, timeout=60):
url = 'http://www.smashwords.com/books/search?query=' + urllib2.quote(query)
br = browser()
counter = max_results
with closing(br.open(url, timeout=timeout)) as f:
doc = html.fromstring(f.read())
for data in doc.xpath('//div[@id="pageCenterContent"]//div[@class="library-book"]'):
if counter <= 0:
break
data = html.fromstring(html.tostring(data))
id = None
id_a = ''.join(data.xpath('//a[contains(@class, "library-title")]/@href'))
if id_a:
id = id_a.split('/')[-1]
if not id:
continue
cover_url = ''.join(data.xpath('//img[contains(@class, "book-list-image")]/@src'))
title = ''.join(data.xpath('.//a[contains(@class, "library-title")]/text()'))
author = ''.join(data.xpath('.//div[@class="subnote"]//a[1]//text()'))
price = ''.join(data.xpath('.//div[@class="subnote"]//text()'))
if 'Price:' in price:
try:
price = price.partition('Price:')[2]
price = re.sub('\s', ' ', price).strip()
price = price.split(' ')[0]
price = price.strip()
except:
price = 'Unknown'
counter -= 1
s = SearchResult()
s.cover_url = cover_url
s.title = title.strip()
s.author = author.strip()
s.price = price.strip()
s.detail_item = '/books/view/' + id.strip()
s.drm = SearchResult.DRM_UNLOCKED
yield s
def get_details(self, search_result, timeout):
url = 'http://www.smashwords.com/'
br = browser()
with closing(br.open(url + search_result.detail_item, timeout=timeout)) as nf:
idata = html.fromstring(nf.read())
search_result.formats = ', '.join(list(set(idata.xpath('//p//abbr//text()'))))
return True
| gpl-3.0 | -2,007,733,385,792,226,000 | 35.203883 | 98 | 0.547064 | false |
taborlab/FlowCal | FlowCal/excel_ui.py | 1 | 67063 | """
``FlowCal``'s Microsoft Excel User Interface.
This module contains functions to read, gate, and transform data from a set
of FCS files, as specified by an input Microsoft Excel file. This file
should contain the following tables:
- **Instruments**: Describes the instruments used to acquire the
samples listed in the other tables. Each instrument is specified by a
row containing at least the following fields:
- **ID**: Short string identifying the instrument. Will be referenced
by samples in the other tables.
- **Forward Scatter Channel**: Name of the forward scatter channel,
as specified by the ``$PnN`` keyword in the associated FCS files.
- **Side Scatter Channel**: Name of the side scatter channel, as
specified by the ``$PnN`` keyword in the associated FCS files.
- **Fluorescence Channels**: Name of the fluorescence channels in a
comma-separated list, as specified by the ``$PnN`` keyword in the
associated FCS files.
- **Time Channel**: Name of the time channel, as specified by the
``$PnN`` keyword in the associated FCS files.
- **Beads**: Describes the calibration beads samples that will be used
to calibrate cell samples in the **Samples** table. The following
information should be available for each beads sample:
- **ID**: Short string identifying the beads sample. Will be
referenced by cell samples in the **Samples** table.
- **Instrument ID**: ID of the instrument used to acquire the sample.
Must match one of the rows in the **Instruments** table.
- **File Path**: Path of the FCS file containing the sample's data.
- **<Fluorescence Channel Name> MEF Values**: The fluorescence in MEF
of each bead subpopulation, as given by the manufacturer, as a
comma-separated list of numbers. Any element of this list can be
replaced with the word ``None``, in which case the corresponding
subpopulation will not be used when fitting the beads fluorescence
model. Note that the number of elements in this list (including
the elements equal to ``None``) are the number of subpopulations
that ``FlowCal`` will try to find.
- **Gate fraction**: The fraction of events to keep from the sample
after density-gating in the forward/side scatter channels.
- **Clustering Channels**: The fluorescence channels used to identify
the different bead subpopulations.
- **Samples**: Describes the biological samples to be processed. The
following information should be available for each sample:
- **ID**: Short string identifying the sample. Will be used as part
of the plot's filenames and in the **Histograms** table in the
output Excel file.
- **Instrument ID**: ID of the instrument used to acquire the sample.
Must match one of the rows in the **Instruments** table.
- **Beads ID**: ID of the beads sample used to convert data to
calibrated MEF.
- **File Path**: Path of the FCS file containing the sample's data.
- **<Fluorescence Channel Name> Units**: Units to which the event
list in the specified fluorescence channel should be converted, and
all the subsequent plots and statistics should be reported. Should
be one of the following: "Channel" (raw units), "a.u." or "RFI"
(arbitrary units) or "MEF" (calibrated Molecules of Equivalent
Fluorophore). If "MEF" is specified, the **Beads ID** should be
populated, and should correspond to a beads sample with the
**MEF Values** specified for the same channel.
- **Gate fraction**: The fraction of events to keep from the sample
after density-gating in the forward/side scatter channels.
Any columns other than the ones specified above can be present, but will be
ignored by ``FlowCal``.
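
As an example, a minimal **Instruments** table could contain a single row
such as the following (the channel names here are purely illustrative and
must match the ``$PnN`` keywords of your own FCS files)::

    ID     Forward Scatter Channel  Side Scatter Channel  Fluorescence Channels  Time Channel
    FC001  FSC-H                    SSC-H                 FL1-H, FL2-H, FL3-H    Time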
"""
import collections
import sys
import os
import os.path
import platform
import re
import six
import subprocess
import time
import warnings
# Tkinter is imported differently depending on which version of python we're
# using.
# six.PY2 is True when the code is running in python 2, False otherwise.
# six.PY3 is the equivalent for python 3.
if six.PY2:
from Tkinter import Tk
from tkFileDialog import askopenfilename
elif six.PY3:
from tkinter import Tk
from tkinter.filedialog import askopenfilename
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import openpyxl
import zipfile
import FlowCal.io
import FlowCal.plot
import FlowCal.gate
import FlowCal.transform
import FlowCal.stats
import FlowCal.mef
# Regular expressions for headers that specify some fluorescence channel
re_mef_values = re.compile(r'^\s*(\S(?:.*\S)?)\s+MEF\s+Values\s*$')
re_units = re.compile(r'^\s*(\S(?:.*\S)?)\s+Units\s*$')
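# For example, a column header such as "FL1 MEF Values" matches re_mef_values
# and captures the channel name "FL1"; "FL1 Units" matches re_units likewise.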
class ExcelUIException(Exception):
"""
FlowCal Excel UI Error.
"""
pass
def read_table(filename, sheetname, index_col=None, engine=None):
"""
Return the contents of an Excel table as a pandas DataFrame.
Parameters
----------
filename : str
Name of the Excel file to read.
sheetname : str or int
Name or index of the sheet inside the Excel file to read.
index_col : str, optional
Column name or index to be used as row labels of the DataFrame. If
None, default index will be used.
engine : str, optional
Engine used by `pd.read_excel()` to read Excel file. If None, try
'openpyxl' then 'xlrd'.
Returns
-------
table : DataFrame
        A DataFrame containing the data in the specified Excel table. If
        `index_col` is not None, rows whose `index_col` field is empty will
        not be present in `table`.
Raises
------
ValueError
If `index_col` is specified and two rows contain the same
`index_col` field.
"""
# Catch sheetname as list or None
if sheetname is None or \
(hasattr(sheetname, '__iter__') \
and not isinstance(sheetname, six.string_types)):
raise TypeError("sheetname should specify a single sheet")
# Load excel table using pandas. (pandas >= v1.2.0 delays closing files in
# some situations, so open and close the file here.)
with open(filename, 'rb') as f:
file_in_mem = six.BytesIO(f.read())
read_excel_kwargs = {'io' : file_in_mem,
'sheet_name' : sheetname,
'index_col' : index_col}
if engine is None:
# try reading Excel file using openpyxl engine first, then xlrd
try:
read_excel_kwargs['engine'] = 'openpyxl'
table = pd.read_excel(**read_excel_kwargs)
except ValueError as e:
if not('openpyxl' in str(e).lower()
and 'unknown' in str(e).lower()):
raise
else:
# pandas does not recognize openpyxl (e.g., pandas
# version < 0.25.0), try xlrd
read_excel_kwargs['engine'] = 'xlrd'
table = pd.read_excel(**read_excel_kwargs)
except ImportError:
# pandas recognizes openpyxl but encountered an ImportError, try
# xlrd. Possible scenarios: openpyxl version is less than what
# pandas requires, openpyxl is missing (shouldn't happen)
read_excel_kwargs['engine'] = 'xlrd'
table = pd.read_excel(**read_excel_kwargs)
except openpyxl.utils.exceptions.InvalidFileException:
# unsupported file type (e.g., .xls), try xlrd
#
# (note: openpyxl's InvalidFileException has been stable at that
# location since v2.2.0)
read_excel_kwargs['engine'] = 'xlrd'
table = pd.read_excel(**read_excel_kwargs)
except zipfile.BadZipFile:
# pandas >= 1.2.0 opens the file and passes the file buffer to
# openpyxl, which may determine the file is not a zip file (e.g.,
# if it's a .xls file), try xlrd
read_excel_kwargs['engine'] = 'xlrd'
table = pd.read_excel(**read_excel_kwargs)
else:
read_excel_kwargs['engine'] = engine
table = pd.read_excel(**read_excel_kwargs)
# Eliminate rows whose index are null
if index_col is not None:
table = table[pd.notnull(table.index)]
# Check for duplicated rows
if table.index.has_duplicates:
raise ValueError("sheet {} on file {} contains duplicated values "
"for column {}".format(sheetname, filename, index_col))
return table
def write_workbook(filename, table_list, column_width=None):
"""
Write an Excel workbook from a list of tables.
Parameters
----------
filename : str
Name of the Excel file to write.
table_list : list of ``(str, DataFrame)`` tuples
Tables to be saved as individual sheets in the Excel table. Each
tuple contains two values: the name of the sheet to be saved as a
string, and the contents of the table as a DataFrame.
column_width: int or float, optional
The column width to use when saving the spreadsheet. If None,
calculate width automatically from the maximum number of characters
in each column.
"""
# Modify default header format
# Pandas' default header format is bold text with thin borders. Here we
# use bold text only, without borders.
# Also, wrap in a try-except block in case style structure is not found.
header_format_modified = False
try:
# Get format module
import pandas.io.formats.excel as format_module
# Save previous style, replace, and indicate that previous style should
# be restored at the end
old_header_style = format_module.header_style
format_module.header_style = {"font": {"bold": True}}
header_format_modified = True
except AttributeError as e:
pass
# Generate output writer object
writer = pd.ExcelWriter(filename, engine='openpyxl')
# Write tables
for sheet_name, df in table_list:
# Convert index names to regular columns
df = df.reset_index()
# Write to an Excel sheet
df.to_excel(writer, sheet_name=sheet_name, index=False)
# Set column width
for i, (col_name, column) in enumerate(six.iteritems(df)):
if column_width is None:
# Get the maximum number of characters in a column
max_chars_col = column.astype(str).str.len().max()
max_chars_col = max(len(col_name), max_chars_col)
width = float(max_chars_col)
else:
width = float(column_width)
# Write width
col_letter = openpyxl.utils.get_column_letter(i+1)
writer.sheets[sheet_name].column_dimensions[col_letter].width = width
# Save and close file
writer.close()
# Restore previous header format
if header_format_modified:
format_module.header_style = old_header_style
def process_beads_table(beads_table,
instruments_table,
base_dir=".",
verbose=False,
plot=False,
plot_dir=None,
full_output=False,
get_transform_fxn_kwargs={}):
"""
Process calibration bead samples, as specified by an input table.
This function processes the entries in `beads_table`. For each row, the
function does the following:
- Load the FCS file specified in the field "File Path".
- Transform the forward scatter/side scatter and fluorescence
channels to RFI
- Remove the 250 first and 100 last events.
- Remove saturated events in the forward scatter and side scatter
channels.
- Apply density gating on the forward scatter/side scatter
channels.
- Generate a standard curve transformation function, for each
fluorescence channel in which the associated MEF values are
specified.
- Generate forward/side scatter density plots and fluorescence
histograms, and plots of the clustering and fitting steps of
standard curve generation, if `plot` = True.
Names of forward/side scatter and fluorescence channels are taken from
`instruments_table`.
Parameters
----------
beads_table : DataFrame
Table specifying beads samples to be processed. For more
information about the fields required in this table, please consult
the module's documentation.
instruments_table : DataFrame
Table specifying instruments. For more information about the fields
required in this table, please consult the module's documentation.
base_dir : str, optional
Directory from where all the other paths are specified.
verbose : bool, optional
Whether to print information messages during the execution of this
function.
plot : bool, optional
Whether to generate and save density/histogram plots of each
sample, and each beads sample.
plot_dir : str, optional
Directory relative to `base_dir` into which plots are saved. If
`plot` is False, this parameter is ignored. If ``plot==True`` and
``plot_dir is None``, plot without saving.
full_output : bool, optional
Flag indicating whether to include an additional output, containing
intermediate results from the generation of the MEF transformation
functions.
get_transform_fxn_kwargs : dict, optional
Additional parameters passed directly to internal
``mef.get_transform_fxn()`` function call.
Returns
-------
beads_samples : OrderedDict
Processed, gated, and transformed samples, indexed by
``beads_table.index``.
mef_transform_fxns : OrderedDict
MEF transformation functions, indexed by ``beads_table.index``.
mef_outputs : OrderedDict, only if ``full_output==True``
Intermediate results from the generation of the MEF transformation
functions. For every entry in `beads_table`,
:func:`FlowCal.mef.get_transform_fxn()` is called on the
corresponding processed and gated beads sample with
``full_output=True``, and the full output (a `MEFOutput`
``namedtuple``) is added to `mef_outputs`. `mef_outputs` is indexed
by ``beads_table.index``. Refer to the documentation for
:func:`FlowCal.mef.get_transform_fxn()` for more information.
"""
# Initialize output variables
beads_samples = collections.OrderedDict()
mef_transform_fxns = collections.OrderedDict()
mef_outputs = collections.OrderedDict()
# Return empty structures if beads table is empty
if beads_table.empty:
if full_output:
return beads_samples, mef_transform_fxns, mef_outputs
else:
return beads_samples, mef_transform_fxns
if verbose:
msg = "Processing Beads table ({} entries)".format(len(beads_table))
print("")
print(msg)
print("="*len(msg))
# Check that plotting directory exist, create otherwise
if plot and plot_dir is not None \
and not os.path.exists(os.path.join(base_dir, plot_dir)):
os.makedirs(os.path.join(base_dir, plot_dir))
# Extract header and channel names for which MEF values are specified.
headers = list(beads_table.columns)
mef_headers_all = [h for h in headers if re_mef_values.match(h)]
mef_channels_all = [re_mef_values.match(h).group(1)
for h in mef_headers_all]
# Iterate through table
# We will look for a ExcelUIException on each iteration. If an exception
# is caught, it will be stored in beads_samples.
for beads_id, beads_row in beads_table.iterrows():
try:
###
# Instrument Data
###
# Get the appropriate row in the instrument table
instruments_row = instruments_table.loc[beads_row['Instrument ID']]
            # Scatter channels: Forward Scatter, Side Scatter
sc_channels = [instruments_row['Forward Scatter Channel'],
instruments_row['Side Scatter Channel'],
]
# Fluorescence channels is a comma-separated list
fl_channels = instruments_row['Fluorescence Channels'].split(',')
fl_channels = [s.strip() for s in fl_channels]
###
# Beads Data
###
if verbose:
print("\nBeads ID {}...".format(beads_id))
print("Loading file \"{}\"...".format(beads_row['File Path']))
# Attempt to open file
filename = os.path.join(base_dir, beads_row['File Path'])
try:
beads_sample = FlowCal.io.FCSData(filename)
except IOError:
raise ExcelUIException("file \"{}\" not found".format(
beads_row['File Path']))
# Check that the number of events is greater than 400
if beads_sample.shape[0] < 400:
raise ExcelUIException("number of events is lower than 400")
###
# Transform
###
if verbose:
print("Performing data transformation...")
# Transform FSC/SSC and fluorescence channels to linear scale
beads_sample = FlowCal.transform.to_rfi(beads_sample,
sc_channels + fl_channels)
# Parse clustering channels data
cluster_channels = beads_row['Clustering Channels'].split(',')
cluster_channels = [cc.strip() for cc in cluster_channels]
###
# Gate
###
if verbose:
print("Performing gating...")
# Remove first and last events. Transients in fluidics can make the
# first few and last events slightly different from the rest.
beads_sample_gated = FlowCal.gate.start_end(beads_sample,
num_start=250,
num_end=100)
# Remove saturating events in forward/side scatter, if the FCS data
# type is integer. The value of a saturating event is taken
# automatically from `beads_sample_gated.range`.
if beads_sample_gated.data_type == 'I':
beads_sample_gated = FlowCal.gate.high_low(
beads_sample_gated,
channels=sc_channels)
# Density gating
try:
density_gate_output = FlowCal.gate.density2d(
data=beads_sample_gated,
channels=sc_channels,
gate_fraction=beads_row['Gate Fraction'],
xscale='logicle',
yscale='logicle',
sigma=5.,
full_output=True)
beads_sample_gated = density_gate_output.gated_data
gate_contour = density_gate_output.contour
except ValueError as ve:
                raise ExcelUIException(str(ve))
# Plot forward/side scatter density plot and fluorescence histograms
if plot:
if verbose:
print("Plotting density plot and histogram...")
# Density plot parameters
density_params = {}
density_params['mode'] = 'scatter'
density_params["title"] = "{} ({:.1f}% retained)".format(
beads_id,
beads_sample_gated.shape[0] * 100. / beads_sample.shape[0])
density_params['xscale'] = 'logicle'
density_params['yscale'] = 'logicle'
# Beads have a tight distribution, so axis limits will be set
# from 0.75 decades below the 5th percentile to 0.75 decades
# above the 95th percentile.
density_params['xlim'] = \
(np.percentile(beads_sample_gated[:, sc_channels[0]],
5) / (10**0.75),
np.percentile(beads_sample_gated[:, sc_channels[0]],
95) * (10**0.75),
)
density_params['ylim'] = \
(np.percentile(beads_sample_gated[:, sc_channels[1]],
5) / (10**0.75),
np.percentile(beads_sample_gated[:, sc_channels[1]],
95) * (10**0.75),
)
# Beads have a tight distribution, so less smoothing should be
# applied for visualization
density_params['sigma'] = 5.
# Histogram plot parameters
hist_params = {'xscale': 'logicle'}
# Plot
if plot_dir is not None:
figname = os.path.join(
base_dir,
plot_dir,
"density_hist_{}.png".format(beads_id))
else:
figname = None
plt.figure(figsize=(6,4))
FlowCal.plot.density_and_hist(
beads_sample,
beads_sample_gated,
density_channels=sc_channels,
hist_channels=cluster_channels,
gate_contour=gate_contour,
density_params=density_params,
hist_params=hist_params,
savefig=figname)
###
# Process MEF values
###
# For each fluorescence channel, check whether a list of known MEF
# values of the bead subpopulations is provided in `beads_row`. This
# involves checking that a column named "[channel] MEF Values"
# exists and is not empty. If so, store the name of the channel in
# `mef_channels`, and the specified MEF values in `mef_values`.
###
mef_values = []
mef_channels = []
for fl_channel in fl_channels:
if fl_channel in mef_channels_all:
# Get header from channel name
mef_header = \
mef_headers_all[mef_channels_all.index(fl_channel)]
# Extract text. If empty, ignore.
mef_str = beads_row[mef_header]
if pd.isnull(mef_str):
continue
# Save channel name
mef_channels.append(fl_channel)
# Parse list of values
mef = mef_str.split(',')
mef = [int(e) if e.strip().isdigit() else np.nan
for e in mef]
mef_values.append(mef)
# Ensure matching number of `mef_values` for all channels (this
# implies that the calibration beads have the same number of
# subpopulations for all channels).
if mef_values:
if not np.all([len(mef_values_channel)==len(mef_values[0])
for mef_values_channel in mef_values]):
raise ExcelUIException("Must specify the same number of"
+ " MEF Values for each channel."
+ " Use 'None' to instruct FlowCal"
+ " to ignore a detected"
+ " subpopulation.")
mef_values = np.array(mef_values)
# Obtain standard curve transformation
if mef_channels:
if verbose:
if len(mef_channels) == 1:
print("Calculating standard curve for channel {}..." \
.format(mef_channels[0]))
else:
print("Calculating standard curve for channels {}..." \
.format(", ".join(mef_channels)))
mef_output = FlowCal.mef.get_transform_fxn(
beads_sample_gated,
mef_values,
mef_channels=mef_channels,
clustering_channels=cluster_channels,
verbose=False,
plot=plot,
plot_filename=beads_id,
plot_dir=os.path.join(base_dir, plot_dir) \
if plot_dir is not None else None,
full_output=full_output,
**get_transform_fxn_kwargs)
if full_output:
mef_transform_fxn = mef_output.transform_fxn
else:
mef_transform_fxn = mef_output
else:
mef_transform_fxn = None
mef_output = None
except ExcelUIException as e:
# Print Exception message
if verbose:
print("ERROR: {}".format(str(e)))
# Add exception to beads_samples dictionary, and None to
# everything else
beads_samples[beads_id] = e
mef_transform_fxns[beads_id] = None
if full_output:
mef_outputs[beads_id] = None
else:
# If no errors were found, store results
beads_samples[beads_id] = beads_sample_gated
mef_transform_fxns[beads_id] = mef_transform_fxn
if full_output:
mef_outputs[beads_id] = mef_output
if full_output:
return beads_samples, mef_transform_fxns, mef_outputs
else:
return beads_samples, mef_transform_fxns
def process_samples_table(samples_table,
instruments_table,
mef_transform_fxns=None,
beads_table=None,
base_dir=".",
verbose=False,
plot=False,
plot_dir=None):
"""
Process flow cytometry samples, as specified by an input table.
The function processes each entry in `samples_table`, and does the
following:
- Load the FCS file specified in the field "File Path".
- Transform the forward scatter/side scatter to RFI.
- Transform the fluorescence channels to the units specified in the
column "<Channel name> Units".
- Remove the 250 first and 100 last events.
- Remove saturated events in the forward scatter and side scatter
channels.
- Apply density gating on the forward scatter/side scatter
channels.
- Plot combined forward/side scatter density plots and fluorescence
          histograms, if `plot` = True.
Names of forward/side scatter and fluorescence channels are taken from
`instruments_table`.
Parameters
----------
samples_table : DataFrame
Table specifying samples to be processed. For more information
about the fields required in this table, please consult the
module's documentation.
instruments_table : DataFrame
Table specifying instruments. For more information about the fields
required in this table, please consult the module's documentation.
mef_transform_fxns : dict or OrderedDict, optional
Dictionary containing MEF transformation functions. If any entry
in `samples_table` requires transformation to MEF, a key: value
pair must exist in mef_transform_fxns, with the key being equal to
the contents of field "Beads ID".
beads_table : DataFrame, optional
Table specifying beads samples used to generate
`mef_transform_fxns`. This is used to check if a beads sample was
taken at the same acquisition settings as a sample to be
transformed to MEF. For any beads sample and channel for which a
MEF transformation function has been generated, the following
fields should be populated: ``<channel> Amp. Type`` and
``<channel> Detector Volt``. If `beads_table` is not specified, no
checking will be performed.
base_dir : str, optional
Directory from where all the other paths are specified.
verbose : bool, optional
Whether to print information messages during the execution of this
function.
plot : bool, optional
Whether to generate and save density/histogram plots of each
sample, and each beads sample.
plot_dir : str, optional
Directory relative to `base_dir` into which plots are saved. If
`plot` is False, this parameter is ignored. If ``plot==True`` and
``plot_dir is None``, plot without saving.
Returns
-------
samples : OrderedDict
Processed, gated, and transformed samples, indexed by
``samples_table.index``.
"""
# Initialize output variable
samples = collections.OrderedDict()
# Return empty dictionary if samples table is empty
if samples_table.empty:
return samples
if verbose:
msg = "Processing Samples table ({} entries)".format(len(samples_table))
print("")
print(msg)
print("="*len(msg))
# Check that plotting directory exist, create otherwise
if plot and plot_dir is not None \
and not os.path.exists(os.path.join(base_dir, plot_dir)):
os.makedirs(os.path.join(base_dir, plot_dir))
# Extract header and channel names for which units are specified.
headers = list(samples_table.columns)
report_headers_all = [h for h in headers if re_units.match(h)]
report_channels_all = [re_units.match(h).group(1)
for h in report_headers_all]
# Iterate through table
# We will look for a ExcelUIException on each iteration. If an exception
# is caught, it will be stored in beads_samples.
for sample_id, sample_row in samples_table.iterrows():
try:
###
# Instrument Data
###
# Get the appropriate row in the instrument table
instruments_row = instruments_table.loc[sample_row['Instrument ID']]
            # Scatter channels: Forward Scatter, Side Scatter
sc_channels = [instruments_row['Forward Scatter Channel'],
instruments_row['Side Scatter Channel'],
]
# Fluorescence channels is a comma-separated list
fl_channels = instruments_row['Fluorescence Channels'].split(',')
fl_channels = [s.strip() for s in fl_channels]
###
# Sample Data
###
if verbose:
print("\nSample ID {}...".format(sample_id))
print("Loading file \"{}\"...".format(sample_row['File Path']))
# Attempt to open file
filename = os.path.join(base_dir, sample_row['File Path'])
try:
sample = FlowCal.io.FCSData(filename)
except IOError:
raise ExcelUIException("file \"{}\" not found".format(
sample_row['File Path']))
# Check that the number of events is greater than 400
if sample.shape[0] < 400:
raise ExcelUIException("number of events is lower than 400")
###
# Transform
###
if verbose:
print("Performing data transformation...")
# Transform FSC/SSC to linear scale
sample = FlowCal.transform.to_rfi(sample, sc_channels)
# Parse fluorescence channels in which to transform
report_channels = []
report_units = []
for fl_channel in fl_channels:
if fl_channel in report_channels_all:
# Get header from channel name
report_header = report_headers_all[
report_channels_all.index(fl_channel)]
# Extract text. If empty, ignore.
units_str = sample_row[report_header]
if pd.isnull(units_str):
continue
# Decide what transformation to perform
units = units_str.strip()
if units.lower() == 'channel':
units_label = "Channel Number"
elif units.lower() == 'rfi':
units_label = "Relative Fluorescence Intensity, RFI"
sample = FlowCal.transform.to_rfi(sample, fl_channel)
elif units.lower() == 'a.u.' or units.lower() == 'au':
units_label = "Arbitrary Units, a.u."
sample = FlowCal.transform.to_rfi(sample, fl_channel)
elif units.lower() == 'mef':
units_label = "Molecules of Equivalent Fluorophore, MEF"
# Check if transformation function is available
if mef_transform_fxns[sample_row['Beads ID']] is None:
raise ExcelUIException("MEF transformation "
"function not available")
# If beads_table is available, check if the same
# settings have been used to acquire the corresponding
# beads sample
if beads_table is not None:
beads_row = beads_table.loc[sample_row['Beads ID']]
# Instrument
beads_iid = beads_row['Instrument ID']
if beads_iid != sample_row['Instrument ID']:
raise ExcelUIException("Instruments for "
"acquisition of beads and samples are not "
"the same (beads {}'s instrument: {}, "
"sample's instrument: {})".format(
sample_row['Beads ID'],
beads_iid,
sample_row['Instrument ID']))
# Amplification type
beads_at = beads_row['{} Amp. Type'. \
format(fl_channel)]
if sample.amplification_type(fl_channel)[0]:
sample_at = "Log"
else:
sample_at = "Linear"
if beads_at != sample_at:
raise ExcelUIException("Amplification type for "
"acquisition of beads and samples in "
"channel {} are not the same (beads {}'s "
"amplification: {}, sample's "
"amplification: {})".format(
fl_channel,
sample_row['Beads ID'],
beads_at,
sample_at))
# Detector voltage
beads_dv = beads_row['{} Detector Volt.'. \
format(fl_channel)]
if sample.detector_voltage(fl_channel) is not None \
and beads_dv != sample.detector_voltage(
fl_channel):
raise ExcelUIException("Detector voltage for "
"acquisition of beads and samples in "
"channel {} are not the same (beads {}'s "
"detector voltage: {}, sample's "
"detector voltage: {})".format(
fl_channel,
sample_row['Beads ID'],
beads_dv,
sample.detector_voltage(fl_channel)))
# First, transform to RFI
sample = FlowCal.transform.to_rfi(sample, fl_channel)
# Attempt to transform to MEF
# Transformation function raises a ValueError if a
# standard curve does not exist for a channel
try:
sample = mef_transform_fxns[sample_row['Beads ID']](
sample,
fl_channel)
except ValueError:
raise ExcelUIException("no standard curve for "
"channel {}".format(fl_channel))
else:
raise ExcelUIException("units \"{}\" not recognized". \
format(units, sample_id))
# Register that reporting in this channel must be done
report_channels.append(fl_channel)
report_units.append(units_label)
###
# Gate
###
if verbose:
print("Performing gating...")
# Remove first and last events. Transients in fluidics can make the
# first few and last events slightly different from the rest.
sample_gated = FlowCal.gate.start_end(sample,
num_start=250,
num_end=100)
# Remove saturating events in forward/side scatter, and fluorescent
# channels to report, if the FCS data type is integer. The value of
# a saturating event is taken automatically from
# `sample_gated.range`.
if sample_gated.data_type == 'I':
sample_gated = FlowCal.gate.high_low(
sample_gated,
sc_channels + report_channels)
# Density gating
try:
density_gate_output = FlowCal.gate.density2d(
data=sample_gated,
channels=sc_channels,
gate_fraction=sample_row['Gate Fraction'],
xscale='logicle',
yscale='logicle',
full_output=True)
sample_gated = density_gate_output.gated_data
gate_contour = density_gate_output.contour
except ValueError as ve:
                raise ExcelUIException(str(ve))
# Plot forward/side scatter density plot and fluorescence histograms
if plot:
if verbose:
print("Plotting density plot and histogram...")
# Density plot parameters
density_params = {}
density_params['mode'] = 'scatter'
density_params["title"] = "{} ({:.1f}% retained)".format(
sample_id,
sample_gated.shape[0] * 100. / sample.shape[0])
density_params['xscale'] = 'logicle'
density_params['yscale'] = 'logicle'
# Histogram plot parameters
hist_params = []
for rc, ru in zip(report_channels, report_units):
param = {}
param['xlabel'] = '{} ({})'.format(rc, ru)
# Only channel numbers are plotted in linear scale
if (ru == 'Channel Number'):
param['xscale'] = 'linear'
else:
param['xscale'] = 'logicle'
hist_params.append(param)
# Plot
if plot_dir is not None:
figname = os.path.join(
base_dir,
plot_dir,
"{}.png".format(sample_id))
else:
figname = None
FlowCal.plot.density_and_hist(
sample,
sample_gated,
gate_contour=gate_contour,
density_channels=sc_channels,
density_params=density_params,
hist_channels=report_channels,
hist_params=hist_params,
savefig=figname)
except ExcelUIException as e:
# Print Exception message
if verbose:
print("ERROR: {}".format(str(e)))
# Add exception to samples dictionary
samples[sample_id] = e
else:
# If no errors were found, store results
samples[sample_id] = sample_gated
return samples
def add_beads_stats(beads_table, beads_samples, mef_outputs=None):
"""
Add stats fields to beads table.
The following information is added to each row:
- Notes (warnings, errors) resulting from the analysis
- Number of Events
- Acquisition Time (s)
The following information is added for each row, for each channel in
which MEF values have been specified:
- Detector voltage (gain)
- Amplification type
- Bead model fitted parameters
Parameters
----------
beads_table : DataFrame
Table specifying bead samples to analyze. For more information
about the fields required in this table, please consult the
module's documentation.
beads_samples : dict or OrderedDict
FCSData objects from which to calculate statistics.
``beads_samples[id]`` should correspond to ``beads_table.loc[id,:]``.
mef_outputs : dict or OrderedDict, optional
Intermediate results from the generation of the MEF transformation
functions, as given by ``mef.get_transform_fxn()``. This is used to
populate the fields ``<channel> Beads Model``,
``<channel> Beads Params. Names``, and
``<channel> Beads Params. Values``. If specified,
``mef_outputs[id]`` should correspond to ``beads_table.loc[id,:]``.
"""
# The index name is not preserved if beads_table is empty.
# Save the index name for later
beads_table_index_name = beads_table.index.name
# Add per-row info
notes = []
n_events = []
acq_time = []
for row_id in beads_table.index:
# Check if sample is an exception, otherwise assume it's an FCSData
if isinstance(beads_samples[row_id], ExcelUIException):
# Print error message
notes.append("ERROR: {}".format(str(beads_samples[row_id])))
n_events.append(np.nan)
acq_time.append(np.nan)
else:
notes.append('')
n_events.append(beads_samples[row_id].shape[0])
acq_time.append(beads_samples[row_id].acquisition_time)
beads_table['Analysis Notes'] = notes
beads_table['Number of Events'] = n_events
beads_table['Acquisition Time (s)'] = acq_time
# List of channels that require stats columns
headers = list(beads_table.columns)
stats_headers = [h for h in headers if re_mef_values.match(h)]
stats_channels = [re_mef_values.match(h).group(1) for h in stats_headers]
# Iterate through channels
for header, channel in zip(stats_headers, stats_channels):
# Add empty columns to table
beads_table[channel + ' Detector Volt.'] = np.nan
beads_table[channel + ' Amp. Type'] = ""
if mef_outputs:
beads_table[channel + ' Beads Model'] = ""
beads_table[channel + ' Beads Params. Names'] = ""
beads_table[channel + ' Beads Params. Values'] = ""
# Iterate
for row_id in beads_table.index:
# If error, skip
if isinstance(beads_samples[row_id], ExcelUIException):
continue
# If MEF values are specified, calculate stats. If not, leave empty.
if pd.notnull(beads_table[header][row_id]):
# Detector voltage
beads_table.at[row_id, channel + ' Detector Volt.'] = \
beads_samples[row_id].detector_voltage(channel)
# Amplification type
if beads_samples[row_id].amplification_type(channel)[0]:
amplification_type = "Log"
else:
amplification_type = "Linear"
beads_table.at[row_id, channel + ' Amp. Type'] = \
amplification_type
# Bead model and parameters
# Only populate if mef_outputs has been provided
if mef_outputs:
# Try to find the current channel among the mef'd channels.
# If successful, extract bead fitted parameters.
try:
mef_channel_index = mef_outputs[row_id]. \
mef_channels.index(channel)
except ValueError:
pass
else:
# Bead model
beads_model_str = mef_outputs[row_id]. \
fitting['beads_model_str'][mef_channel_index]
beads_table.at[row_id, channel + ' Beads Model'] = \
beads_model_str
# Bead parameter names
params_names = mef_outputs[row_id]. \
fitting['beads_params_names'][mef_channel_index]
params_names_str = ", ".join([str(p)
for p in params_names])
beads_table.at[
row_id,
channel + ' Beads Params. Names'] = \
params_names_str
# Bead parameter values
params = mef_outputs[row_id]. \
fitting['beads_params'][mef_channel_index]
params_str = ", ".join([str(p) for p in params])
beads_table.at[
row_id,
channel + ' Beads Params. Values'] = \
params_str
# Restore index name if table is empty
if len(beads_table) == 0:
beads_table.index.name = beads_table_index_name
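# --- Editorial sketch (not part of the original module) ---------------------
# A minimal illustration of how add_beads_stats() is typically driven; the
# workbook name and sheet names below are hypothetical, and the helpers
# (read_table, process_beads_table) are the ones defined earlier in this file.
def _example_add_beads_stats_usage():
    instruments = read_table('experiment.xlsx', sheetname='Instruments', index_col='ID')
    beads_table = read_table('experiment.xlsx', sheetname='Beads', index_col='ID')
    beads_samples, mef_transform_fxns, mef_outputs = process_beads_table(
        beads_table, instruments, base_dir='.', verbose=False, plot=False,
        full_output=True)
    # add_beads_stats() mutates beads_table in place, adding the Notes,
    # event-count, acquisition-time and per-channel columns described above.
    add_beads_stats(beads_table, beads_samples, mef_outputs)
    return beads_table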
def add_samples_stats(samples_table, samples):
"""
Add stats fields to samples table.
The following information is added to each row:
- Notes (warnings, errors) resulting from the analysis
- Number of Events
- Acquisition Time (s)
The following information is added for each row, for each channel in
which fluorescence units have been specified:
- Detector voltage (gain)
- Amplification type
- Mean
- Geometric Mean
- Median
- Mode
- Standard Deviation
- Coefficient of Variation (CV)
- Geometric Standard Deviation
- Geometric Coefficient of Variation
- Inter-Quartile Range
- Robust Coefficient of Variation (RCV)
Parameters
----------
samples_table : DataFrame
Table specifying samples to analyze. For more information about the
fields required in this table, please consult the module's
documentation.
samples : dict or OrderedDict
FCSData objects from which to calculate statistics. ``samples[id]``
should correspond to ``samples_table.loc[id,:]``.
Notes
-----
Geometric statistics (geometric mean, standard deviation, and geometric
coefficient of variation) are defined only for positive data. If there
are negative events in any relevant channel of any member of `samples`,
geometric statistics will only be calculated on the positive events,
and a warning message will be written to the "Analysis Notes" field.
"""
# The index name is not preserved if samples_table is empty.
# Save the index name for later
samples_table_index_name = samples_table.index.name
# Add per-row info
notes = []
n_events = []
acq_time = []
for row_id in samples_table.index:
# Check if sample is an exception, otherwise assume it's an FCSData
if isinstance(samples[row_id], ExcelUIException):
# Print error message
notes.append("ERROR: {}".format(str(samples[row_id])))
n_events.append(np.nan)
acq_time.append(np.nan)
else:
notes.append('')
n_events.append(samples[row_id].shape[0])
acq_time.append(samples[row_id].acquisition_time)
samples_table['Analysis Notes'] = notes
samples_table['Number of Events'] = n_events
samples_table['Acquisition Time (s)'] = acq_time
# List of channels that require stats columns
headers = list(samples_table.columns)
stats_headers = [h for h in headers if re_units.match(h)]
stats_channels = [re_units.match(h).group(1) for h in stats_headers]
# Iterate through channels
for header, channel in zip(stats_headers, stats_channels):
# Add empty columns to table
samples_table[channel + ' Detector Volt.'] = np.nan
samples_table[channel + ' Amp. Type'] = ""
samples_table[channel + ' Mean'] = np.nan
samples_table[channel + ' Geom. Mean'] = np.nan
samples_table[channel + ' Median'] = np.nan
samples_table[channel + ' Mode'] = np.nan
samples_table[channel + ' Std'] = np.nan
samples_table[channel + ' CV'] = np.nan
samples_table[channel + ' Geom. Std'] = np.nan
samples_table[channel + ' Geom. CV'] = np.nan
samples_table[channel + ' IQR'] = np.nan
samples_table[channel + ' RCV'] = np.nan
for row_id in samples_table.index:
# If error, skip
if isinstance(samples[row_id], ExcelUIException):
continue
# If units are specified, calculate stats. If not, leave empty.
if pd.notnull(samples_table[header][row_id]):
# Acquisition settings
# Detector voltage
samples_table.at[row_id, channel + ' Detector Volt.'] = \
samples[row_id].detector_voltage(channel)
# Amplification type
if samples[row_id].amplification_type(channel)[0]:
amplification_type = "Log"
else:
amplification_type = "Linear"
samples_table.at[row_id, channel + ' Amp. Type'] = \
amplification_type
# Statistics from event list
samples_table.at[row_id, channel + ' Mean'] = \
FlowCal.stats.mean(samples[row_id], channel)
samples_table.at[row_id, channel + ' Median'] = \
FlowCal.stats.median(samples[row_id], channel)
samples_table.at[row_id, channel + ' Mode'] = \
FlowCal.stats.mode(samples[row_id], channel)
samples_table.at[row_id, channel + ' Std'] = \
FlowCal.stats.std(samples[row_id], channel)
samples_table.at[row_id, channel + ' CV'] = \
FlowCal.stats.cv(samples[row_id], channel)
samples_table.at[row_id, channel + ' IQR'] = \
FlowCal.stats.iqr(samples[row_id], channel)
samples_table.at[row_id, channel + ' RCV'] = \
FlowCal.stats.rcv(samples[row_id], channel)
# For geometric statistics, first check for non-positive events.
# If found, throw a warning and calculate statistics on positive
# events only.
if np.any(samples[row_id][:, channel] <= 0):
# Separate positive events
sample_positive = \
samples[row_id][samples[row_id][:, channel] > 0]
# Throw warning
msg = "Geometric statistics for channel" + \
" {} calculated on positive events".format(channel) + \
" only ({:.1f}%). ".format(
100.*sample_positive.shape[0]/samples[row_id].shape[0])
warnings.warn("On sample {}: {}".format(row_id, msg))
# Write warning message to table
if samples_table.loc[row_id, 'Analysis Notes']:
msg = samples_table.loc[row_id, 'Analysis Notes'] + msg
samples_table.at[row_id, 'Analysis Notes'] = msg
else:
sample_positive = samples[row_id]
# Calculate and write geometric statistics
samples_table.at[row_id, channel + ' Geom. Mean'] = \
FlowCal.stats.gmean(sample_positive, channel)
samples_table.at[row_id, channel + ' Geom. Std'] = \
FlowCal.stats.gstd(sample_positive, channel)
samples_table.at[row_id, channel + ' Geom. CV'] = \
FlowCal.stats.gcv(sample_positive, channel)
# Restore index name if table is empty
if len(samples_table) == 0:
samples_table.index.name = samples_table_index_name
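# --- Editorial sketch (not part of the original module) ---------------------
# Illustrative only: after add_samples_stats() the per-channel statistics can
# be read straight from the DataFrame. Column names follow the pattern
# "<channel> <statistic>"; the channel name 'FL1' used here is hypothetical.
def _example_inspect_sample_stats(samples_table):
    mean_col = 'FL1 Mean'
    gmean_col = 'FL1 Geom. Mean'
    if mean_col not in samples_table.columns:
        return None
    # Rows whose geometric statistics were computed on positive events only
    # also carry a warning in the 'Analysis Notes' column.
    return samples_table[[mean_col, gmean_col, 'Analysis Notes']]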
def generate_histograms_table(samples_table, samples, max_bins=1024):
"""
Generate a table of histograms as a DataFrame.
Parameters
----------
samples_table : DataFrame
Table specifying samples to analyze. For more information about the
fields required in this table, please consult the module's
documentation.
samples : dict or OrderedDict
FCSData objects from which to calculate statistics. ``samples[id]``
should correspond to ``samples_table.loc[id,:]``.
max_bins : int, optional
Maximum number of bins to use.
Returns
-------
hist_table : DataFrame
A multi-indexed DataFrame. Rows contain the histogram bins and
counts for every sample and channel specified in samples_table.
`hist_table` is indexed by the sample's ID, the channel name,
and whether the row corresponds to bins or counts.
"""
# Extract channels that require stats histograms
headers = list(samples_table.columns)
hist_headers = [h for h in headers if re_units.match(h)]
hist_channels = [re_units.match(h).group(1) for h in hist_headers]
# The number of columns in the DataFrame has to be set to the maximum
# number of bins of any of the histograms about to be generated.
# The following iterates through these histograms and finds the
# largest.
n_columns = 0
for sample_id in samples_table.index:
if isinstance(samples[sample_id], ExcelUIException):
continue
for header, channel in zip(hist_headers, hist_channels):
if pd.notnull(samples_table[header][sample_id]):
if n_columns < samples[sample_id].resolution(channel):
n_columns = samples[sample_id].resolution(channel)
# Saturate at max_bins
if n_columns > max_bins:
n_columns = max_bins
# Declare multi-indexed DataFrame
index = pd.MultiIndex.from_arrays([[],[],[]],
names = ['Sample ID', 'Channel', ''])
columns = ['Bin {}'.format(i + 1) for i in range(n_columns)]
hist_table = pd.DataFrame([], index=index, columns=columns)
# Generate histograms
for sample_id in samples_table.index:
if isinstance(samples[sample_id], ExcelUIException):
continue
for header, channel in zip(hist_headers, hist_channels):
if pd.notnull(samples_table[header][sample_id]):
# Get units in which bins are being reported
unit = samples_table[header][sample_id]
# Decide which scale to use
# Channel units result in linear scale. Otherwise, use logicle.
if unit == 'Channel':
scale = 'linear'
else:
scale = 'logicle'
# Define number of bins
nbins = min(samples[sample_id].resolution(channel), max_bins)
# Calculate bin edges and centers
# We generate twice the necessary number of bins. We then take
# every other value as the proper bin edges, and the remaining
# values as the bin centers.
bins_extended = samples[sample_id].hist_bins(channel,
2*nbins,
scale)
bin_edges = bins_extended[::2]
bin_centers = bins_extended[1::2]
# Store bin centers
hist_table.loc[(sample_id,
channel,
'Bin Centers ({})'.format(unit)),
columns[0:len(bin_centers)]] = bin_centers
# Calculate and store histogram counts
hist, __ = np.histogram(samples[sample_id][:,channel],
bins=bin_edges)
hist_table.loc[(sample_id, channel, 'Counts'),
columns[0:len(bin_centers)]] = hist
return hist_table
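# --- Editorial sketch (not part of the original module) ---------------------
# How the multi-indexed histogram table produced above can be read back; the
# sample ID, channel name and unit string are hypothetical. Index levels are
# (Sample ID, Channel, 'Bin Centers (<unit>)' / 'Counts'), as built in
# generate_histograms_table().
def _example_read_histogram(hist_table):
    sample_id, channel, unit = 'S001', 'FL1', 'MEF'   # assumed labels
    centers = hist_table.loc[(sample_id, channel, 'Bin Centers ({})'.format(unit))]
    counts = hist_table.loc[(sample_id, channel, 'Counts')]
    # Drop the NaN padding present when this sample uses fewer bins than the
    # widest histogram in the table.
    return centers.dropna(), counts.dropna()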
def generate_about_table(extra_info={}):
"""
Make a table with information about FlowCal and the current analysis.
Parameters
----------
extra_info : dict, optional
Additional keyword:value pairs to include in the table.
Returns
-------
about_table : DataFrame
Table with information about FlowCal and the current analysis, as
keyword:value pairs. The following keywords are included: FlowCal
version, and date and time of analysis. Keywords and values from
`extra_info` are also included.
"""
# Make keyword and value arrays
keywords = []
values = []
# FlowCal version
keywords.append('FlowCal version')
values.append(FlowCal.__version__)
# Analysis date and time
keywords.append('Date of analysis')
values.append(time.strftime("%Y/%m/%d"))
keywords.append('Time of analysis')
values.append(time.strftime("%I:%M:%S%p"))
# Add additional keyword:value pairs
for k, v in six.iteritems(extra_info):
keywords.append(k)
values.append(v)
# Make table as data frame
about_table = pd.DataFrame(values, index=keywords)
# Set column names
about_table.columns = ['Value']
about_table.index.name = 'Keyword'
return about_table
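# --- Editorial sketch (not part of the original module) ---------------------
# generate_about_table() only needs the extra keyword:value pairs to record;
# the input path below is hypothetical.
def _example_about_table():
    about = generate_about_table({'Input file path': 'experiment.xlsx'})
    # 'about' is a one-column DataFrame indexed by keyword, e.g.
    # about.loc['FlowCal version', 'Value'].
    return about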
def show_open_file_dialog(filetypes):
"""
Show an open file dialog and return the path of the file selected.
Parameters
----------
filetypes : list of tuples
Types of file to show on the dialog. Each tuple on the list must
have two elements associated with a filetype: the first element is
a description, and the second is the associated extension.
Returns
-------
filename : str
The path of the filename selected, or an empty string if no file
was chosen.
"""
# initialize tkinter root window
root = Tk()
# remove main root window (will cause kernel panic on OSX if not present)
root.withdraw()
# link askopenfilename window to root window
filename = askopenfilename(parent = root, filetypes=filetypes)
# refresh root window to remove askopenfilename window
root.update()
return filename
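# --- Editorial sketch (not part of the original module) ---------------------
# The filetypes argument is a list of (description, pattern) tuples, exactly
# as used by run() below.
def _example_open_dialog():
    return show_open_file_dialog(filetypes=[('Excel files', '*.xlsx'),
                                            ('All files', '*.*')])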
def run(input_path=None,
output_path=None,
verbose=True,
plot=True,
hist_sheet=False):
"""
Run the MS Excel User Interface.
This function performs the following:
1. If `input_path` is not specified, show a dialog to choose an input
Excel file.
2. Extract data from the Instruments, Beads, and Samples tables.
3. Process all the bead samples specified in the Beads table.
4. Generate statistics for each bead sample.
5. Process all the cell samples in the Samples table.
6. Generate statistics for each sample.
7. If requested, generate a histogram table for each fluorescent
channel specified for each sample.
8. Generate a table with run time, date, FlowCal version, among
others.
9. Save statistics and (if requested) histograms in an output Excel
file.
Parameters
----------
input_path : str
Path to the Excel file to use as input. If None, show a dialog to
select an input file.
output_path : str
Path to which to save the output Excel file. If None, use
"<input_path>_output".
verbose : bool, optional
Whether to print information messages during the execution of this
function.
plot : bool, optional
Whether to generate and save density/histogram plots of each
sample, and each beads sample.
hist_sheet : bool, optional
Whether to generate a sheet in the output Excel file specifying
histogram bin information.
"""
# If input file has not been specified, show open file dialog
if input_path is None:
input_path = show_open_file_dialog(filetypes=[('Excel files',
'*.xlsx')])
if not input_path:
if verbose:
print("No input file selected.")
return
# Extract directory, filename, and filename with no extension from path
input_dir, input_filename = os.path.split(input_path)
input_filename_no_ext, __ = os.path.splitext(input_filename)
# Read relevant tables from workbook
if verbose:
print("Reading {}...".format(input_filename))
instruments_table = read_table(input_path,
sheetname='Instruments',
index_col='ID')
beads_table = read_table(input_path,
sheetname='Beads',
index_col='ID')
samples_table = read_table(input_path,
sheetname='Samples',
index_col='ID')
# Process beads samples
beads_samples, mef_transform_fxns, mef_outputs = process_beads_table(
beads_table,
instruments_table,
base_dir=input_dir,
verbose=verbose,
plot=plot,
plot_dir='plot_beads',
full_output=True)
# Add stats to beads table
if verbose:
print("")
print("Calculating statistics for beads...")
add_beads_stats(beads_table, beads_samples, mef_outputs)
# Process samples
samples = process_samples_table(
samples_table,
instruments_table,
mef_transform_fxns=mef_transform_fxns,
beads_table=beads_table,
base_dir=input_dir,
verbose=verbose,
plot=plot,
plot_dir='plot_samples')
# Add stats to samples table
if verbose:
print("")
print("Calculating statistics for all samples...")
add_samples_stats(samples_table, samples)
# Generate histograms
if hist_sheet:
if verbose:
print("Generating histograms table...")
histograms_table = generate_histograms_table(samples_table, samples)
# Generate about table
about_table = generate_about_table({'Input file path': input_path})
# Generate list of tables to save
table_list = []
table_list.append(('Instruments', instruments_table))
table_list.append(('Beads', beads_table))
table_list.append(('Samples', samples_table))
if hist_sheet:
table_list.append(('Histograms', histograms_table))
table_list.append(('About Analysis', about_table))
# Write output excel file
if verbose:
print("Saving output Excel file...")
if output_path is None:
output_filename = "{}_output.xlsx".format(input_filename_no_ext)
output_path = os.path.join(input_dir, output_filename)
write_workbook(output_path, table_list)
if verbose:
print("\nDone.")
def run_command_line(args=None):
"""
Entry point for the FlowCal and flowcal console scripts.
Parameters
----------
args: list of strings, optional
Command line arguments. If None or not specified, get arguments
from ``sys.argv``.
See Also
--------
FlowCal.excel_ui.run
References
----------
http://amir.rachum.com/blog/2017/07/28/python-entry-points/
"""
# Get arguments from ``sys.argv`` if necessary.
# ``sys.argv`` has the name of the script as its first element. We remove
# this element because it will break ``parser.parse_args()`` later. In fact,
# ``parser.parse_args()``, if provided with no arguments, will also use
# ``sys.argv`` after removing the first element.
if args is None:
args = sys.argv[1:]
import argparse
# Read command line arguments
parser = argparse.ArgumentParser(
description="process flow cytometry files with FlowCal's Excel UI.")
parser.add_argument(
"-i",
"--inputpath",
type=str,
nargs='?',
help="input Excel file name. If not specified, show open file window")
parser.add_argument(
"-o",
"--outputpath",
type=str,
nargs='?',
help="output Excel file name. If not specified, use [INPUTPATH]_output")
parser.add_argument(
"-v",
"--verbose",
action="store_true",
help="print information about individual processing steps")
parser.add_argument(
"-p",
"--plot",
action="store_true",
help="generate and save density plots/histograms of beads and samples")
parser.add_argument(
"-H",
"--histogram-sheet",
action="store_true",
help="generate sheet in output Excel file specifying histogram bins")
args = parser.parse_args(args=args)
# Run Excel UI
run(input_path=args.inputpath,
output_path=args.outputpath,
verbose=args.verbose,
plot=args.plot,
hist_sheet=args.histogram_sheet)
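# --- Editorial note (not part of the original module) -----------------------
# Typical invocations of the entry point defined above, using the flag names
# from the argparse definition (the workbook names are hypothetical):
#   python excel_ui.py -i experiment.xlsx -v -p -H
#   flowcal -i experiment.xlsx -o results.xlsx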
if __name__ == '__main__':
run_command_line()
| mit | 3,935,264,825,413,908,000 | 41.098556 | 83 | 0.559444 | false |
ruschelp/cortex-vfx | test/IECore/ops/imagePrimitiveInOut/imagePrimitiveInOut-1.py | 12 | 2283 | ##########################################################################
#
# Copyright (c) 2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
class imagePrimitiveInOut( IECore.Op ) :
def __init__( self ) :
IECore.Op.__init__( self, "",
IECore.ImagePrimitiveParameter(
"result", "",
defaultValue = IECore.ImagePrimitive()
)
)
self.parameters().addParameter(
IECore.ImagePrimitiveParameter(
"input",
"",
defaultValue = IECore.ImagePrimitive(),
),
)
def doOperation( self, args ):
return args["input"].copy()
IECore.registerRunTimeTyped( imagePrimitiveInOut )
| bsd-3-clause | -4,463,369,717,491,706,400 | 34.671875 | 76 | 0.679369 | false |
HSC-Users/hscTools | bick/bin/hscOverlay.py | 2 | 4221 | #!/usr/bin/env python
# Original filename: bin/hscOverlay.py
#
# Author: Steven Bickerton
# Email:
# Date: Tue 2014-10-07 11:50:49
#
# Summary:
#
import sys
import os
import re
import math
import argparse
import lsst.daf.persistence as dafPersist
import hsc.pipe.base.butler as hscButler
import numpy
import matplotlib.figure as figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigCanvas
import PIL
import hsc.tools.bick.utils as hscUtil
import lsst.afw.cameraGeom as camGeom
import lsst.afw.cameraGeom.utils as camGeomUtils
#############################################################
#
# Main body of code
#
#############################################################
def main(figname, camera='hsc', output=None):
# build a camera
if camera == 'hsc':
simdir = os.environ['OBS_SUBARU_DIR']
cameraGeomPaf = os.path.join(simdir, "hsc", "hsc_geom.paf")
if not os.path.exists(cameraGeomPaf):
cameraGeomPaf = os.path.join(simdir, "hsc", "description", "hsc_geom.paf")
if not os.path.exists(cameraGeomPaf):
raise Exception("Unable to find cameraGeom Policy file: %s" % (cameraGeomPaf))
cameraGeomPolicy = camGeomUtils.getGeomPolicy(cameraGeomPaf)
camera = camGeomUtils.makeCamera(cameraGeomPolicy)
if not camera:
raise ValueError("Camera must be ... uhm ... 'hsc'")
###########################
# make RBG arrays with FpaImage objects
# The FpaImage objects have a method to extract pixels from one CCD
# So we'll create a dummy image and put the whole user image in it,
# then we'll copy each CCD into the final image
###########################
bin = 16
fpa_img = [
hscUtil.FpaImage(camera, scale=bin),
hscUtil.FpaImage(camera, scale=bin),
hscUtil.FpaImage(camera, scale=bin),
]
fpa_dum = [
hscUtil.FpaImage(camera, scale=bin),
hscUtil.FpaImage(camera, scale=bin),
hscUtil.FpaImage(camera, scale=bin),
]
ny, nx = fpa_img[0].image.shape
###########################
# Load the image the user provided.
# figure out how much to scale things so it fits in our FpaImage
###########################
img0 = PIL.Image.open(figname)
h, w = img0.size
if h >= w:
scale = 1.0*ny/h
else:
scale = 1.0*nx/w
img = numpy.array(img0.resize((int(scale*h),int(scale*w)), PIL.Image.ANTIALIAS))
subw = int(scale*w)
subh = int(scale*h)
x0 = int(0.5*(nx - subw))
y0 = int(0.5*(ny - subh))
fpa_dum[0].image += 100
for i in 0,1,2:
fpa_dum[i].image[y0:y0+subh,x0:x0+subw] = img[:,:,i]
###########################
# Now copy each CCD from the dummy to the original
###########################
    for i in 0, 1, 2:
for i_ccd in range(104):
img2 = fpa_dum[i].getPixels(i_ccd)
print i_ccd, img2.shape
fpa_img[i].insert(i_ccd, img2)
###########################
# convert this to an RBG image
# we don't need to worry about uint8 with range 256, matplotlib will handle normalization.
###########################
color_img = numpy.ones((ny, nx, 3), dtype=numpy.uint8)
for i in range(3):
color_img[:,:,i] = fpa_img[i].image
###########################
# plot it
###########################
fig = figure.Figure()
canvas = FigCanvas(fig)
ax = fig.add_subplot(111)
ax.imshow(color_img)
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
fig.savefig(output)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--version", action='version', version="0.0")
parser.add_argument("figure", type=str, help="")
parser.add_argument("-c", "--camera", default='hsc', choices=("hsc"), help="Which camera?")
parser.add_argument("-o", "--output", default=None, help="")
args = parser.parse_args()
if not args.output:
fname, ext = os.path.splitext(args.figure)
args.output = fname + "-hsc" + ext
main(args.figure, output=args.output, camera=args.camera)
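# --- Editorial note (not part of the original script) -----------------------
# Command-line usage implied by the argparse setup above (file names are
# hypothetical):
#   python hscOverlay.py input_figure.png -c hsc -o overlay-hsc.png
# When --output is omitted, "-hsc" is appended to the input file's base name.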
| gpl-3.0 | 4,461,821,912,527,629,300 | 30.5 | 95 | 0.55674 | false |
ironsmile/tank4eta | render.py | 1 | 6364 | #!/usr/bin/env python
#-*- coding: utf8 -*-
import sys
import time
import pygame
import logging
import pygame.font
import fonts
from locals import *
RESOLUTION = (1024, 768)
class Render (object):
def __init__(self, fullscreen=False):
pygame.display.init()
pygame.display.set_caption("tank4eta")
self.fullscreen = fullscreen
self.should_flip = False
self.show_fps = False
self.fps = 0
self.ndi = pygame.display.Info()
self.debug_display(self.ndi, "native")
self.render_resolution = None
self.screen = None
self.background = None
self.toggle_full_screen(
force_fullscreen_to=self.fullscreen,
initial=True
)
self.set_render_resolution(RESOLUTION)
if sys.platform.startswith('win') or sys.platform.startswith('linux'):
self.should_flip = True
def set_render_resolution(self, resolution):
if self.render_resolution == resolution:
return
self.render_resolution = resolution
self.create_aspect_surface()
def set_background(self, sprite):
self.background.fill(BACKGROUND_COLOUR)
sprite.draw(self.background)
self.draw_background()
def draw_background(self):
self.aspect_surface.blit(self.background, (0, 0))
def clear(self, sprites=[]):
for obj_group in sprites:
obj_group.clear(self.aspect_surface, self.background)
def draw(self, sprites=[]):
changed = []
for obj_group in sprites:
changed += obj_group.draw(self.aspect_surface)
self.update(changed)
def update(self, changed):
self.draw_fps()
pygame.display.update(changed)
if self.should_flip:
pygame.display.flip()
def draw_fps(self):
if not self.show_fps:
return
fps_text = "{0}".format(int(self.fps))
fps_sprite = fonts.get_serif_normal().render(fps_text, 1, YELLOW)
fps_rect = fps_sprite.get_rect()
self.aspect_surface.blit(self.background, (0, 0), fps_rect)
self.aspect_surface.blit(fps_sprite, (0, 0))
return fps_rect
def draw_end_game_screen(self, text, stats_text):
self.screen.fill(BACKGROUND_COLOUR)
title_text_rect = fonts.get_serif_big().render(text, 1, WHITE)
title_x = (self.screen.get_width() - title_text_rect.get_width()) / 2
title_y = (self.screen.get_height() - title_text_rect.get_height()) / 2
self.screen.blit(title_text_rect, (title_x, title_y))
stats_text_rect = fonts.get_serif_normal().render(stats_text, 1, SILVER)
text_x = (self.screen.get_width() - stats_text_rect.get_width()) / 2
text_y = (self.screen.get_height() - stats_text_rect.get_height()) / 2 + 50
self.screen.blit(stats_text_rect, (text_x, text_y))
pygame.display.flip()
def clear_screen(self):
self.screen.fill(BLACK)
pygame.display.flip()
def debug_display(self, display, name=None):
logging.debug("-" * 10)
if name is None:
logging.debug("Debugging unknown display")
else:
logging.debug("Debugging %s display" % name)
logging.debug("Hardware acceleration: %d" % display.hw)
logging.debug("Can be windowed: %d" % display.wm)
logging.debug("Video memory: %d" % display.video_mem)
logging.debug("Width, Height: %dx%d" % (display.current_w, display.current_h))
logging.debug("-" * 10)
def create_aspect_surface(self):
render_w, render_h = self.render_resolution
display_w, display_h = self.resolution
render_ratio = float(render_w) / render_h
aspect_w = int(render_ratio * display_h)
aspect_h = display_h
aspect = (aspect_w, aspect_h)
sub_x = int((display_w - aspect_w) / 2)
sub_y = int((display_h - aspect_h) / 2)
pos = (sub_x, sub_y)
logging.debug("Aspect surface is %dx%d" % aspect)
logging.debug("Aspect surface is on coords (%d, %d)" % pos)
aserf = self.screen.subsurface(
pygame.Rect(pos, aspect)
)
self.aspect_surface = aserf
self.aspect_resolution = aspect
self.background = self.aspect_surface.copy()
if self.render_resolution == aspect:
return
# Since the render resolution is always floored it is guaranteed to be
# less or equal to the aspect resolution. We the following we are centering
# the picture in the aspect surface
ox = int((aspect_w - render_w) / 2)
oy = int((aspect_h - render_h) / 2)
self.aspect_surface = self.aspect_surface.subsurface(
pygame.Rect((ox, oy), self.render_resolution)
)
def update_fps(self, clock):
self.fps = clock.get_fps()
def quit(self):
pygame.display.quit()
def toggle_full_screen(self, force_fullscreen_to=None, initial=False):
if not initial:
pygame.display.quit()
time.sleep(1)
pygame.display.init()
if force_fullscreen_to is not None:
self.fullscreen = not force_fullscreen_to
if not self.fullscreen:
logging.debug("Going into fullscreen")
self.fullscreen = True
else:
logging.debug("Going into windowed mode")
self.fullscreen = False
if self.fullscreen:
self.resolution = (self.ndi.current_w, self.ndi.current_h)
self.screen = pygame.display.set_mode(
self.resolution,
pygame.FULLSCREEN | pygame.DOUBLEBUF | pygame.HWSURFACE
)
else:
self.resolution = RESOLUTION
self.screen = pygame.display.set_mode(
self.resolution,
pygame.DOUBLEBUF | pygame.HWSURFACE
)
self.display_info = pygame.display.Info()
self.debug_display(self.display_info, "game")
if not initial:
self.create_aspect_surface()
def show_menu_instruction(self, instruction):
text_obj = fonts.serif_normal.render(instruction, True, SILVER)
text_x = (self.screen.get_width() - text_obj.get_width()) / 2
text_y = 40
self.screen.blit(text_obj, (text_x, text_y))
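# --- Editorial sketch (not part of the original module) ---------------------
# Expected call sequence for one frame, based on the methods above; the
# sprite groups are assumed to be pygame.sprite.Group instances.
def _example_render_frame(sprite_groups):
    render = Render(fullscreen=False)
    clock = pygame.time.Clock()
    clock.tick(60)
    render.update_fps(clock)
    render.clear(sprite_groups)   # erase previously drawn sprites
    render.draw(sprite_groups)    # redraw and update the changed rects
    render.quit()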
| mit | 5,978,659,144,117,039,000 | 32.671958 | 86 | 0.594595 | false |
led02/F2x | src/F2x/parser/plyplus/tree.py | 2 | 15740 | # Copyright 2018 German Aerospace Center (DLR)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Created on 08.04.2016
@author: meinel
'''
from F2x.parser import tree
class VarDecl(tree.VarDecl):
"""
A variable declaration.
The following properties are available:
- name: The symbolic name of the variable.
- type: The C type of this variable. This might be a basic type (REAL, INTEGER, LOGICAL) or TYPE(C) for any
other type like arrays, derived types or strings.
- pytype, cstype: The type to be used by Python or C# respectively.
- intent: May be 'IN', 'OUT' or 'INOUT'.
- getter: This indicates whether the generated getter should be a 'function' or 'subroutine'.
- setter (opt): This indicates whether a 'subroutine' should be generated as setter.
- ftype (opt): The name of the derived type.
- strlen (opt): The length of the string.
- kind (opt): The kind specifier if available.
- dynamic (opt): Indicates whether the variable is 'ALLOCATABLE' or a 'POINTER'.
- dims (opt): For an array contains a list with the sizes per dimension.
"""
_PYTYPES = {
"REAL": "ctypes.c_double",
"INTEGER": "ctypes.c_int",
"LOGICAL": "ctypes.c_bool",
"TYPE(C_PTR)": "ctypes.c_void_p",
}
_CSTYPES = {
"REAL": "Double",
"INTEGER": "Int32",
"LOGICAL": "Int32",
"TYPE(C_PTR)": "IntPtr",
}
def _init_children(self):
self["name"] = self._ast.select1("name").tail[0]
# Identify FORTRAN type and store properties accordingly
full_spec = self._ast.parent().parent()
type_spec = full_spec.select1("declaration_type_spec")
try:
self["ftype"] = type_spec.select1("derived_type_spec name").tail[0]
self["type"] = "TYPE(C_PTR)"
self["getter"] = "function"
self["dynamic"] = False
except ValueError:
try:
self["strlen"] = int(type_spec.select1("char_selector int_literal_constant").tail[0])
self["intent"] = "IN"
self["type"] = "TYPE(C_PTR)"
self["pytype"] = "ctypes.c_char_p"
self["cstype"] = "String"
self["getter"] = "subroutine"
self["setter"] = "subroutine"
except ValueError:
try:
self["strlen"] = type_spec.select1("char_selector /(\*|:)/")
self["intent"] = "IN"
self["type"] = "TYPE(C_PTR)"
self["pytype"] = "ctypes.c_char_p"
self["cstype"] = "String"
self["getter"] = "subroutine"
self["setter"] = "subroutine"
except ValueError:
self["type"] = type_spec.select1("intrinsic_type_kind").tail[0]
self["getter"] = "function"
self["setter"] = "subroutine"
for attr in full_spec.select(self._prefix + "attr_spec"):
if 'ALLOCATABLE' in attr.tail:
self["dynamic"] = 'ALLOCATABLE'
elif 'POINTER' in attr.tail:
self["dynamic"] = 'POINTER'
# Identify array dimensions
for ast in (self._ast, full_spec):
dim_nodes = ast.select(self._prefix + "array_spec array_spec_element")
if not dim_nodes:
continue
dims = []
for node in dim_nodes:
dim = node.select("int_literal_constant")
if dim:
dims.append(dim[0].tail[0])
continue
dim = node.select("part_ref")
if dim:
dims.append(dim[0].tail[0])
break
dims.append(0)
if dims:
self["dims"] = dims
if "dims" in self \
and "strlen" not in self:
if "setter" in self:
del self["setter"]
if "pytype" not in self \
and self["type"].upper() in self._PYTYPES:
self["pytype"] = self._PYTYPES[self["type"].upper()]
if "cstype" not in self \
and self["type"].upper() in self._CSTYPES:
self["cstype"] = self._CSTYPES[self["type"].upper()]
try:
kind_selector = type_spec.select1("kind_selector int_literal_constant")
self["kind"] = int(kind_selector.tail[0])
except ValueError:
try:
kind_selector = type_spec.select1("kind_selector part_ref")
self["kind"] = kind_selector.tail[0]
except ValueError:
pass
try:
intent_spec = type_spec.parent().select1("intent_spec")
self["intent"] = intent_spec.tail[0]
except ValueError:
self["intent"] = 'IN'
# No setter for PARAMETERs
if "setter" in self \
and len(full_spec.select("attr_spec /PARAMETER/")) > 0:
del self["setter"]
def with_intent(self, intent):
self["intent"] = intent
return self
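# --- Editorial sketch (not part of the original module) ---------------------
# A VarDecl behaves like a mapping populated by _init_children() above, so
# template code typically reads it as below; 'decl' is assumed to come from a
# parsed FORTRAN source.
def _example_describe_var(decl):
    desc = "{0} :: {1}".format(decl["type"], decl["name"])
    if "dims" in decl:
        desc += ", DIMENSION({0})".format(", ".join(str(d) for d in decl["dims"]))
    if "intent" in decl:
        desc += ", INTENT({0})".format(decl["intent"])
    return desc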
class TypeDef(tree.TypeDef):
def _init_children(self):
self["name"] = self._ast.select1("derived_type_stmt name").tail[0]
try:
self["public"] = (self._ast.select1("access_spec").tail[0].upper() == 'PUBLIC')
except ValueError:
self["public"] = False
self["fields"] = [
VarDecl(decl, 'component_') # See documentation of VarDecl.__init__
for decl in self._ast.select("component_decl")
]
for field in self["fields"]:
del field["intent"]
class SubDef(tree.SubDef):
_PREFIX = "subroutine"
def _init_children(self):
self["name"] = self._ast.select(self._PREFIX + "_stmt name")[0].tail[0]
# Two-stage argument extraction:
# First, identify all variables declared and the dummy argument list.
dummy_args = [arg.tail[0] for arg in self._ast.select("dummy_arg name")]
var_specs = dict(
(argdecl.select1("name").tail[0], VarDecl(argdecl))
for argdecl in self._ast.select("entity_decl")
)
# Fill up self["args"] based on dummy argument list order.
self["args"] = [var_specs[argname] for argname in dummy_args]
return var_specs # to be re-used in child classes.
class FuncDef(SubDef):
_PREFIX = "function"
def _init_children(self):
var_specs = super(FuncDef, self)._init_children()
# Capture return type of function for return value.
res_name = self._ast.select("result_name name")
if res_name:
self["ret"] = var_specs[res_name[0].tail[0]]
else:
try:
self["ret"] = var_specs[self["name"] + "_VALUE"]
except KeyError:
self["ret"] = var_specs[self["name"]]
if "dims" in self["ret"]:
self["ret"]["getter"] = "subroutine"
self["ret"]["intent"] = "OUT"
class Module(tree.Module):
def _init_children(self):
self["name"] = self._ast.select1("module_stmt name").tail[0]
self["uses"] = [use.tail[0] for use in self._ast.select("use_stmt name")]
self["types"] = [
TypeDef(typedef)
for typedef in self._ast.select("derived_type_def")
]
self["globals"] = [
VarDecl(var)
for var in self._ast.select("module > specification_part type_declaration_stmt entity_decl")
if len(var.parent().parent().select("access_spec /PUBLIC/")) > 0
]
# def export_methods(self, config):
def export_methods(self, src):
config = src.config
if config.has_section("export"):
export_items = [key for key, _ in config.items("export")]
else:
export_items = None
methods = []
for funcdef in self._ast.select("function_subprogram") :
if export_items is None or funcdef.select("function_stmt name")[0].tail[0].lower() in export_items:
method = FuncDef(funcdef)
method["export_name"] = config.get("export", method["name"].lower(), fallback=f'{self["name"]}_{method["name"]}')
if "ret" in method:
if "dims" in method["ret"]:
l_line = [line for line in src.source_lines if method["ret"]["name"] in line and "ALLOCATE" in line]
if len(l_line) == 1:
#ok, it is a dynamic array, find the size variable of the array
l_aux_line = l_line[0][l_line[0].find(method["ret"]["name"]):-2]
l_size_var = l_aux_line[len(method["ret"]["name"])+1:-1].split(',')
method["ret"]["dims"] = l_size_var
if method["ret"]["getter"] == "subroutine":
if method["ret"]["name"] == method["name"]:
method["ret"]["name"] = method["export_name"].upper() + '_OUT'
method["ret"]["intent"] = "OUT"
else:
method["ret"]["name"] = method["export_name"].upper() + '_RESULT'
del method["ret"]["intent"]
methods.append(method)
for subdef in self._ast.select("subroutine_subprogram") :
if export_items is None or subdef.select("subroutine_stmt name")[0].tail[0].lower() in export_items:
method = SubDef(subdef)
method["export_name"] = config.get("export", method["name"].lower(), fallback=f'{self["name"]}_{method["name"]}')
l_array_args = [ l_arg for l_arg in method["args"] if "dims" in l_arg ]
if len(l_array_args) > 0:
#okay, we have arguments of array type
sub_start, sub_end = self._get_subroutine(method["name"], src.source_lines)
for arg in l_array_args:
self._set_array_size(arg, src.source_lines[sub_start: sub_end])
if "ret" in method:
method["ret"]["name"] = method["export_name"].upper() + '_OUT'
method["ret"]["intent"] = "OUT"
methods.append(method)
self["methods"] = methods
for method in methods:
section_key = "{0}:Cleanup".format(method["name"])
if config.has_section(section_key):
if "ret" in method: print("FREE", section_key, method["ret"]["name"])
if "ret" in method and config.has_option(section_key, method["ret"]["name"]):
method["ret"]["free"] = config.get(section_key, method["ret"]["name"])
for var in method["args"]:
if config.has_option(section_key, var["name"]):
var["free"] = config.get(section_key, var["name"])
def _set_array_size(self, a_argument, a_src):
l_arg = a_argument["name"]
l_arg_len = len(l_arg)
l_key_len = 8 # keyword "ALLOCATE"
for index, line in enumerate(a_src) :
# to do: skip the comments
l_line = line[line.find("::")+2 : ].strip()
# this is the declaration line
if l_line.startswith(l_arg+'(') :
l_declare = l_line.split('!')
l_array_var = l_declare[0].strip()
l_size_var = l_array_var[l_arg_len+1:-1].split(',')
if l_size_var[0] == ':':
# check if the array is dynamically allocated within the function/subroutine body
for line in a_src[index:] :
line = line.strip()
if line.startswith("ALLOCATE") :
# skip comment
l_alloc = line.split('!')[0].strip()
l_line = l_alloc[l_key_len:].strip()[1:-1]
l_alloc_list = l_line.split('),')
# check if more than one variables are allocated
if len(l_alloc_list) > 1 :
for l_alloc in l_alloc_list :
l_alloc = l_alloc.strip()
if l_alloc.startswith(l_arg + '(') :
l_aux_line = ''
if l_alloc.endswith(')') :
l_aux_line = l_alloc[l_arg_len+1:-1].strip()
else :
l_aux_line = l_alloc[l_arg_len+1:].strip()
l_size_var = l_aux_line.split(',')
a_argument["dims"] = l_size_var
break
else :
l_alloc = l_alloc_list[0].strip()
if l_alloc.startswith(l_arg + '(') :
l_aux_line = l_alloc[l_arg_len+1:-1].strip()
l_size_var = l_aux_line.split(',')
a_argument["dims"] = l_size_var
else :
# okay, no size variable is found. It could be "IN" or "INOUT" type,
if len(l_declare) == 2 :
l_comment = l_declare[1].strip()
l_f2x_markup='@F2x=>'
if l_comment.startswith(l_f2x_markup) :
l_vars = l_comment.split(l_f2x_markup+l_arg)[1]
l_size_var = l_vars[1:-1].split(',')
a_argument["dims"] = l_size_var
else :
# Attention: no information is provided, code is not reliable !!
                            # But at least make sure the dimension is correctly set
n = len(l_size_var)
a_argument["dims"] = [ 0 if x == ':' else x for x in l_size_var ]
else :
# Same problem as above !!
n = len(l_size_var)
a_argument["dims"] = [ 0 if x == ':' else x for x in l_size_var ]
else :
# size variables are set explicitly
a_argument["dims"] = l_size_var
break
def _get_subroutine(self,a_argument, a_src):
startIndex = 0
stopIndex =0
for i in range(len(a_src)):
l_str = a_src[i].strip()
if l_str.startswith("SUBROUTINE") and a_argument in l_str :
startIndex = i
for j, line in enumerate(a_src[i:]):
line = line.strip()
if line.startswith("END SUBROUTINE") :
stopIndex = i + j
break
break
else:
# should not happend
pass
return (startIndex, stopIndex)
| apache-2.0 | -1,514,153,886,316,261,400 | 40.421053 | 129 | 0.488882 | false |
broferek/ansible | lib/ansible/module_utils/network/dellos6/dellos6.py | 13 | 10501 | #
# (c) 2015 Peter Sprygada, <[email protected]>
# (c) 2017 Red Hat, Inc
#
# Copyright (c) 2016 Dell Inc.
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import re
import json
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.connection import Connection, ConnectionError, exec_command
from ansible.module_utils.network.common.config import NetworkConfig, ConfigLine, ignore_line
_DEVICE_CONFIGS = {}
WARNING_PROMPTS_RE = [
r"[\r\n]?\[confirm yes/no\]:\s?$",
r"[\r\n]?\[y/n\]:\s?$",
r"[\r\n]?\[yes/no\]:\s?$"
]
dellos6_provider_spec = {
'host': dict(),
'port': dict(type='int'),
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'),
'auth_pass': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS']), no_log=True),
'timeout': dict(type='int'),
}
dellos6_argument_spec = {
'provider': dict(type='dict', options=dellos6_provider_spec),
}
dellos6_top_spec = {
'host': dict(removed_in_version=2.9),
'port': dict(removed_in_version=2.9, type='int'),
'username': dict(removed_in_version=2.9),
'password': dict(removed_in_version=2.9, no_log=True),
'ssh_keyfile': dict(removed_in_version=2.9, type='path'),
'authorize': dict(removed_in_version=2.9, type='bool'),
'auth_pass': dict(removed_in_version=2.9, no_log=True),
'timeout': dict(removed_in_version=2.9, type='int'),
}
dellos6_argument_spec.update(dellos6_top_spec)
def get_provider_argspec():
return dellos6_provider_spec
def get_connection(module):
if hasattr(module, '_dellos6_connection'):
return module._dellos6_connection
capabilities = get_capabilities(module)
network_api = capabilities.get('network_api')
if network_api == 'cliconf':
module._dellos6_connection = Connection(module._socket_path)
else:
module.fail_json(msg='Invalid connection type %s' % network_api)
return module._dellos6_connection
def get_capabilities(module):
if hasattr(module, '_dellos6_capabilities'):
return module._dellos6_capabilities
try:
capabilities = Connection(module._socket_path).get_capabilities()
except ConnectionError as exc:
module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
module._dellos6_capabilities = json.loads(capabilities)
return module._dellos6_capabilities
def check_args(module, warnings):
pass
def get_config(module, flags=None):
flags = [] if flags is None else flags
cmd = 'show running-config '
cmd += ' '.join(flags)
cmd = cmd.strip()
try:
return _DEVICE_CONFIGS[cmd]
except KeyError:
rc, out, err = exec_command(module, cmd)
if rc != 0:
module.fail_json(msg='unable to retrieve current config', stderr=to_text(err, errors='surrogate_or_strict'))
cfg = to_text(out, errors='surrogate_or_strict').strip()
_DEVICE_CONFIGS[cmd] = cfg
return cfg
def run_commands(module, commands, check_rc=True):
connection = get_connection(module)
try:
return connection.run_commands(commands=commands, check_rc=check_rc)
except ConnectionError as exc:
module.fail_json(msg=to_text(exc))
def load_config(module, commands):
rc, out, err = exec_command(module, 'configure terminal')
if rc != 0:
module.fail_json(msg='unable to enter configuration mode', err=to_text(err, errors='surrogate_or_strict'))
for command in to_list(commands):
if command == 'end':
continue
# cmd = {'command': command, 'prompt': WARNING_PROMPTS_RE, 'answer': 'yes'}
rc, out, err = exec_command(module, command)
if rc != 0:
module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), command=command, rc=rc)
exec_command(module, 'end')
def get_sublevel_config(running_config, module):
contents = list()
current_config_contents = list()
sublevel_config = Dellos6NetworkConfig(indent=0)
obj = running_config.get_object(module.params['parents'])
if obj:
contents = obj._children
for c in contents:
if isinstance(c, ConfigLine):
current_config_contents.append(c.raw)
sublevel_config.add(current_config_contents, module.params['parents'])
return sublevel_config
def os6_parse(lines, indent=None, comment_tokens=None):
sublevel_cmds = [
re.compile(r'^vlan !(priority).*$'),
re.compile(r'^stack.*$'),
re.compile(r'^interface.*$'),
re.compile(r'datacenter-bridging.*$'),
re.compile(r'line (console|telnet|ssh).*$'),
re.compile(r'ip ssh !(server).*$'),
re.compile(r'(ip|mac|management|arp) access-list.*$'),
re.compile(r'ip dhcp pool.*$'),
re.compile(r'ip vrf !(forwarding).*$'),
re.compile(r'ipv6 (dhcp pool|router).*$'),
re.compile(r'mail-server.*$'),
re.compile(r'vpc domain.*$'),
re.compile(r'router.*$'),
re.compile(r'route-map.*$'),
re.compile(r'policy-map.*$'),
re.compile(r'((class-map match-(all|any))|(class\s)).*$'),
re.compile(r'captive-portal.*$'),
re.compile(r'admin-profile.*$'),
re.compile(r'link-dependency group.*$'),
re.compile(r'banner motd.*$'),
re.compile(r'openflow.*$'),
re.compile(r'support-assist.*$'),
re.compile(r'template.*$'),
re.compile(r'address-family.*$'),
re.compile(r'spanning-tree mst configuration.*$'),
re.compile(r'logging (?!.*(cli-command|buffered|console|email|facility|file|monitor|protocol|snmp|source-interface|traps|web-session)).*$'),
re.compile(r'(radius-server|tacacs-server) host.*$'),
re.compile(r'radius server (auth|acct).*$'),
re.compile(r'aaa server radius dynamic-author.*$')]
childline = re.compile(r'^exit$')
config = list()
parent = list()
children = []
parent_match = False
for line in str(lines).split('\n'):
text = str(re.sub(r'([{};])', '', line)).strip()
cfg = ConfigLine(text)
cfg.raw = line
if not text or ignore_line(text, comment_tokens):
continue
else:
parent_match = False
# handle sublevel parent
for pr in sublevel_cmds:
if pr.match(line):
if len(parent) != 0:
cfg._parents.extend(parent)
parent.append(cfg)
config.append(cfg)
if children:
children.insert(len(parent) - 1, [])
children[len(parent) - 2].append(cfg)
parent_match = True
continue
# handle exit
if childline.match(line):
if children:
parent[len(children) - 1]._children.extend(children[len(children) - 1])
if len(children) > 1:
parent[len(children) - 2]._children.extend(parent[len(children) - 1]._children)
cfg._parents.extend(parent)
children.pop()
parent.pop()
if not children:
children = list()
if parent:
cfg._parents.extend(parent)
parent = list()
config.append(cfg)
# handle sublevel children
elif parent_match is False and len(parent) > 0:
if not children:
cfglist = [cfg]
children.append(cfglist)
else:
children[len(parent) - 1].append(cfg)
cfg._parents.extend(parent)
config.append(cfg)
# handle global commands
elif not parent:
config.append(cfg)
return config
class Dellos6NetworkConfig(NetworkConfig):
def load(self, contents):
self._items = os6_parse(contents, self._indent)
def _diff_line(self, other, path=None):
diff = list()
for item in self.items:
if str(item) == "exit":
for diff_item in diff:
if diff_item._parents:
if item._parents == diff_item._parents:
diff.append(item)
break
else:
diff.append(item)
break
elif item not in other:
diff.append(item)
return diff
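# --- Editorial sketch (not part of the original module) ---------------------
# How the OS6-aware config tree above is typically used to compare two
# configurations; the configuration text is hypothetical and difference()
# comes from the NetworkConfig base class, which dispatches to the
# _diff_line() override above.
def _example_os6_diff():
    running = Dellos6NetworkConfig(indent=0)
    running.load("interface vlan 10\nname \"users\"\nexit\n")
    candidate = Dellos6NetworkConfig(indent=0)
    candidate.load("interface vlan 10\nname \"servers\"\nexit\n")
    return candidate.difference(running)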
| gpl-3.0 | -143,854,291,924,755,060 | 37.892593 | 148 | 0.61137 | false |
tiancj/emesene | emesene/e3/papylib/papyon/papyon/service/AddressBook/scenario/base.py | 6 | 1815 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Johann Prieur <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from papyon.util.async import *
__all__ = ['BaseScenario', 'Scenario']
class BaseScenario(object):
def __init__(self, partner_scenario, callback, errback):
self._scenario = partner_scenario
self._callback = callback
self._errback = errback
def __set_scenario(self, scenario):
self._scenario = scenario
def __get_scenario(self):
return self._scenario
scenario = property(__get_scenario, __set_scenario)
def callback(self, *args):
run(self._callback, *args)
def errback(self, *args):
run(self._errback, *args)
def execute(self):
pass
def __call__(self):
return self.execute()
class Scenario(object):
"""Scenario label"""
INITIAL = "Initial"
TIMER = "Timer"
CONTACT_SAVE = "ContactSave"
GROUP_SAVE = "GroupSave"
BLOCK_UNBLOCK = "BlockUnblock"
CONTACT_MSGR_API = "ContactMsgrAPI"
MOBILE_CONTACT_MSGR_API = "MobileContactMsgrAPI"
MESSENGER_PENDING_LIST = "MessengerPendingList"
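# --- Editorial sketch (not part of the original module) ---------------------
# The pattern concrete scenarios follow: subclass BaseScenario, override
# execute(), and report through callback()/errback(). The address-book
# service argument is hypothetical.
class _ExampleScenario(BaseScenario):
    def __init__(self, ab_service, callback, errback):
        BaseScenario.__init__(self, Scenario.INITIAL, callback, errback)
        self._ab = ab_service
    def execute(self):
        # A real scenario would issue an asynchronous service request here
        # and invoke self.callback(...) from its response handler.
        self.callback()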
| gpl-3.0 | -727,098,125,315,662,300 | 30.293103 | 76 | 0.683196 | false |
ep1cman/workload-automation | wlauto/workloads/caffeinemark/__init__.py | 10 | 2731 | # Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
from wlauto import AndroidUiAutoBenchmark
class Caffeinemark(AndroidUiAutoBenchmark):
name = 'caffeinemark'
description = """
CaffeineMark is a series of tests that measure the speed of Java
programs running in various hardware and software configurations.
http://www.benchmarkhq.ru/cm30/info.html
From the website:
CaffeineMark scores roughly correlate with the number of Java instructions
executed per second, and do not depend significantly on the the amount of
memory in the system or on the speed of a computers disk drives or internet
connection.
The following is a brief description of what each test does:
- Sieve: The classic sieve of eratosthenes finds prime numbers.
- Loop: The loop test uses sorting and sequence generation as to measure
compiler optimization of loops.
- Logic: Tests the speed with which the virtual machine executes
decision-making instructions.
- Method: The Method test executes recursive function calls to see how
well the VM handles method calls.
- Float: Simulates a 3D rotation of objects around a point.
- Graphics: Draws random rectangles and lines.
- Image: Draws a sequence of three graphics repeatedly.
- Dialog: Writes a set of values into labels and editboxes on a form.
The overall CaffeineMark score is the geometric mean of the individual
scores, i.e., it is the 9th root of the product of all the scores.
"""
package = "com.flexycore.caffeinemark"
activity = ".Application"
summary_metrics = ['OverallScore']
regex = re.compile(r'CAFFEINEMARK RESULT: (?P<type>\w+) (?P<value>\S+)')
def update_result(self, context):
super(Caffeinemark, self).update_result(context)
with open(self.logcat_log) as fh:
for line in fh:
match = self.regex.search(line)
if match:
metric = match.group('type')
value = float(match.group('value'))
context.result.add_metric(metric, value)
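# --- Editorial sketch (not part of the original module) ---------------------
# The logcat lines consumed above look roughly like the hypothetical example
# below; the regex captures a metric name and its score.
def _example_parse_line():
    line = "CAFFEINEMARK RESULT: OverallScore 4567"   # assumed log format
    match = re.search(r'CAFFEINEMARK RESULT: (?P<type>\w+) (?P<value>\S+)', line)
    return match.group('type'), float(match.group('value'))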
| apache-2.0 | 7,416,160,328,666,156,000 | 39.161765 | 80 | 0.686562 | false |
ddico/odoo | odoo/addons/test_read_group/tests/test_empty.py | 43 | 1898 | # -*- coding: utf-8 -*-
from odoo.tests import common
class TestEmptyDate(common.TransactionCase):
""" Test what happens when grouping on date fields and getting a "false"
grouping value
"""
def setUp(self):
super(TestEmptyDate, self).setUp()
self.Model = self.env['test_read_group.on_date']
def test_empty_only(self):
self.Model.create({'value': 1})
self.Model.create({'value': 2})
self.Model.create({'value': 3})
gb = self.Model.read_group([], ['date', 'value'], ['date'], lazy=False)
self.assertEqual(gb, [{
'__count': 3,
'__domain': [('date', '=', False)],
'date': False,
'value': 6
}])
def test_empty_by_span(self):
self.Model.create({'value': 1})
self.Model.create({'value': 2})
self.Model.create({'value': 3})
gb = self.Model.read_group([], ['date', 'value'], ['date:quarter'], lazy=False)
self.assertEqual(gb, [{
'__count': 3,
'__domain': [('date', '=', False)],
'date:quarter': False,
'value': 6
}])
def test_mixed(self):
self.Model.create({'date': False, 'value': 1})
self.Model.create({'date': False, 'value': 2})
self.Model.create({'date': '1916-12-18', 'value': 3})
self.Model.create({'date': '1916-12-18', 'value': 4})
gb = self.Model.read_group([], ['date', 'value'], ['date'], lazy=False)
self.assertSequenceEqual(sorted(gb, key=lambda r: r['date'] or ''), [{
'__count': 2,
'__domain': [('date', '=', False)],
'date': False,
'value': 3,
}, {
'__count': 2,
'__domain': ['&', ('date', '>=', '1916-12-01'), ('date', '<', '1917-01-01')],
'date': 'December 1916',
'value': 7,
}])
| agpl-3.0 | 2,720,031,528,162,348,500 | 31.169492 | 89 | 0.48156 | false |
Project-Bonfire/EHA | Scripts/include/file_lists.py | 3 | 5233 |
# The idea here is to make some lists of the necessary files for each scenario
# in order to make the code a little more organized!
#---------------------------------------------------------
#
# Credit based related files
#
#---------------------------------------------------------
# Files for the base-line credit based router!
credit_based_files = ["arbiter_in.vhd", "arbiter_out.vhd", "allocator.vhd", "LBDR.vhd",
"xbar.vhd", "FIFO_one_hot_credit_based.vhd"]
credit_based_files_verilog = ["Verilog/arbiter_in.sv", "Verilog/arbiter_out.sv", "Verilog/allocator.sv", "Verilog/LBDR.sv",
"Verilog/xbar.sv", "Verilog/FIFO_one_hot_credit_based.sv"]
# Files for the credit based router equipped with packet-drop mechanism and Fault Classifier!
credit_based_files_PD_FC = ["arbiter_in.vhd", "arbiter_out.vhd", "allocator.vhd", "LBDR_packet_drop.vhd",
"xbar.vhd", "FIFO_one_hot_credit_based_packet_drop_classifier_support.vhd",
"counter_threshold.vhd"]
# Files for the credit based router equipped with packet-drop mechanism!
credit_based_files_PD = ["arbiter_in.vhd", "arbiter_out.vhd", "allocator.vhd", "LBDR_packet_drop.vhd",
"xbar.vhd", "FIFO_one_hot_credit_based_packet_drop.vhd"]
# Files for the credit based router equipped with packet-saving mechanism!
credit_based_files_PS = ["arbiter_in.vhd", "arbiter_out.vhd", "allocator.vhd", "LBDR_packet_drop.vhd",
"xbar.vhd", "FIFO_one_hot_credit_based_packet_drop_flit_saving.vhd"]
#TODO: needs to be tested:
credit_based_files_SHMU = ["allocator.vhd", "arbiter_in.vhd", "arbiter_out.vhd", "counter_threshold.vhd",
"FIFO_one_hot_credit_based_packet_drop_classifier_support.vhd", "LBDR_packet_drop.vhd", "NI.vhd",
"Router_32_bit_credit_based_packet_drop_classifier_SHMU.vhd", "xbar.vhd"]
credit_based_files_NI_Test = ["mlite_pack.vhd", "allocator.vhd", "arbiter_in.vhd", "arbiter_out.vhd", "counter_threshold.vhd", "ParityChecker_for_LBDR.vhd",
"FIFO_one_hot_credit_based_packet_drop_classifier_support.vhd", "LBDR_packet_drop.vhd",
"NI.vhd", "Router_32_bit_credit_based_packet_drop_classifier_SHMU.vhd", "xbar.vhd"]
#TODO: should add to NI_Test the following file: "TB_Package_32_bit_credit_based_NI.vhd"
# Original Plasma processor files along with the NI and NoC node wrapper!
PE_files = ["mlite_pack.vhd", "memory_sim.vhd", "alu.vhd", "bus_mux.vhd", "cache.vhd",
"control.vhd", "ddr_ctrl.vhd", "eth_dma.vhd", "mem_ctrl.vhd",
"mult.vhd", "pc_next.vhd", "pipeline.vhd", "ram.vhd",
"reg_bank_tri_port.vhd", "shifter.vhd", "uart.vhd", "NI.vhd",
"mlite_cpu.vhd", "plasma.vhd", "NoC_Node.vhd"]
#---------------------------------------------------------
#
# Checker's files
#
#---------------------------------------------------------
## CB
CB_Allocator_with_checkers_files = ["Arbiter_in_one_hot_checkers.vhd", "Arbiter_in_one_hot_with_checkers.vhd",
"Arbiter_out_one_hot_pseudo_checkers.vhd", "arbiter_out_one_hot_with_checkers.vhd",
"allocator_logic_pseudo_checkers.vhd", "allocator_credit_counter_logic_pseudo_checkers.vhd",
"allocator_with_checkers.vhd" ]
CB_Allocator_with_checkers_with_FI_files = ["Arbiter_in_one_hot_checkers.vhd", "Arbiter_in_one_hot_with_checkers_with_FI.vhd",
"Arbiter_out_one_hot_pseudo_checkers.vhd", "arbiter_out_one_hot_with_checkers_with_FI.vhd",
"allocator_logic_pseudo_checkers.vhd", "allocator_credit_counter_logic_pseudo_checkers.vhd",
"allocator_with_checkers_with_FI.vhd" ]
CB_FIFO_one_hot_CB_PD_FC_with_checkers_files = ["FIFO_one_hot_credit_based_packet_drop_classifier_support_checkers.vhd",
"FIFO_one_hot_credit_based_packet_drop_classifier_support_with_checkers.vhd"]
CB_FIFO_one_hot_CB_PD_FC_with_checkers_with_FI_files = ["FIFO_one_hot_credit_based_packet_drop_classifier_support_checkers.vhd",
"FIFO_one_hot_credit_based_packet_drop_classifier_support_with_checkers_with_FI.vhd"]
CB_LBDR_PD_with_checkers_files = ["Cx_Reconf_pseudo_checkers.vhd", "Rxy_Reconf_pseudo_checkers.vhd",
"LBDR_packet_drop_routing_part_pseudo_checkers.vhd",
"LBDR_packet_drop_with_checkers.vhd"]
CB_LBDR_PD_with_checkers_with_FI_files = ["Cx_Reconf_pseudo_checkers.vhd", "Rxy_Reconf_pseudo_checkers.vhd",
"LBDR_packet_drop_routing_part_pseudo_checkers.vhd",
"LBDR_packet_drop_with_checkers_with_FI.vhd"]
# HS
HS_Arbiter_one_hot_with_checkers =["Arbiter_checkers.vhd", "Arbiter_one_hot_with_checkers.vhd"]
HS_FIFO_one_hot_with_checkers =["FIFO_control_part_checkers.vhd", "FIFO_one_hot_with_checkers.vhd"]
HS_LBDR_with_checkers =["LBDR_checkers.vhd", "LBDR_with_checkers.vhd"]
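# Illustrative sketch (assumption, not part of the original script): these lists
# are meant to be consumed by the generation/simulation scripts, typically by
# prefixing each entry with the RTL source directory. 'copy_rtl_files' and the
# directory names below are hypothetical.
#
#   import os, shutil
#
#   def copy_rtl_files(file_list, rtl_dir, dest_dir):
#       for name in file_list:
#           shutil.copy(os.path.join(rtl_dir, name), dest_dir)
#
#   copy_rtl_files(credit_based_files, "RTL/Router", "simulation_temp")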
| gpl-3.0 | 4,927,005,644,334,628,000 | 62.817073 | 156 | 0.596981 | false |
heurezjusz/Athena | athenet/layers/concat.py | 2 | 1809 | """Concatenation layer."""
import numpy as np
import theano.tensor as T
from athenet.layers import Layer
class Concatenation(Layer):
"""Concatenation layer."""
def __init__(self, input_layer_names=None, name='concat'):
"""Create concatenation layer.
:param input_layer_names: List of input layers' names.
"""
super(Concatenation, self).__init__(input_layer_names, name)
self.input_shapes = None
self._input_layers = None
@property
def input_layer_names(self):
return self.input_layer_name
@property
def input_layers(self):
return self._input_layers
@input_layers.setter
def input_layers(self, input_layers):
self._input_layers = input_layers
self.input_shapes = [layer.output_shape for layer in input_layers]
for input_shape in self.input_shapes[1:]:
if input_shape[:-1] != self.input_shapes[0][:-1]:
                raise ValueError('all input layer image sizes must match')
self.input = [layer.output for layer in input_layers]
train_input = [layer.train_output for layer in input_layers]
if all([ti is not None for ti in train_input]):
self.train_input = train_input
@property
def output_shape(self):
x, y = self.input_shapes[0][:2]
n_channels = np.sum(
[input_shape[2] for input_shape in self.input_shapes])
return (x, y, n_channels)
def _get_output(self, layer_inputs):
"""Return layer's output.
:param layer_inputs: List of inputs in the format
(batch size, number of channels,
image height, image width).
:return: Layer output.
"""
return T.concatenate(layer_inputs, axis=1)
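# Illustrative usage sketch (assumption, not part of the original module): given
# two previously constructed layers with equal image sizes, the concatenation
# layer stacks their outputs along the channel axis. 'layer_a' and 'layer_b'
# are hypothetical.
#
#   concat = Concatenation(input_layer_names=['branch_a', 'branch_b'])
#   concat.input_layers = [layer_a, layer_b]
#   # concat.output_shape == (height, width, channels_a + channels_b)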
| bsd-2-clause | -5,675,369,083,648,082,000 | 30.736842 | 74 | 0.600884 | false |
sursum/buckanjaren | buckanjaren/lib/python3.5/site-packages/django/forms/models.py | 5 | 56541 | """
Helper functions for creating Form classes from Django models
and database field objects.
"""
from __future__ import unicode_literals
from collections import OrderedDict
from itertools import chain
from django.core.exceptions import (
NON_FIELD_ERRORS, FieldError, ImproperlyConfigured, ValidationError,
)
from django.forms.fields import ChoiceField, Field
from django.forms.forms import BaseForm, DeclarativeFieldsMetaclass
from django.forms.formsets import BaseFormSet, formset_factory
from django.forms.utils import ErrorList
from django.forms.widgets import (
HiddenInput, MultipleHiddenInput, SelectMultiple,
)
from django.utils import six
from django.utils.encoding import force_text
from django.utils.text import capfirst, get_text_list
from django.utils.translation import ugettext, ugettext_lazy as _
__all__ = (
'ModelForm', 'BaseModelForm', 'model_to_dict', 'fields_for_model',
'ModelChoiceField', 'ModelMultipleChoiceField', 'ALL_FIELDS',
'BaseModelFormSet', 'modelformset_factory', 'BaseInlineFormSet',
'inlineformset_factory', 'modelform_factory',
)
ALL_FIELDS = '__all__'
def construct_instance(form, instance, fields=None, exclude=None):
"""
Constructs and returns a model instance from the bound ``form``'s
``cleaned_data``, but does not save the returned instance to the
database.
"""
from django.db import models
opts = instance._meta
cleaned_data = form.cleaned_data
file_field_list = []
for f in opts.fields:
if not f.editable or isinstance(f, models.AutoField) \
or f.name not in cleaned_data:
continue
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
# Leave defaults for fields that aren't in POST data, except for
# checkbox inputs because they don't appear in POST data if not checked.
if (f.has_default() and
form[f.name].field.widget.value_omitted_from_data(form.data, form.files, form.add_prefix(f.name))):
continue
# Defer saving file-type fields until after the other fields, so a
# callable upload_to can use the values from other fields.
if isinstance(f, models.FileField):
file_field_list.append(f)
else:
f.save_form_data(instance, cleaned_data[f.name])
for f in file_field_list:
f.save_form_data(instance, cleaned_data[f.name])
return instance
# ModelForms #################################################################
def model_to_dict(instance, fields=None, exclude=None):
"""
Returns a dict containing the data in ``instance`` suitable for passing as
a Form's ``initial`` keyword argument.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned dict.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned dict, even if they are listed in
the ``fields`` argument.
"""
opts = instance._meta
data = {}
for f in chain(opts.concrete_fields, opts.private_fields, opts.many_to_many):
if not getattr(f, 'editable', False):
continue
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
data[f.name] = f.value_from_object(instance)
return data
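# Usage sketch (illustrative, not part of Django itself; 'Author' and
# 'AuthorForm' are hypothetical): model_to_dict() is what provides a form's
# initial data from an existing instance.
#
#   initial = model_to_dict(author, fields=['name', 'email'])
#   form = AuthorForm(initial=initial)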
def apply_limit_choices_to_to_formfield(formfield):
"""Apply limit_choices_to to the formfield's queryset if needed."""
if hasattr(formfield, 'queryset') and hasattr(formfield, 'get_limit_choices_to'):
limit_choices_to = formfield.get_limit_choices_to()
if limit_choices_to is not None:
formfield.queryset = formfield.queryset.complex_filter(limit_choices_to)
def fields_for_model(model, fields=None, exclude=None, widgets=None,
formfield_callback=None, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
field_classes=None, apply_limit_choices_to=True):
"""
Returns a ``OrderedDict`` containing form fields for the given model.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned fields.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned fields, even if they are listed
in the ``fields`` argument.
``widgets`` is a dictionary of model field names mapped to a widget.
``formfield_callback`` is a callable that takes a model field and returns
a form field.
``localized_fields`` is a list of names of fields which should be localized.
``labels`` is a dictionary of model field names mapped to a label.
``help_texts`` is a dictionary of model field names mapped to a help text.
``error_messages`` is a dictionary of model field names mapped to a
dictionary of error messages.
``field_classes`` is a dictionary of model field names mapped to a form
field class.
``apply_limit_choices_to`` is a boolean indicating if limit_choices_to
should be applied to a field's queryset.
"""
field_list = []
ignored = []
opts = model._meta
# Avoid circular import
from django.db.models.fields import Field as ModelField
sortable_private_fields = [f for f in opts.private_fields if isinstance(f, ModelField)]
for f in sorted(chain(opts.concrete_fields, sortable_private_fields, opts.many_to_many)):
if not getattr(f, 'editable', False):
if (fields is not None and f.name in fields and
(exclude is None or f.name not in exclude)):
raise FieldError(
"'%s' cannot be specified for %s model form as it is a non-editable field" % (
f.name, model.__name__)
)
continue
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
kwargs = {}
if widgets and f.name in widgets:
kwargs['widget'] = widgets[f.name]
if localized_fields == ALL_FIELDS or (localized_fields and f.name in localized_fields):
kwargs['localize'] = True
if labels and f.name in labels:
kwargs['label'] = labels[f.name]
if help_texts and f.name in help_texts:
kwargs['help_text'] = help_texts[f.name]
if error_messages and f.name in error_messages:
kwargs['error_messages'] = error_messages[f.name]
if field_classes and f.name in field_classes:
kwargs['form_class'] = field_classes[f.name]
if formfield_callback is None:
formfield = f.formfield(**kwargs)
elif not callable(formfield_callback):
raise TypeError('formfield_callback must be a function or callable')
else:
formfield = formfield_callback(f, **kwargs)
if formfield:
if apply_limit_choices_to:
apply_limit_choices_to_to_formfield(formfield)
field_list.append((f.name, formfield))
else:
ignored.append(f.name)
field_dict = OrderedDict(field_list)
if fields:
field_dict = OrderedDict(
[(f, field_dict.get(f)) for f in fields
if ((not exclude) or (exclude and f not in exclude)) and (f not in ignored)]
)
return field_dict
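# Usage sketch (illustrative, not part of Django itself; 'Author' is a
# hypothetical model): fields_for_model() is normally called via the ModelForm
# metaclass, but it can be used directly to inspect the generated form fields.
#
#   from django import forms
#   form_fields = fields_for_model(Author, fields=['name', 'email'],
#                                  widgets={'email': forms.EmailInput()})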
class ModelFormOptions(object):
def __init__(self, options=None):
self.model = getattr(options, 'model', None)
self.fields = getattr(options, 'fields', None)
self.exclude = getattr(options, 'exclude', None)
self.widgets = getattr(options, 'widgets', None)
self.localized_fields = getattr(options, 'localized_fields', None)
self.labels = getattr(options, 'labels', None)
self.help_texts = getattr(options, 'help_texts', None)
self.error_messages = getattr(options, 'error_messages', None)
self.field_classes = getattr(options, 'field_classes', None)
class ModelFormMetaclass(DeclarativeFieldsMetaclass):
def __new__(mcs, name, bases, attrs):
base_formfield_callback = None
for b in bases:
if hasattr(b, 'Meta') and hasattr(b.Meta, 'formfield_callback'):
base_formfield_callback = b.Meta.formfield_callback
break
formfield_callback = attrs.pop('formfield_callback', base_formfield_callback)
new_class = super(ModelFormMetaclass, mcs).__new__(mcs, name, bases, attrs)
if bases == (BaseModelForm,):
return new_class
opts = new_class._meta = ModelFormOptions(getattr(new_class, 'Meta', None))
# We check if a string was passed to `fields` or `exclude`,
# which is likely to be a mistake where the user typed ('foo') instead
# of ('foo',)
for opt in ['fields', 'exclude', 'localized_fields']:
value = getattr(opts, opt)
if isinstance(value, six.string_types) and value != ALL_FIELDS:
msg = ("%(model)s.Meta.%(opt)s cannot be a string. "
"Did you mean to type: ('%(value)s',)?" % {
'model': new_class.__name__,
'opt': opt,
'value': value,
})
raise TypeError(msg)
if opts.model:
# If a model is defined, extract form fields from it.
if opts.fields is None and opts.exclude is None:
raise ImproperlyConfigured(
"Creating a ModelForm without either the 'fields' attribute "
"or the 'exclude' attribute is prohibited; form %s "
"needs updating." % name
)
if opts.fields == ALL_FIELDS:
# Sentinel for fields_for_model to indicate "get the list of
# fields from the model"
opts.fields = None
fields = fields_for_model(
opts.model, opts.fields, opts.exclude, opts.widgets,
formfield_callback, opts.localized_fields, opts.labels,
opts.help_texts, opts.error_messages, opts.field_classes,
# limit_choices_to will be applied during ModelForm.__init__().
apply_limit_choices_to=False,
)
# make sure opts.fields doesn't specify an invalid field
none_model_fields = [k for k, v in six.iteritems(fields) if not v]
missing_fields = (set(none_model_fields) -
set(new_class.declared_fields.keys()))
if missing_fields:
message = 'Unknown field(s) (%s) specified for %s'
message = message % (', '.join(missing_fields),
opts.model.__name__)
raise FieldError(message)
# Override default model fields with any custom declared ones
# (plus, include all the other declared fields).
fields.update(new_class.declared_fields)
else:
fields = new_class.declared_fields
new_class.base_fields = fields
return new_class
class BaseModelForm(BaseForm):
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=None,
empty_permitted=False, instance=None, use_required_attribute=None):
opts = self._meta
if opts.model is None:
raise ValueError('ModelForm has no model class specified.')
if instance is None:
# if we didn't get an instance, instantiate a new one
self.instance = opts.model()
object_data = {}
else:
self.instance = instance
object_data = model_to_dict(instance, opts.fields, opts.exclude)
# if initial was provided, it should override the values from instance
if initial is not None:
object_data.update(initial)
# self._validate_unique will be set to True by BaseModelForm.clean().
# It is False by default so overriding self.clean() and failing to call
# super will stop validate_unique from being called.
self._validate_unique = False
super(BaseModelForm, self).__init__(
data, files, auto_id, prefix, object_data, error_class,
label_suffix, empty_permitted, use_required_attribute=use_required_attribute,
)
for formfield in self.fields.values():
apply_limit_choices_to_to_formfield(formfield)
def _get_validation_exclusions(self):
"""
For backwards-compatibility, several types of fields need to be
excluded from model validation. See the following tickets for
details: #12507, #12521, #12553
"""
exclude = []
# Build up a list of fields that should be excluded from model field
# validation and unique checks.
for f in self.instance._meta.fields:
field = f.name
# Exclude fields that aren't on the form. The developer may be
# adding these values to the model after form validation.
if field not in self.fields:
exclude.append(f.name)
# Don't perform model validation on fields that were defined
# manually on the form and excluded via the ModelForm's Meta
# class. See #12901.
elif self._meta.fields and field not in self._meta.fields:
exclude.append(f.name)
elif self._meta.exclude and field in self._meta.exclude:
exclude.append(f.name)
# Exclude fields that failed form validation. There's no need for
# the model fields to validate them as well.
elif field in self._errors.keys():
exclude.append(f.name)
# Exclude empty fields that are not required by the form, if the
# underlying model field is required. This keeps the model field
# from raising a required error. Note: don't exclude the field from
# validation if the model field allows blanks. If it does, the blank
# value may be included in a unique check, so cannot be excluded
# from validation.
else:
form_field = self.fields[field]
field_value = self.cleaned_data.get(field)
if not f.blank and not form_field.required and field_value in form_field.empty_values:
exclude.append(f.name)
return exclude
def clean(self):
self._validate_unique = True
return self.cleaned_data
def _update_errors(self, errors):
# Override any validation error messages defined at the model level
# with those defined at the form level.
opts = self._meta
# Allow the model generated by construct_instance() to raise
# ValidationError and have them handled in the same way as others.
if hasattr(errors, 'error_dict'):
error_dict = errors.error_dict
else:
error_dict = {NON_FIELD_ERRORS: errors}
for field, messages in error_dict.items():
if (field == NON_FIELD_ERRORS and opts.error_messages and
NON_FIELD_ERRORS in opts.error_messages):
error_messages = opts.error_messages[NON_FIELD_ERRORS]
elif field in self.fields:
error_messages = self.fields[field].error_messages
else:
continue
for message in messages:
if (isinstance(message, ValidationError) and
message.code in error_messages):
message.message = error_messages[message.code]
self.add_error(None, errors)
def _post_clean(self):
opts = self._meta
exclude = self._get_validation_exclusions()
# Foreign Keys being used to represent inline relationships
# are excluded from basic field value validation. This is for two
# reasons: firstly, the value may not be supplied (#12507; the
# case of providing new values to the admin); secondly the
# object being referred to may not yet fully exist (#12749).
# However, these fields *must* be included in uniqueness checks,
# so this can't be part of _get_validation_exclusions().
for name, field in self.fields.items():
if isinstance(field, InlineForeignKeyField):
exclude.append(name)
try:
self.instance = construct_instance(self, self.instance, opts.fields, opts.exclude)
except ValidationError as e:
self._update_errors(e)
try:
self.instance.full_clean(exclude=exclude, validate_unique=False)
except ValidationError as e:
self._update_errors(e)
# Validate uniqueness if needed.
if self._validate_unique:
self.validate_unique()
def validate_unique(self):
"""
Calls the instance's validate_unique() method and updates the form's
validation errors if any were raised.
"""
exclude = self._get_validation_exclusions()
try:
self.instance.validate_unique(exclude=exclude)
except ValidationError as e:
self._update_errors(e)
def _save_m2m(self):
"""
Save the many-to-many fields and generic relations for this form.
"""
cleaned_data = self.cleaned_data
exclude = self._meta.exclude
fields = self._meta.fields
opts = self.instance._meta
# Note that for historical reasons we want to include also
# private_fields here. (GenericRelation was previously a fake
# m2m field).
for f in chain(opts.many_to_many, opts.private_fields):
if not hasattr(f, 'save_form_data'):
continue
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
if f.name in cleaned_data:
f.save_form_data(self.instance, cleaned_data[f.name])
def save(self, commit=True):
"""
Save this form's self.instance object if commit=True. Otherwise, add
a save_m2m() method to the form which can be called after the instance
is saved manually at a later time. Return the model instance.
"""
if self.errors:
raise ValueError(
"The %s could not be %s because the data didn't validate." % (
self.instance._meta.object_name,
'created' if self.instance._state.adding else 'changed',
)
)
if commit:
# If committing, save the instance and the m2m data immediately.
self.instance.save()
self._save_m2m()
else:
# If not committing, add a method to the form to allow deferred
# saving of m2m data.
self.save_m2m = self._save_m2m
return self.instance
save.alters_data = True
class ModelForm(six.with_metaclass(ModelFormMetaclass, BaseModelForm)):
pass
def modelform_factory(model, form=ModelForm, fields=None, exclude=None,
formfield_callback=None, widgets=None, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
field_classes=None):
"""
Returns a ModelForm containing form fields for the given model.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned fields. If omitted or '__all__',
all fields will be used.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned fields, even if they are listed
in the ``fields`` argument.
``widgets`` is a dictionary of model field names mapped to a widget.
``localized_fields`` is a list of names of fields which should be localized.
``formfield_callback`` is a callable that takes a model field and returns
a form field.
``labels`` is a dictionary of model field names mapped to a label.
``help_texts`` is a dictionary of model field names mapped to a help text.
``error_messages`` is a dictionary of model field names mapped to a
dictionary of error messages.
``field_classes`` is a dictionary of model field names mapped to a form
field class.
"""
# Create the inner Meta class. FIXME: ideally, we should be able to
# construct a ModelForm without creating and passing in a temporary
# inner class.
# Build up a list of attributes that the Meta object will have.
attrs = {'model': model}
if fields is not None:
attrs['fields'] = fields
if exclude is not None:
attrs['exclude'] = exclude
if widgets is not None:
attrs['widgets'] = widgets
if localized_fields is not None:
attrs['localized_fields'] = localized_fields
if labels is not None:
attrs['labels'] = labels
if help_texts is not None:
attrs['help_texts'] = help_texts
if error_messages is not None:
attrs['error_messages'] = error_messages
if field_classes is not None:
attrs['field_classes'] = field_classes
# If parent form class already has an inner Meta, the Meta we're
# creating needs to inherit from the parent's inner meta.
parent = (object,)
if hasattr(form, 'Meta'):
parent = (form.Meta, object)
Meta = type(str('Meta'), parent, attrs)
if formfield_callback:
Meta.formfield_callback = staticmethod(formfield_callback)
# Give this new form class a reasonable name.
class_name = model.__name__ + str('Form')
# Class attributes for the new form class.
form_class_attrs = {
'Meta': Meta,
'formfield_callback': formfield_callback
}
if (getattr(Meta, 'fields', None) is None and
getattr(Meta, 'exclude', None) is None):
raise ImproperlyConfigured(
"Calling modelform_factory without defining 'fields' or "
"'exclude' explicitly is prohibited."
)
# Instantiate type(form) in order to use the same metaclass as form.
return type(form)(class_name, (form,), form_class_attrs)
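# Usage sketch (illustrative, not part of Django itself; 'Author' is a
# hypothetical model): building and using a ModelForm class on the fly.
#
#   AuthorForm = modelform_factory(Author, fields=['name', 'email'])
#   form = AuthorForm(request.POST or None, instance=author)
#   if form.is_valid():
#       form.save()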
# ModelFormSets ##############################################################
class BaseModelFormSet(BaseFormSet):
"""
A ``FormSet`` for editing a queryset and/or adding new objects to it.
"""
model = None
# Set of fields that must be unique among forms of this set.
unique_fields = set()
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
queryset=None, **kwargs):
self.queryset = queryset
self.initial_extra = kwargs.pop('initial', None)
defaults = {'data': data, 'files': files, 'auto_id': auto_id, 'prefix': prefix}
defaults.update(kwargs)
super(BaseModelFormSet, self).__init__(**defaults)
def initial_form_count(self):
"""Returns the number of forms that are required in this FormSet."""
if not (self.data or self.files):
return len(self.get_queryset())
return super(BaseModelFormSet, self).initial_form_count()
def _existing_object(self, pk):
if not hasattr(self, '_object_dict'):
self._object_dict = {o.pk: o for o in self.get_queryset()}
return self._object_dict.get(pk)
def _get_to_python(self, field):
"""
If the field is a related field, fetch the concrete field's (that
is, the ultimate pointed-to field's) to_python.
"""
while field.remote_field is not None:
field = field.remote_field.get_related_field()
return field.to_python
def _construct_form(self, i, **kwargs):
if self.is_bound and i < self.initial_form_count():
pk_key = "%s-%s" % (self.add_prefix(i), self.model._meta.pk.name)
pk = self.data[pk_key]
pk_field = self.model._meta.pk
to_python = self._get_to_python(pk_field)
pk = to_python(pk)
kwargs['instance'] = self._existing_object(pk)
if i < self.initial_form_count() and 'instance' not in kwargs:
kwargs['instance'] = self.get_queryset()[i]
if i >= self.initial_form_count() and self.initial_extra:
# Set initial values for extra forms
try:
kwargs['initial'] = self.initial_extra[i - self.initial_form_count()]
except IndexError:
pass
return super(BaseModelFormSet, self)._construct_form(i, **kwargs)
def get_queryset(self):
if not hasattr(self, '_queryset'):
if self.queryset is not None:
qs = self.queryset
else:
qs = self.model._default_manager.get_queryset()
# If the queryset isn't already ordered we need to add an
# artificial ordering here to make sure that all formsets
# constructed from this queryset have the same form order.
if not qs.ordered:
qs = qs.order_by(self.model._meta.pk.name)
# Removed queryset limiting here. As per discussion re: #13023
# on django-dev, max_num should not prevent existing
# related objects/inlines from being displayed.
self._queryset = qs
return self._queryset
def save_new(self, form, commit=True):
"""Saves and returns a new model instance for the given form."""
return form.save(commit=commit)
def save_existing(self, form, instance, commit=True):
"""Saves and returns an existing model instance for the given form."""
return form.save(commit=commit)
def delete_existing(self, obj, commit=True):
"""Deletes an existing model instance."""
if commit:
obj.delete()
def save(self, commit=True):
"""Saves model instances for every form, adding and changing instances
as necessary, and returns the list of instances.
"""
if not commit:
self.saved_forms = []
def save_m2m():
for form in self.saved_forms:
form.save_m2m()
self.save_m2m = save_m2m
return self.save_existing_objects(commit) + self.save_new_objects(commit)
save.alters_data = True
def clean(self):
self.validate_unique()
def validate_unique(self):
# Collect unique_checks and date_checks to run from all the forms.
all_unique_checks = set()
all_date_checks = set()
forms_to_delete = self.deleted_forms
valid_forms = [form for form in self.forms if form.is_valid() and form not in forms_to_delete]
for form in valid_forms:
exclude = form._get_validation_exclusions()
unique_checks, date_checks = form.instance._get_unique_checks(exclude=exclude)
all_unique_checks = all_unique_checks.union(set(unique_checks))
all_date_checks = all_date_checks.union(set(date_checks))
errors = []
# Do each of the unique checks (unique and unique_together)
for uclass, unique_check in all_unique_checks:
seen_data = set()
for form in valid_forms:
# Get the data for the set of fields that must be unique among the forms.
row_data = (
field if field in self.unique_fields else form.cleaned_data[field]
for field in unique_check if field in form.cleaned_data
)
# Reduce Model instances to their primary key values
row_data = tuple(d._get_pk_val() if hasattr(d, '_get_pk_val') else d
for d in row_data)
if row_data and None not in row_data:
# if we've already seen it then we have a uniqueness failure
if row_data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_unique_error_message(unique_check))
form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
# remove the data from the cleaned_data dict since it was invalid
for field in unique_check:
if field in form.cleaned_data:
del form.cleaned_data[field]
# mark the data as seen
seen_data.add(row_data)
# iterate over each of the date checks now
for date_check in all_date_checks:
seen_data = set()
uclass, lookup, field, unique_for = date_check
for form in valid_forms:
# see if we have data for both fields
if (form.cleaned_data and form.cleaned_data[field] is not None and
form.cleaned_data[unique_for] is not None):
# if it's a date lookup we need to get the data for all the fields
if lookup == 'date':
date = form.cleaned_data[unique_for]
date_data = (date.year, date.month, date.day)
# otherwise it's just the attribute on the date/datetime
# object
else:
date_data = (getattr(form.cleaned_data[unique_for], lookup),)
data = (form.cleaned_data[field],) + date_data
# if we've already seen it then we have a uniqueness failure
if data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_date_error_message(date_check))
form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
# remove the data from the cleaned_data dict since it was invalid
del form.cleaned_data[field]
# mark the data as seen
seen_data.add(data)
if errors:
raise ValidationError(errors)
def get_unique_error_message(self, unique_check):
if len(unique_check) == 1:
return ugettext("Please correct the duplicate data for %(field)s.") % {
"field": unique_check[0],
}
else:
return ugettext("Please correct the duplicate data for %(field)s, which must be unique.") % {
"field": get_text_list(unique_check, six.text_type(_("and"))),
}
def get_date_error_message(self, date_check):
return ugettext(
"Please correct the duplicate data for %(field_name)s "
"which must be unique for the %(lookup)s in %(date_field)s."
) % {
'field_name': date_check[2],
'date_field': date_check[3],
'lookup': six.text_type(date_check[1]),
}
def get_form_error(self):
return ugettext("Please correct the duplicate values below.")
def save_existing_objects(self, commit=True):
self.changed_objects = []
self.deleted_objects = []
if not self.initial_forms:
return []
saved_instances = []
forms_to_delete = self.deleted_forms
for form in self.initial_forms:
obj = form.instance
# If the pk is None, it means either:
# 1. The object is an unexpected empty model, created by invalid
# POST data such as an object outside the formset's queryset.
# 2. The object was already deleted from the database.
if obj.pk is None:
continue
if form in forms_to_delete:
self.deleted_objects.append(obj)
self.delete_existing(obj, commit=commit)
elif form.has_changed():
self.changed_objects.append((obj, form.changed_data))
saved_instances.append(self.save_existing(form, obj, commit=commit))
if not commit:
self.saved_forms.append(form)
return saved_instances
def save_new_objects(self, commit=True):
self.new_objects = []
for form in self.extra_forms:
if not form.has_changed():
continue
# If someone has marked an add form for deletion, don't save the
# object.
if self.can_delete and self._should_delete_form(form):
continue
self.new_objects.append(self.save_new(form, commit=commit))
if not commit:
self.saved_forms.append(form)
return self.new_objects
def add_fields(self, form, index):
"""Add a hidden field for the object's primary key."""
from django.db.models import AutoField, OneToOneField, ForeignKey
self._pk_field = pk = self.model._meta.pk
# If a pk isn't editable, then it won't be on the form, so we need to
# add it here so we can tell which object is which when we get the
# data back. Generally, pk.editable should be false, but for some
        # reason, auto_created pk fields and AutoFields have their editable
        # attribute set to True, so check for that as well.
def pk_is_not_editable(pk):
return (
(not pk.editable) or (pk.auto_created or isinstance(pk, AutoField)) or (
pk.remote_field and pk.remote_field.parent_link and
pk_is_not_editable(pk.remote_field.model._meta.pk)
)
)
if pk_is_not_editable(pk) or pk.name not in form.fields:
if form.is_bound:
# If we're adding the related instance, ignore its primary key
# as it could be an auto-generated default which isn't actually
# in the database.
pk_value = None if form.instance._state.adding else form.instance.pk
else:
try:
if index is not None:
pk_value = self.get_queryset()[index].pk
else:
pk_value = None
except IndexError:
pk_value = None
if isinstance(pk, OneToOneField) or isinstance(pk, ForeignKey):
qs = pk.remote_field.model._default_manager.get_queryset()
else:
qs = self.model._default_manager.get_queryset()
qs = qs.using(form.instance._state.db)
if form._meta.widgets:
widget = form._meta.widgets.get(self._pk_field.name, HiddenInput)
else:
widget = HiddenInput
form.fields[self._pk_field.name] = ModelChoiceField(qs, initial=pk_value, required=False, widget=widget)
super(BaseModelFormSet, self).add_fields(form, index)
def modelformset_factory(model, form=ModelForm, formfield_callback=None,
formset=BaseModelFormSet, extra=1, can_delete=False,
can_order=False, max_num=None, fields=None, exclude=None,
widgets=None, validate_max=False, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
min_num=None, validate_min=False, field_classes=None):
"""
Returns a FormSet class for the given Django model class.
"""
meta = getattr(form, 'Meta', None)
if (getattr(meta, 'fields', fields) is None and
getattr(meta, 'exclude', exclude) is None):
raise ImproperlyConfigured(
"Calling modelformset_factory without defining 'fields' or "
"'exclude' explicitly is prohibited."
)
form = modelform_factory(model, form=form, fields=fields, exclude=exclude,
formfield_callback=formfield_callback,
widgets=widgets, localized_fields=localized_fields,
labels=labels, help_texts=help_texts,
error_messages=error_messages, field_classes=field_classes)
FormSet = formset_factory(form, formset, extra=extra, min_num=min_num, max_num=max_num,
can_order=can_order, can_delete=can_delete,
validate_min=validate_min, validate_max=validate_max)
FormSet.model = model
return FormSet
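# Usage sketch (illustrative, not part of Django itself; 'Author' is a
# hypothetical model): a formset editing a queryset plus one extra blank form.
#
#   AuthorFormSet = modelformset_factory(Author, fields=['name'], extra=1)
#   formset = AuthorFormSet(request.POST or None,
#                           queryset=Author.objects.order_by('name'))
#   if formset.is_valid():
#       formset.save()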
# InlineFormSets #############################################################
class BaseInlineFormSet(BaseModelFormSet):
"""A formset for child objects related to a parent."""
def __init__(self, data=None, files=None, instance=None,
save_as_new=False, prefix=None, queryset=None, **kwargs):
if instance is None:
self.instance = self.fk.remote_field.model()
else:
self.instance = instance
self.save_as_new = save_as_new
if queryset is None:
queryset = self.model._default_manager
if self.instance.pk is not None:
qs = queryset.filter(**{self.fk.name: self.instance})
else:
qs = queryset.none()
self.unique_fields = {self.fk.name}
super(BaseInlineFormSet, self).__init__(data, files, prefix=prefix,
queryset=qs, **kwargs)
# Add the generated field to form._meta.fields if it's defined to make
# sure validation isn't skipped on that field.
if self.form._meta.fields and self.fk.name not in self.form._meta.fields:
if isinstance(self.form._meta.fields, tuple):
self.form._meta.fields = list(self.form._meta.fields)
self.form._meta.fields.append(self.fk.name)
def initial_form_count(self):
if self.save_as_new:
return 0
return super(BaseInlineFormSet, self).initial_form_count()
def _construct_form(self, i, **kwargs):
form = super(BaseInlineFormSet, self)._construct_form(i, **kwargs)
if self.save_as_new:
mutable = getattr(form.data, '_mutable', None)
# Allow modifying an immutable QueryDict.
if mutable is not None:
form.data._mutable = True
# Remove the primary key from the form's data, we are only
# creating new instances
form.data[form.add_prefix(self._pk_field.name)] = None
# Remove the foreign key from the form's data
form.data[form.add_prefix(self.fk.name)] = None
if mutable is not None:
form.data._mutable = mutable
# Set the fk value here so that the form can do its validation.
fk_value = self.instance.pk
if self.fk.remote_field.field_name != self.fk.remote_field.model._meta.pk.name:
fk_value = getattr(self.instance, self.fk.remote_field.field_name)
fk_value = getattr(fk_value, 'pk', fk_value)
setattr(form.instance, self.fk.get_attname(), fk_value)
return form
@classmethod
def get_default_prefix(cls):
return cls.fk.remote_field.get_accessor_name(model=cls.model).replace('+', '')
def save_new(self, form, commit=True):
# Ensure the latest copy of the related instance is present on each
# form (it may have been saved after the formset was originally
# instantiated).
setattr(form.instance, self.fk.name, self.instance)
# Use commit=False so we can assign the parent key afterwards, then
# save the object.
obj = form.save(commit=False)
pk_value = getattr(self.instance, self.fk.remote_field.field_name)
setattr(obj, self.fk.get_attname(), getattr(pk_value, 'pk', pk_value))
if commit:
obj.save()
# form.save_m2m() can be called via the formset later on if commit=False
if commit and hasattr(form, 'save_m2m'):
form.save_m2m()
return obj
def add_fields(self, form, index):
super(BaseInlineFormSet, self).add_fields(form, index)
if self._pk_field == self.fk:
name = self._pk_field.name
kwargs = {'pk_field': True}
else:
# The foreign key field might not be on the form, so we poke at the
# Model field to get the label, since we need that for error messages.
name = self.fk.name
kwargs = {
'label': getattr(form.fields.get(name), 'label', capfirst(self.fk.verbose_name))
}
if self.fk.remote_field.field_name != self.fk.remote_field.model._meta.pk.name:
kwargs['to_field'] = self.fk.remote_field.field_name
# If we're adding a new object, ignore a parent's auto-generated key
# as it will be regenerated on the save request.
if self.instance._state.adding:
if kwargs.get('to_field') is not None:
to_field = self.instance._meta.get_field(kwargs['to_field'])
else:
to_field = self.instance._meta.pk
if to_field.has_default():
setattr(self.instance, to_field.attname, None)
form.fields[name] = InlineForeignKeyField(self.instance, **kwargs)
def get_unique_error_message(self, unique_check):
unique_check = [field for field in unique_check if field != self.fk.name]
return super(BaseInlineFormSet, self).get_unique_error_message(unique_check)
def _get_foreign_key(parent_model, model, fk_name=None, can_fail=False):
"""
Finds and returns the ForeignKey from model to parent if there is one
(returns None if can_fail is True and no such field exists). If fk_name is
provided, assume it is the name of the ForeignKey field. Unless can_fail is
True, an exception is raised if there is no ForeignKey from model to
parent_model.
"""
# avoid circular import
from django.db.models import ForeignKey
opts = model._meta
if fk_name:
fks_to_parent = [f for f in opts.fields if f.name == fk_name]
if len(fks_to_parent) == 1:
fk = fks_to_parent[0]
if not isinstance(fk, ForeignKey) or \
(fk.remote_field.model != parent_model and
fk.remote_field.model not in parent_model._meta.get_parent_list()):
raise ValueError(
"fk_name '%s' is not a ForeignKey to '%s'." % (fk_name, parent_model._meta.label)
)
elif len(fks_to_parent) == 0:
raise ValueError(
"'%s' has no field named '%s'." % (model._meta.label, fk_name)
)
else:
# Try to discover what the ForeignKey from model to parent_model is
fks_to_parent = [
f for f in opts.fields
if isinstance(f, ForeignKey) and (
f.remote_field.model == parent_model or
f.remote_field.model in parent_model._meta.get_parent_list()
)
]
if len(fks_to_parent) == 1:
fk = fks_to_parent[0]
elif len(fks_to_parent) == 0:
if can_fail:
return
raise ValueError(
"'%s' has no ForeignKey to '%s'." % (
model._meta.label,
parent_model._meta.label,
)
)
else:
raise ValueError(
"'%s' has more than one ForeignKey to '%s'." % (
model._meta.label,
parent_model._meta.label,
)
)
return fk
def inlineformset_factory(parent_model, model, form=ModelForm,
formset=BaseInlineFormSet, fk_name=None,
fields=None, exclude=None, extra=3, can_order=False,
can_delete=True, max_num=None, formfield_callback=None,
widgets=None, validate_max=False, localized_fields=None,
labels=None, help_texts=None, error_messages=None,
min_num=None, validate_min=False, field_classes=None):
"""
Returns an ``InlineFormSet`` for the given kwargs.
You must provide ``fk_name`` if ``model`` has more than one ``ForeignKey``
to ``parent_model``.
"""
fk = _get_foreign_key(parent_model, model, fk_name=fk_name)
# enforce a max_num=1 when the foreign key to the parent model is unique.
if fk.unique:
max_num = 1
kwargs = {
'form': form,
'formfield_callback': formfield_callback,
'formset': formset,
'extra': extra,
'can_delete': can_delete,
'can_order': can_order,
'fields': fields,
'exclude': exclude,
'min_num': min_num,
'max_num': max_num,
'widgets': widgets,
'validate_min': validate_min,
'validate_max': validate_max,
'localized_fields': localized_fields,
'labels': labels,
'help_texts': help_texts,
'error_messages': error_messages,
'field_classes': field_classes,
}
FormSet = modelformset_factory(model, **kwargs)
FormSet.fk = fk
return FormSet
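# Usage sketch (illustrative, not part of Django itself; 'Author' and 'Book'
# are hypothetical models with Book.author as the ForeignKey): editing child
# rows inline on their parent instance.
#
#   BookFormSet = inlineformset_factory(Author, Book, fields=['title'], extra=2)
#   formset = BookFormSet(request.POST or None, instance=author)
#   if formset.is_valid():
#       formset.save()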
# Fields #####################################################################
class InlineForeignKeyField(Field):
"""
A basic integer field that deals with validating the given value to a
given parent instance in an inline.
"""
widget = HiddenInput
default_error_messages = {
'invalid_choice': _('The inline foreign key did not match the parent instance primary key.'),
}
def __init__(self, parent_instance, *args, **kwargs):
self.parent_instance = parent_instance
self.pk_field = kwargs.pop("pk_field", False)
self.to_field = kwargs.pop("to_field", None)
if self.parent_instance is not None:
if self.to_field:
kwargs["initial"] = getattr(self.parent_instance, self.to_field)
else:
kwargs["initial"] = self.parent_instance.pk
kwargs["required"] = False
super(InlineForeignKeyField, self).__init__(*args, **kwargs)
def clean(self, value):
if value in self.empty_values:
if self.pk_field:
return None
# if there is no value act as we did before.
return self.parent_instance
        # ensure that we compare the values as equal types.
if self.to_field:
orig = getattr(self.parent_instance, self.to_field)
else:
orig = self.parent_instance.pk
if force_text(value) != force_text(orig):
raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
return self.parent_instance
def has_changed(self, initial, data):
return False
class ModelChoiceIterator(object):
def __init__(self, field):
self.field = field
self.queryset = field.queryset
def __iter__(self):
if self.field.empty_label is not None:
yield ("", self.field.empty_label)
queryset = self.queryset.all()
# Can't use iterator() when queryset uses prefetch_related()
if not queryset._prefetch_related_lookups:
queryset = queryset.iterator()
for obj in queryset:
yield self.choice(obj)
def __len__(self):
return (len(self.queryset) + (1 if self.field.empty_label is not None else 0))
def choice(self, obj):
return (self.field.prepare_value(obj), self.field.label_from_instance(obj))
class ModelChoiceField(ChoiceField):
"""A ChoiceField whose choices are a model QuerySet."""
# This class is a subclass of ChoiceField for purity, but it doesn't
# actually use any of ChoiceField's implementation.
default_error_messages = {
'invalid_choice': _('Select a valid choice. That choice is not one of'
' the available choices.'),
}
iterator = ModelChoiceIterator
def __init__(self, queryset, empty_label="---------",
required=True, widget=None, label=None, initial=None,
help_text='', to_field_name=None, limit_choices_to=None,
*args, **kwargs):
if required and (initial is not None):
self.empty_label = None
else:
self.empty_label = empty_label
# Call Field instead of ChoiceField __init__() because we don't need
# ChoiceField.__init__().
Field.__init__(self, required, widget, label, initial, help_text,
*args, **kwargs)
self.queryset = queryset
self.limit_choices_to = limit_choices_to # limit the queryset later.
self.to_field_name = to_field_name
def get_limit_choices_to(self):
"""
Returns ``limit_choices_to`` for this form field.
If it is a callable, it will be invoked and the result will be
returned.
"""
if callable(self.limit_choices_to):
return self.limit_choices_to()
return self.limit_choices_to
def __deepcopy__(self, memo):
result = super(ChoiceField, self).__deepcopy__(memo)
# Need to force a new ModelChoiceIterator to be created, bug #11183
if self.queryset is not None:
result.queryset = self.queryset.all()
return result
def _get_queryset(self):
return self._queryset
def _set_queryset(self, queryset):
self._queryset = queryset
self.widget.choices = self.choices
queryset = property(_get_queryset, _set_queryset)
# this method will be used to create object labels by the QuerySetIterator.
# Override it to customize the label.
def label_from_instance(self, obj):
"""
This method is used to convert objects into strings; it's used to
generate the labels for the choices presented by this object. Subclasses
can override this method to customize the display of the choices.
"""
return force_text(obj)
def _get_choices(self):
# If self._choices is set, then somebody must have manually set
# the property self.choices. In this case, just return self._choices.
if hasattr(self, '_choices'):
return self._choices
# Otherwise, execute the QuerySet in self.queryset to determine the
# choices dynamically. Return a fresh ModelChoiceIterator that has not been
# consumed. Note that we're instantiating a new ModelChoiceIterator *each*
# time _get_choices() is called (and, thus, each time self.choices is
# accessed) so that we can ensure the QuerySet has not been consumed. This
# construct might look complicated but it allows for lazy evaluation of
# the queryset.
return self.iterator(self)
choices = property(_get_choices, ChoiceField._set_choices)
def prepare_value(self, value):
if hasattr(value, '_meta'):
if self.to_field_name:
return value.serializable_value(self.to_field_name)
else:
return value.pk
return super(ModelChoiceField, self).prepare_value(value)
def to_python(self, value):
if value in self.empty_values:
return None
try:
key = self.to_field_name or 'pk'
value = self.queryset.get(**{key: value})
except (ValueError, TypeError, self.queryset.model.DoesNotExist):
raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
return value
def validate(self, value):
return Field.validate(self, value)
def has_changed(self, initial, data):
initial_value = initial if initial is not None else ''
data_value = data if data is not None else ''
return force_text(self.prepare_value(initial_value)) != force_text(data_value)
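# Usage sketch (illustrative, not part of Django itself; 'Author' is a
# hypothetical model): a ModelChoiceField restricted to a subset of rows.
#
#   from django import forms
#
#   class PickAuthorForm(forms.Form):
#       author = ModelChoiceField(queryset=Author.objects.filter(active=True),
#                                 empty_label="(choose an author)")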
class ModelMultipleChoiceField(ModelChoiceField):
"""A MultipleChoiceField whose choices are a model QuerySet."""
widget = SelectMultiple
hidden_widget = MultipleHiddenInput
default_error_messages = {
'list': _('Enter a list of values.'),
'invalid_choice': _('Select a valid choice. %(value)s is not one of the'
' available choices.'),
'invalid_pk_value': _('"%(pk)s" is not a valid value for a primary key.')
}
def __init__(self, queryset, required=True, widget=None, label=None,
initial=None, help_text='', *args, **kwargs):
super(ModelMultipleChoiceField, self).__init__(
queryset, None, required, widget, label, initial, help_text,
*args, **kwargs
)
def to_python(self, value):
if not value:
return []
return list(self._check_values(value))
def clean(self, value):
value = self.prepare_value(value)
if self.required and not value:
raise ValidationError(self.error_messages['required'], code='required')
elif not self.required and not value:
return self.queryset.none()
if not isinstance(value, (list, tuple)):
raise ValidationError(self.error_messages['list'], code='list')
qs = self._check_values(value)
# Since this overrides the inherited ModelChoiceField.clean
# we run custom validators here
self.run_validators(value)
return qs
def _check_values(self, value):
"""
Given a list of possible PK values, returns a QuerySet of the
corresponding objects. Raises a ValidationError if a given value is
invalid (not a valid PK, not in the queryset, etc.)
"""
key = self.to_field_name or 'pk'
        # Deduplicate the given values to avoid creating many querysets or
        # requiring the database backend to deduplicate efficiently.
try:
value = frozenset(value)
except TypeError:
# list of lists isn't hashable, for example
raise ValidationError(
self.error_messages['list'],
code='list',
)
for pk in value:
try:
self.queryset.filter(**{key: pk})
except (ValueError, TypeError):
raise ValidationError(
self.error_messages['invalid_pk_value'],
code='invalid_pk_value',
params={'pk': pk},
)
qs = self.queryset.filter(**{'%s__in' % key: value})
pks = set(force_text(getattr(o, key)) for o in qs)
for val in value:
if force_text(val) not in pks:
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': val},
)
return qs
def prepare_value(self, value):
if (hasattr(value, '__iter__') and
not isinstance(value, six.text_type) and
not hasattr(value, '_meta')):
return [super(ModelMultipleChoiceField, self).prepare_value(v) for v in value]
return super(ModelMultipleChoiceField, self).prepare_value(value)
def has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
initial_set = set(force_text(value) for value in self.prepare_value(initial))
data_set = set(force_text(value) for value in data)
return data_set != initial_set
def modelform_defines_fields(form_class):
return (form_class is not None and (
hasattr(form_class, '_meta') and
(form_class._meta.fields is not None or
form_class._meta.exclude is not None)
))
| mit | 2,205,083,225,850,833,000 | 40.882222 | 116 | 0.590085 | false |
onoga/wm | src/gnue/common/datasources/GConditions.py | 2 | 55841 | # GNU Enterprise Common Library - XML elements for conditions
#
# Copyright 2000-2007 Free Software Foundation
#
# This file is part of GNU Enterprise.
#
# GNU Enterprise is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2, or (at your option) any later version.
#
# GNU Enterprise is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with program; see the file COPYING. If not,
# write to the Free Software Foundation, Inc., 59 Temple Place
# - Suite 330, Boston, MA 02111-1307, USA.
#
# $Id: GConditions.py 9244 2007-01-09 19:25:54Z reinhard $
"""
Classes for the condition object tree.
"""
import re
import sys
import datetime
if sys.hexversion >= 0x02040000:
import decimal
from gnue.common.apps import errors
from gnue.common.definitions import GObjects
from gnue.common.formatting import GTypecast
from gnue.common.utils import GDateTime
# =============================================================================
# Exceptions
# =============================================================================
# -----------------------------------------------------------------------------
# Abstract base class
# -----------------------------------------------------------------------------
class ConditionError (errors.ApplicationError):
"""
Abstract base class for all errors in condition definitions.
"""
pass
# -----------------------------------------------------------------------------
# Malformed condition tree
# -----------------------------------------------------------------------------
class MalformedConditionTreeError (ConditionError):
"""
Abstract base class for all errors in the structure of the condition tree.
"""
pass
# -----------------------------------------------------------------------------
class ArgumentCountError (MalformedConditionTreeError):
"""
Number of child elements is incorrect.
"""
def __init__ (self, element, wanted):
    msg = u_("Condition element '%(element)s' was expected to have '%(wanted)d' "
        "arguments, but only has '%(real)d'") \
% {'element': element._type,
'wanted' : wanted,
'real' : len (element._children)}
MalformedConditionTreeError.__init__ (self, msg)
# -----------------------------------------------------------------------------
# Field value not in lookup dictionary
# -----------------------------------------------------------------------------
class MissingFieldError (ConditionError):
"""
Cannot find field value on attempt to evaluate a condition.
"""
def __init__ (self, field):
msg = u_("The field '%(field)s' has no entry in the given lookup-table") \
% {'field': field }
ConditionError.__init__ (self, msg)
# -----------------------------------------------------------------------------
# Errors on unifying of different types
# -----------------------------------------------------------------------------
class UnificationError (errors.ApplicationError):
"""
Abstract base class for all errors on unifying different data types.
"""
pass
# -----------------------------------------------------------------------------
class ConversionRuleError (UnificationError):
"""
Cannot convert both data types into a common compatible type.
"""
def __init__ (self, value1, value2):
msg = u_("No unification rule for combination '%(type1)s' and "
"'%(type2)s'") \
% {'type1': type (value1).__name__,
'type2': type (value2).__name__}
UnificationError.__init__ (self, msg)
# -----------------------------------------------------------------------------
class ConversionError (UnificationError):
"""
Cannot convert a value.
"""
def __init__ (self, value1, value2):
msg = u_("Value '%(value1)s' of type '%(type1)s' cannot be converted "
"into type '%(type2)s'") \
% {'value1': value1,
'type1' : type (value1).__name__,
'type2' : type (value2).__name__}
UnificationError.__init__ (self, msg)
# =============================================================================
# Base condition class; this is class is the root node of condition trees
# =============================================================================
class GCondition (GObjects.GObj):
"""
  A GCondition instance is always the root node of a condition tree. All
children of a GCondition node are evaluated and combined using an AND
condition if not otherwise stated.
@ivar _maxChildren_: if not None specifies the maximum number of children
allowed for a condition element.
@ivar _operator_: unicode string defining the operator used for SQL
transformation of a condition element.
"""
# ---------------------------------------------------------------------------
# Constructor
# ---------------------------------------------------------------------------
def __init__(self, parent = None, type = "GCCondition", prefixList = None):
"""
@param parent: Parent instance in the GObj tree owning this instance
@param type: type of this instance (usually 'GCCondition')
@param prefixList: a condition in prefix notation; if this sequence is not
None, a condition tree according to this sequence will be built. This
instance is the root element of the newly created condition tree.
"""
GObjects.GObj.__init__ (self, parent, type = type)
self._maxChildren_ = None
self._operator_ = u""
if prefixList is not None:
self.buildFromList (prefixList)
self.validate ()
# ---------------------------------------------------------------------------
# Make sure an element of the tree has the requested number of children
# ---------------------------------------------------------------------------
def _needChildren (self):
"""
Ensure that the requested number of children is available.
@raise ArgumentCountError: raised if the number of children does not match
_maxChildren_.
"""
if self._maxChildren_ is not None and \
len (self._children) != self._maxChildren_:
raise ArgumentCountError, (self, self._maxChildren_)
# ---------------------------------------------------------------------------
# Evaluate a condition tree using the given lookup dictionary
# ---------------------------------------------------------------------------
def evaluate (self, lookup):
"""
Evaluate a condition tree using a given lookup dictionary for field- and
parameter-values. Evaluation stops on the first False result.
@param lookup: dictionary used for lookups of field- and parameter-values.
@return: True or False
@raise ArgumentCountError: if the number of child elements somewhere in the
tree is incorrect.
@raise MissingFieldError: if not all fields appearing in the condition tree
are assigned a value in the lookup dictionary.
@raise ConversionRuleError: if any operation is given two incompatible
arguments.
@raise ConversionError: if the type conversion needed to make arguments of
      an operation compatible fails.
"""
self.validate ()
for child in self._children:
if not child.evaluate (lookup):
return False
return True
# ---------------------------------------------------------------------------
# Validate an element of a condition tree
# ---------------------------------------------------------------------------
def validate (self):
"""
    This function calls validate () on all its children. Descendants might
override this function to do integrity checks and things like that.
@raise ArgumentCountError: if the number of child elements somewhere in the
tree is incorrect.
"""
self._needChildren ()
for child in self._children:
child.validate ()
# ---------------------------------------------------------------------------
# Convert an element into prefix notation
# ---------------------------------------------------------------------------
def prefixNotation (self):
"""
    This function returns the prefix notation of an element and all its
children.
"""
result = []
append = result.extend
if isinstance (self, GConditionElement):
result.append (self._type [2:])
append = result.append
for child in self._children:
append (child.prefixNotation ())
return result
# ---------------------------------------------------------------------------
  # Return the condition tree as an SQL string
# ---------------------------------------------------------------------------
def asSQL (self, paramDict):
"""
Return the condition tree as SQL string in python-format using placeholders
and a given parameter dictionary.
    Example::
      condition = GCondition (prefixList = ['eq', ['field', 'foobar'],
                                                  ['const', 'barbaz']])
      result = condition.asSQL (pDict)
      # result == 'foobar = %(p0)s'
      # pDict  == {'p0': 'barbaz'}
@param paramDict: dictionary with all parameter values. this dictionary
will be populated with all placeholders used in the SQL string.
@return: SQL string representing the current condition
"""
f = isinstance (self.getParent (), GConditionElement) and u'(%s)' or u'%s'
op = u' %s ' % self._operator_
return f % op.join ([c.asSQL (paramDict) for c in self._children])
# ---------------------------------------------------------------------------
  # Build an element and all its children from a prefix notation list
# ---------------------------------------------------------------------------
def buildFromList (self, prefixList):
"""
This function creates a (partial) condition tree from a prefix notation
list.
@param prefixList: condition element sequence in prefix notation
@return: GCondition tree
"""
checktype (prefixList, list)
if len (prefixList):
item = prefixList [0]
# be nice if there's a condition part missing
offset = 1
if isinstance (item, list):
self.buildFromList (item)
element = self
else:
# automatically map 'field' to 'Field' and 'const' to 'Const'
if item in ['field', 'const']:
item = item.title ()
element = getattr (sys.modules [__name__], "GC%s" % item) (self)
if item == 'exist':
(table, masterlink, detaillink) = prefixList [1:4]
element.table = table
element.masterlink = masterlink
element.detaillink = detaillink
offset = 4
for subitem in prefixList [offset:]:
element.buildFromList (subitem)
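# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): building a condition
# tree from a prefix list, evaluating it and rendering it as SQL.  The field
# name and the values below are made up for this example.
# -----------------------------------------------------------------------------
def _exampleCondition ():
  condition = GCondition (prefixList = ['eq', ['field', 'foobar'],
                                              ['const', 'barbaz']])
  # Evaluation against a lookup dictionary yields True or False
  assert condition.evaluate ({'foobar': 'barbaz'})
  # SQL generation fills a parameter dictionary with the placeholder values
  params = {}
  sql = condition.asSQL (params)
  # sql    == 'foobar = %(p0)s'
  # params == {'p0': 'barbaz'}
  return sql, params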
# =============================================================================
# Parent class for all condition elements
# =============================================================================
class GConditionElement (GCondition) :
"""
Abstract base class for all condition elements.
"""
# ---------------------------------------------------------------------------
# Constructor
# ---------------------------------------------------------------------------
def __init__(self, parent=None, type="GConditionElement"):
GCondition.__init__ (self, parent, type = type)
# =============================================================================
# A Field element in the condition tree
# =============================================================================
class GCField (GConditionElement):
"""
Field value from a database table.
"""
# ---------------------------------------------------------------------------
# Constructor
# ---------------------------------------------------------------------------
def __init__(self, parent, name = None, datatype = "char"):
GConditionElement.__init__ (self, parent, 'GCCField')
self.type = datatype
self.name = name
# ---------------------------------------------------------------------------
# Evaluate a field element
# ---------------------------------------------------------------------------
def evaluate (self, lookup):
"""
Return the value of the field in the given lookup dictionary.
@param lookup: dictionary used for lookups
@return: value of the field
@raise MissingFieldError: raised if the lookup dictionary does not contain
a key for this field
"""
if not lookup.has_key (self.name):
raise MissingFieldError, (self.name)
return lookup [self.name]
# ---------------------------------------------------------------------------
# A field in prefix notation is a tuple of 'field' and fieldname
# ---------------------------------------------------------------------------
def prefixNotation (self):
"""
The prefix notation of a field element is a tuple of the identifier 'field'
(acting as operator) and the field's name.
@return: ['Field', name]
"""
return ['Field', self.name]
# ---------------------------------------------------------------------------
  # To complete a field element from a prefix notation, set the field name
# ---------------------------------------------------------------------------
def buildFromList (self, prefixList):
"""
    The single argument to a field 'operator' is the field's name, so this
    method sets the field name.
"""
checktype (prefixList, basestring)
self.name = prefixList
# ---------------------------------------------------------------------------
  # The SQL representation of a field is its field name
# ---------------------------------------------------------------------------
def asSQL (self, paramDict):
"""
    The SQL representation of a field is its name.
@param paramDict: current parameter dictionary
@return: the name of the field
"""
return self.name
# =============================================================================
# A constant definition in a condition tree
# =============================================================================
class GCConst (GConditionElement):
"""
Constant value of a specific type.
"""
# ---------------------------------------------------------------------------
# Constructor
# ---------------------------------------------------------------------------
def __init__ (self, parent, value = None, datatype = "char"):
GConditionElement.__init__ (self, parent, 'GCCConst')
self.type = datatype
self.value = value
self._inits = [self.__typecast]
if self.value is not None:
self.__typecast ()
# ---------------------------------------------------------------------------
# Evaluate a constant
# ---------------------------------------------------------------------------
def evaluate (self, lookup):
"""
    This function returns the constant's value.
@param lookup: dictionary with lookup values
@return: value of the constant definition
"""
return self.value
# ---------------------------------------------------------------------------
# The prefix notation of a constant is a tuple of identifier and value
# ---------------------------------------------------------------------------
def prefixNotation (self):
"""
The prefix notation of a constant is a tuple of the identifier 'Const' and
the constant's value.
@return: ['Const', value]
"""
return ['Const', self.value]
# ---------------------------------------------------------------------------
# Recreate a constant from a prefix notation
# ---------------------------------------------------------------------------
def buildFromList (self, prefixList):
"""
    The single argument of a constant 'operator' is its value, so this
    function sets the constant's value.
@param prefixList: element sequence in prefix notation. For a constant
definition this sequence must be the constant's value.
"""
self.value = prefixList
# ---------------------------------------------------------------------------
# Return an SQL representation of a constant
# ---------------------------------------------------------------------------
def asSQL (self, paramDict):
"""
Add another key to the parameter dictionary holding the constant's value
    and return an appropriate placeholder for it.
@param paramDict: parameter dictionary which will be extended
@return: placeholder for the constant, i.e. '%(p0)s'
"""
if self.value is None:
# Some backends don't like NULL to be a parameter
return u'NULL'
else:
pKey = "p%d" % len (paramDict)
paramDict [pKey] = self.value
return u'%%(%s)s' % pKey
# ---------------------------------------------------------------------------
# Create a native python type for the constant value
# ---------------------------------------------------------------------------
def __typecast (self):
dtype = self.type.lower ()
if dtype == 'boolean':
self.value = self.value.upper () in ['TRUE', 'Y', '1']
elif dtype == 'number':
# NOTE: what about the decimal separator depending on the locale?
if "." in self.value:
self.value = float (self.value)
else:
self.value = int (self.value)
elif dtype == 'date':
self.value = GDateTime.parseISODate (self.value)
elif dtype == 'time':
self.value = GDateTime.parseISOTime (self.value)
elif dtype == 'datetime':
self.value = GDateTime.parseISO (self.value)
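# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): constant values given
# as strings are cast to native python types according to their datatype.
# -----------------------------------------------------------------------------
def _exampleConstants ():
  number = GCConst (None, "42", "number")     # number.value == 42
  flag   = GCConst (None, "Y", "boolean")     # flag.value   == True
  return number.value, flag.value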
# =============================================================================
# Base class for parameter elements in a condition tree
# =============================================================================
class GCParam (GConditionElement):
"""
Abstract class for parameters. Must be overridden by a descendant to handle
actual parameter values.
"""
# ---------------------------------------------------------------------------
# Constructor
# ---------------------------------------------------------------------------
def __init__ (self, parent, name = None, datatype = "char"):
GConditionElement.__init__ (self, parent, 'GCCParam')
self.type = datatype
self.name = name
# ---------------------------------------------------------------------------
# Return the value of a parameter
# ---------------------------------------------------------------------------
def getValue(self):
"""
Descendants override this function to return the value of the parameter.
"""
return ""
# ---------------------------------------------------------------------------
# Evaluate the parameter object
# ---------------------------------------------------------------------------
def evaluate (self, lookup):
"""
    A parameter element evaluates to its value.
@param lookup: dictionary used for lookups
@return: the parameter's value
"""
return self.getValue ()
# ---------------------------------------------------------------------------
# Return a parameter object in prefix notation
# ---------------------------------------------------------------------------
def prefixNotation (self):
"""
The prefix notation of a parameter object is a 'constant' with the
    parameter's value.
@return: ['Const', value]
"""
return ['Const', self.getValue ()]
# ---------------------------------------------------------------------------
# Return a SQL representation of a parameter instance
# ---------------------------------------------------------------------------
def asSQL (self, paramDict):
"""
Add another key to the parameter dictionary holding the parameter's value
    and return an appropriate placeholder for it.
@param paramDict: parameter dictionary which will be extended
@return: placeholder for the parameter, i.e. '%(p0)s'
"""
pKey = "p%d" % len (paramDict)
paramDict [pKey] = self.getValue ()
return u'%%(%s)s' % pKey
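# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a concrete parameter
# class.  Real descendants would look the value up at runtime, e.g. from the
# surrounding form or report definition.
# -----------------------------------------------------------------------------
class _GCStaticParam (GCParam):
  """
  A parameter whose value is fixed at construction time (sketch only).
  """
  def __init__ (self, parent, name, value, datatype = "char"):
    GCParam.__init__ (self, parent, name, datatype)
    self.__value = value
  # ---------------------------------------------------------------------------
  def getValue (self):
    return self.__value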
# =============================================================================
# Base classes for unary operations
# =============================================================================
class GUnaryConditionElement (GConditionElement):
"""
Abstract base class for all unary condition elements.
"""
# ---------------------------------------------------------------------------
# Constructor
# ---------------------------------------------------------------------------
def __init__ (self, parent = None, elementType = ''):
GConditionElement.__init__ (self, parent, elementType)
self._maxChildren_ = 1
# ---------------------------------------------------------------------------
# Return a SQL representation of a unary operation
# ---------------------------------------------------------------------------
def asSQL (self, paramDict):
"""
Return a SQL code snippet for a unary operation.
@param paramDict: parameter dictionary which will be extended
@return: SQL code (in python-format) for the operation
"""
return self._operator_ % self._children [0].asSQL (paramDict)
# =============================================================================
# Base class for binary operations
# =============================================================================
class GBinaryConditionElement (GConditionElement):
"""
Abstract base class for all binary condition elements.
"""
# ---------------------------------------------------------------------------
# Constructor
# ---------------------------------------------------------------------------
def __init__ (self, parent = None, elementType = ''):
GConditionElement.__init__ (self, parent, elementType)
self._maxChildren_ = 2
self.values = []
# ---------------------------------------------------------------------------
# Evaluating a binary element means evaluation of both children
# ---------------------------------------------------------------------------
def evaluate (self, lookup):
"""
This function evaluates both children of a binary element storing their
values in the property 'values'. Descendants can use these values for
further evaluations.
@raise ArgumentCountError: if the number of child elements somewhere in the
tree is incorrect.
"""
self._needChildren ()
self.values = unify ([child.evaluate (lookup) for child in self._children])
# =============================================================================
# Logical operators
# =============================================================================
# -----------------------------------------------------------------------------
# n-ary operation: AND
# -----------------------------------------------------------------------------
class GCand (GConditionElement):
"""
Logical AND.
"""
def __init__ (self, parent = None):
GConditionElement.__init__ (self, parent, 'GCand')
self._operator_ = u"AND"
# -----------------------------------------------------------------------------
# n-ary operation: OR
# -----------------------------------------------------------------------------
class GCor (GConditionElement):
"""
Logical OR.
"""
def __init__ (self, parent = None):
GConditionElement.__init__ (self, parent, 'GCor')
self._operator_ = u"OR"
# ---------------------------------------------------------------------------
def evaluate (self, lookup):
"""
This function concatenates all children of this element by a logical OR.
The iteration stops on the first 'true' result.
"""
for child in self._children:
if child.evaluate (lookup):
return True
return False
# -----------------------------------------------------------------------------
# unary operation: NOT
# -----------------------------------------------------------------------------
class GCnot (GUnaryConditionElement):
"""
Logical NOT.
"""
def __init__ (self, parent = None):
GUnaryConditionElement.__init__ (self, parent, 'GCnot')
self._operator_ = u"NOT %s"
# ---------------------------------------------------------------------------
def evaluate (self, lookup):
"""
This function logically inverts the child's evaluation
"""
self._needChildren ()
return not self._children [0].evaluate (lookup)
# =============================================================================
# Numeric operations
# =============================================================================
# ---------------------------------------------------------------------------
# n-ary operation: Addition
# ---------------------------------------------------------------------------
class GCadd (GConditionElement):
"""
Numeric addition.
"""
def __init__ (self, parent = None):
GConditionElement.__init__ (self, parent, 'GCadd')
self._operator_ = u"+"
# ---------------------------------------------------------------------------
def evaluate (self, lookup):
"""
    This function creates the sum of all its children. A unify is used to
ensure all children evaluate to a numeric type.
"""
result = 0
for child in self._children:
      result += unify ([child.evaluate (lookup), 0]) [0]
return result
# -----------------------------------------------------------------------------
# n-ary operation: Subtraction
# -----------------------------------------------------------------------------
class GCsub (GConditionElement):
"""
Numeric subtraction.
"""
def __init__ (self, parent = None):
GConditionElement.__init__ (self, parent, 'GCsub')
self._operator_ = u"-"
# ---------------------------------------------------------------------------
def evaluate (self, lookup):
result = None
for child in self._children:
      value = unify ([child.evaluate (lookup), 0]) [0]
if result is None:
result = value
else:
result -= value
return result
# -----------------------------------------------------------------------------
# n-ary operation: Multiplication
# -----------------------------------------------------------------------------
class GCmul (GConditionElement):
"""
Numeric multiplication.
"""
def __init__ (self, parent = None):
GConditionElement.__init__ (self, parent, 'GCmul')
self._operator_ = u"*"
# ---------------------------------------------------------------------------
def evaluate (self, lookup):
result = None
for child in self._children:
value = unify ([child.evaluate (lookup), 0]) [0]
if result is None:
result = value
else:
result *= value
return result
# -----------------------------------------------------------------------------
# n-ary operation: Division
# -----------------------------------------------------------------------------
class GCdiv (GConditionElement):
"""
Numeric division.
"""
def __init__ (self, parent = None):
GConditionElement.__init__ (self, parent, 'GCdiv')
self._operator_ = u"/"
# ---------------------------------------------------------------------------
def evaluate (self, lookup):
result = None
for child in self._children:
value = unify ([child.evaluate (lookup), 0]) [0]
if result is None:
result = value
else:
result /= value
return result
# -----------------------------------------------------------------------------
# unary operation: numeric negation
# -----------------------------------------------------------------------------
class GCnegate (GUnaryConditionElement):
"""
Numeric negation.
"""
def __init__ (self, parent = None):
GUnaryConditionElement.__init__ (self, parent, 'GCnegate')
self._operator_ = u"-%s"
# ---------------------------------------------------------------------------
def evaluate (self, lookup):
"""
This function does a numeric negation on the child's evaluation result.
"""
self._needChildren ()
return -unify ([self._children [0].evaluate (lookup), 0]) [0]
# =============================================================================
# Relational operations
# =============================================================================
# -----------------------------------------------------------------------------
# Equality
# -----------------------------------------------------------------------------
class GCeq (GBinaryConditionElement):
"""
Test for equality.
"""
def __init__ (self, parent = None):
GBinaryConditionElement.__init__ (self, parent, 'GCeq')
self._operator_ = u"="
# ---------------------------------------------------------------------------
def evaluate (self, lookup):
GBinaryConditionElement.evaluate (self, lookup)
return self.values [0] == self.values [1]
# ---------------------------------------------------------------------------
def asSQL (self, paramDict):
"""
Return a SQL code snippet for this equal relation. If the right hand
element of the relation is a constant with a value of None, the operator
will be changed to the keyword 'IS'.
@param paramDict: parameter dictionary which will be extended
@return: SQL code for the condition element
"""
if isinstance (self._children [1], GCConst) and \
self._children [1].value is None:
self._operator_ = u"IS"
return GBinaryConditionElement.asSQL (self, paramDict)
# -----------------------------------------------------------------------------
# Inequality
# -----------------------------------------------------------------------------
class GCne (GBinaryConditionElement):
"""
Test for inequality.
"""
def __init__ (self, parent = None):
GBinaryConditionElement.__init__ (self, parent, 'GCne')
self._operator_ = u"!="
# ---------------------------------------------------------------------------
def evaluate (self, lookup):
GBinaryConditionElement.evaluate (self, lookup)
return self.values [0] != self.values [1]
# ---------------------------------------------------------------------------
def asSQL (self, paramDict):
"""
Return a SQL code snippet for this inequal relation. If the right hand
element of the relation is a constant with a value of None, the operator
will be changed to the keyword 'IS NOT'.
@param paramDict: parameter dictionary which will be extended
@return: SQL code for the condition element
"""
if isinstance (self._children [1], GCConst) and \
self._children [1].value is None:
self._operator_ = u"IS NOT"
return GBinaryConditionElement.asSQL (self, paramDict)
# -----------------------------------------------------------------------------
# Greater Than
# -----------------------------------------------------------------------------
class GCgt (GBinaryConditionElement):
"""
Test for greater than.
"""
def __init__ (self, parent = None):
GBinaryConditionElement.__init__ (self, parent, 'GCgt')
self._operator_ = u">"
# ---------------------------------------------------------------------------
def evaluate (self, lookup):
GBinaryConditionElement.evaluate (self, lookup)
return self.values [0] > self.values [1]
# -----------------------------------------------------------------------------
# Greater or Equal
# -----------------------------------------------------------------------------
class GCge (GBinaryConditionElement):
"""
Test for greater or equal.
"""
def __init__ (self, parent = None):
GBinaryConditionElement.__init__ (self, parent, 'GCge')
self._operator_ = u">="
# ---------------------------------------------------------------------------
def evaluate (self, lookup):
GBinaryConditionElement.evaluate (self, lookup)
return self.values [0] >= self.values [1]
# -----------------------------------------------------------------------------
# Less Than
# -----------------------------------------------------------------------------
class GClt (GBinaryConditionElement):
"""
Test for lower than.
"""
def __init__ (self, parent = None):
GBinaryConditionElement.__init__ (self, parent, 'GClt')
self._operator_ = u"<"
# ---------------------------------------------------------------------------
def evaluate (self, lookup):
GBinaryConditionElement.evaluate (self, lookup)
return self.values [0] < self.values [1]
# -----------------------------------------------------------------------------
# Less or Equal
# -----------------------------------------------------------------------------
class GCle (GBinaryConditionElement):
"""
Test for lower or equal.
"""
def __init__ (self, parent = None):
GBinaryConditionElement.__init__ (self, parent, 'GCle')
self._operator_ = u"<="
# ---------------------------------------------------------------------------
def evaluate (self, lookup):
GBinaryConditionElement.evaluate (self, lookup)
return self.values [0] <= self.values [1]
# -----------------------------------------------------------------------------
# Like
# -----------------------------------------------------------------------------
class GClike (GBinaryConditionElement):
"""
Test for SQL LIKE.
"""
def __init__ (self, parent = None):
GBinaryConditionElement.__init__ (self, parent, 'GClike')
self._operator_ = u"LIKE"
# ---------------------------------------------------------------------------
def evaluate (self, lookup):
GBinaryConditionElement.evaluate (self, lookup)
# None cannot be like something else. You should use 'NULL' or 'NOT NULL'
# instead
if self.values [0] is None:
return False
strpat = "^%s" % self.values [1]
strpat = strpat.replace ('?', '.').replace ('%', '.*')
pattern = re.compile (strpat)
return pattern.match (self.values [0]) is not None
# -----------------------------------------------------------------------------
# Not Like
# -----------------------------------------------------------------------------
class GCnotlike (GBinaryConditionElement):
"""
Test for SQL NOT LIKE.
"""
def __init__ (self, parent = None):
GBinaryConditionElement.__init__ (self, parent, 'GCnotlike')
self._operator_ = u"NOT LIKE"
# ---------------------------------------------------------------------------
def evaluate (self, lookup):
GBinaryConditionElement.evaluate (self, lookup)
strpat = "^%s" % self.values [1]
strpat = strpat.replace ('?', '.').replace ('%', '.*')
pattern = re.compile (strpat)
return pattern.match (self.values [0]) is None
# -----------------------------------------------------------------------------
# Between
# -----------------------------------------------------------------------------
class GCbetween (GConditionElement):
"""
Test for SQL BETWEEN.
"""
def __init__ (self, parent = None):
GConditionElement.__init__ (self, parent, 'GCbetween')
self._maxChildren_ = 3
self._operator_ = u"BETWEEN"
# ---------------------------------------------------------------------------
def evaluate (self, lookup):
self._needChildren ()
values = unify ([v.evaluate (lookup) for v in self._children])
return values [1] <= values [0] <= values [2]
# ---------------------------------------------------------------------------
def asSQL (self, paramDict):
"""
Return a SQL code snippet for this condition element.
@param paramDict: parameter dictionary which will be extended
@return: SQL code for the condition element
"""
f = isinstance (self.getParent (), GConditionElement) and u'(%s)' or u'%s'
return f % ('%s BETWEEN %s AND %s' \
% tuple ([item.asSQL (paramDict) for item in self._children]))
# -----------------------------------------------------------------------------
# Not Between
# -----------------------------------------------------------------------------
class GCnotbetween (GConditionElement):
"""
Test for SQL NOT BETWEEN.
"""
def __init__ (self, parent = None):
GConditionElement.__init__ (self, parent, 'GCnotbetween')
self._maxChildren_ = 3
self._operator_ = u"NOT BETWEEN"
# ---------------------------------------------------------------------------
def evaluate (self, lookup):
self._needChildren ()
values = unify ([v.evaluate (lookup) for v in self._children])
return not (values [1] <= values [0] <= values [2])
# ---------------------------------------------------------------------------
def asSQL (self, paramDict):
"""
Return a SQL code snippet for this condition element.
@param paramDict: parameter dictionary which will be extended
@return: SQL code for the condition element
"""
f = isinstance (self.getParent (), GConditionElement) and u'(%s)' or u'%s'
return f % ('%s NOT BETWEEN %s AND %s' \
% tuple ([item.asSQL (paramDict) for item in self._children]))
# -----------------------------------------------------------------------------
# is NULL
# -----------------------------------------------------------------------------
class GCnull (GUnaryConditionElement):
"""
Test for SQL IS NULL
"""
def __init__ (self, parent = None):
GUnaryConditionElement.__init__ (self, parent, 'GCnull')
self._operator_ = u"(%s IS NULL)"
# ---------------------------------------------------------------------------
def evaluate (self, lookup):
self._needChildren ()
return self._children [0].evaluate (lookup) is None
# -----------------------------------------------------------------------------
# is Not NULL
# -----------------------------------------------------------------------------
class GCnotnull (GUnaryConditionElement):
"""
Test for SQL IS NOT NULL
"""
def __init__ (self, parent = None):
GUnaryConditionElement.__init__ (self, parent, 'GCnotnull')
self._operator_ = u"(%s IS NOT NULL)"
# ---------------------------------------------------------------------------
def evaluate (self, lookup):
self._needChildren ()
return self._children [0].evaluate (lookup) is not None
# -----------------------------------------------------------------------------
# upper
# -----------------------------------------------------------------------------
class GCupper (GUnaryConditionElement):
"""
String conversion to uppercase.
"""
def __init__ (self, parent = None):
GUnaryConditionElement.__init__ (self, parent, 'GCupper')
self._operator_ = u"UPPER(%s)"
# ---------------------------------------------------------------------------
def evaluate (self, lookup):
self._needChildren ()
return (self._children [0].evaluate (lookup)).upper ()
# -----------------------------------------------------------------------------
# lower
# -----------------------------------------------------------------------------
class GClower (GUnaryConditionElement):
"""
String conversion to lowercase.
"""
def __init__ (self, parent = None):
GUnaryConditionElement.__init__ (self, parent, 'GClower')
self._operator_ = u"LOWER(%s)"
# ---------------------------------------------------------------------------
def evaluate (self, lookup):
self._needChildren ()
return (self._children [0].evaluate (lookup)).lower ()
# -----------------------------------------------------------------------------
# exist
# -----------------------------------------------------------------------------
class GCexist (GConditionElement):
"""
Test if a record fulfilling a given condition exists in another table.
"""
def __init__ (self, parent = None):
GConditionElement.__init__ (self, parent, 'GCexist')
self.callback = None
# ---------------------------------------------------------------------------
def evaluate (self, lookup):
if self.callback is None:
raise NotImplementedError
return self.callback (self, lookup)
# ---------------------------------------------------------------------------
def prefixNotation (self):
"""
    This function returns the prefix notation of an exist element and all its
children.
"""
result = ['exist', self.table, self.masterlink, self.detaillink]
for child in self._children:
result.append (child.prefixNotation ())
return result
# ---------------------------------------------------------------------------
def asSQL (self, paramDict):
"""
Return the SQL statement for this condition, using a subselect.
"""
sql = '%s IN (SELECT %s FROM %s WHERE %s)' % (
self.masterlink, self.detaillink, self.table,
' AND '.join ([c.asSQL (paramDict) for c in self._children]))
return sql
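# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): an 'exist' condition
# in prefix notation and the sub-select it translates to.  The table and link
# names are made up for this example.
# -----------------------------------------------------------------------------
def _exampleExist ():
  condition = GCondition (prefixList = [
      'exist', 'address', 'person_id', 'id',
      ['eq', ['field', 'city'], ['const', 'Berlin']]])
  params = {}
  sql = condition.asSQL (params)
  # sql    == 'person_id IN (SELECT id FROM address WHERE (city = %(p0)s))'
  # params == {'p0': 'Berlin'}
  return sql, params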
# =============================================================================
# Return a dictionary of all XML elements available
# =============================================================================
def getXMLelements (updates = {}):
xmlElements = {
'condition': {
'BaseClass' : GCondition,
'ParentTags' : ()},
'cfield': {
'BaseClass' : GCField,
'Description': 'Defines a database table\'s field in a condition.',
'Attributes' : {'name': {'Required': True,
'Typecast': GTypecast.name }},
'ParentTags' : ('eq','ne','lt','le','gt','ge','add','sub','mul',
'div','negate','like','notlike','between',
'notbetween','upper','lower','null','notnull')},
'cconst': {
'BaseClass' : GCConst,
'Description': 'Defines a constant value in a condition.',
'Attributes' : {'value': {'Required': True,
'Typecast': GTypecast.text},
'type' : {'Typecast': GTypecast.text }},
'ParentTags' : ('eq','ne','lt','le','gt','ge','add','sub','mul',
'div','negate','like','notlike','between',
'notbetween')},
'cparam': {
'BaseClass' : GCParam,
'Description': 'Defines a parameter value in a condition.',
'Attributes' : {'name': {'Required': True,
'Unique': True,
'Typecast': GTypecast.name }},
'ParentTags' : ('eq','ne','lt','le','gt','ge','add','sub','mul',
'div','negate','like','notlike','between',
'notbetween')},
'and': {
'BaseClass' : GCand,
'Description': 'Implements logical AND relation.',
'ParentTags' : ('condition','and','or','not','exist')},
'or': {
'BaseClass' : GCor,
'Description': 'Implements logical OR relation.',
'ParentTags' : ('condition','and','or','not','exist')},
'not': {
'BaseClass' : GCnot,
'Description': 'Implements logical NOT relation.',
'ParentTags' : ('condition','and','or','not','exist')},
'add': {
'BaseClass' : GCadd,
'Description': 'Implements addition.',
'ParentTags' : ('eq','ne','lt','le','gt','ge','add','sub','mul',
'div','negate','between','notbetween')},
'sub': {
'BaseClass' : GCsub,
'Description': 'Implements subtraction.',
'ParentTags' : ('eq','ne','lt','le','gt','ge','add','sub','mul',
'div','negate','between','notbetween')},
'mul': {
'BaseClass' : GCmul,
'Description': 'Implements multiplication.',
'ParentTags' : ('eq','ne','lt','le','gt','ge','add','sub','mul',
'div','negate','between','notbetween')},
'div': {
'BaseClass' : GCdiv,
'Description': 'Implements division.',
'ParentTags' : ('eq','ne','lt','le','gt','ge','add','sub','mul',
'div','negate','between','notbetween')},
'negate': {
'BaseClass' : GCnegate,
'Description': 'Implements numerical negation.',
'ParentTags' : ('eq','ne','lt','le','gt','ge','add','sub','mul',
'div','between','notbetween')},
'eq': {
'BaseClass' : GCeq,
'Description': 'Implements a {field} = {value} condition.',
'ParentTags' : ('condition','and','or','not','exist')},
'ne': {
'BaseClass' : GCne,
'Description': 'Implements a {field} <> {value} condition.',
'ParentTags' : ('condition','and','or','not','exist')},
'gt': {
'BaseClass' : GCgt,
'Description': 'Implements a {field} > {value} condition.',
'ParentTags' : ('condition','and','or','not','exist')},
'ge': {
'BaseClass' : GCge,
'Description': 'Implements a {field} >= {value} condition.',
'ParentTags' : ('condition','and','or','not','exist')},
'lt': {
'BaseClass' : GClt,
'Description': 'Implements a {field} < {value} condition.',
'ParentTags' : ('condition','and','or','not','exist')},
'le': {
'BaseClass' : GCle,
'Description': 'Implements a {field} <= {value} condition.',
'ParentTags' : ('condition','and','or','not','exist')},
'like': {
'BaseClass' : GClike,
'Description': 'Implements a {field} LIKE {value} condition.',
'ParentTags' : ('condition','and','or','not','exist')},
'notlike': {
'BaseClass' : GCnotlike,
'Description': 'Implements a {field} NOT LIKE {value} condition.',
'ParentTags' : ('condition','and','or','not','exist')},
'between': {
'BaseClass' : GCbetween,
'Description': 'Implements a {field} BETWEEN {value1} {value2} '
'condition.',
'ParentTags' : ('condition','and','or','not','exist')},
'notbetween': {
'BaseClass' : GCnotbetween,
'Description': 'Implements a {field} NOT BETWEEN {value1} {value2} '
'condition.',
'ParentTags' : ('condition','and','or','not','exist')},
'null': {
'BaseClass' : GCnull,
'Description': 'Implements a {field} IS NULL condition.',
'ParentTags' : ('condition','and','or','not','exist')},
'notnull': {
'BaseClass' : GCnotnull,
'Description': 'Implements a {field} IS NOT NULL condition.',
'ParentTags' : ('condition','and','or','not','exist')},
'upper': {
'BaseClass' : GCupper,
'Description': 'Implements upper({value}).',
'ParentTags' : ('eq','ne','lt','le','gt','ge',
'like','notlike','between','notbetween')},
'lower': {
'BaseClass' : GClower,
'Description': 'Implements lower({value}).',
'ParentTags' : ('eq','ne','lt','le','gt','ge',
'like','notlike','between','notbetween')},
'exist': {
'BaseClass' : GCexist,
'Description': 'Implements an exist condition.',
'Attributes' : {'table' : {'Required': True,
'Typecast': GTypecast.name},
'masterlink': {'Required': True,
'Typecast': GTypecast.text},
'detaillink': {'Required': True,
'Typecast': GTypecast.text}},
'ParentTags' : ('condition','and','or','not','exist')}}
for alteration in updates.keys ():
xmlElements [alteration].update (updates [alteration])
return xmlElements
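# -----------------------------------------------------------------------------
# Illustrative note (not part of the original module): the 'updates' argument
# merges into the default element definitions, e.g. to allow a <condition> tag
# below an (assumed) <form> parent tag:
#
#   elements = getXMLelements ({'condition': {'ParentTags': ('form',)}})
#   # elements ['condition']['ParentTags'] == ('form',)
# -----------------------------------------------------------------------------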
# =============================================================================
# Convenience methods
# =============================================================================
# -----------------------------------------------------------------------------
# Create a condition tree either from a prefix list or a dictionary
# -----------------------------------------------------------------------------
def buildCondition (condition, comparison = GCeq, logic = GCand):
"""
Create a condition tree either from a sequence in prefix notation or a
dictionary. In the latter case an optional comparison- and logic-operator
class might be specified.
@param condition: sequence in prefix notation or a dictionary with the
condition to be converted
@param comparison: (operator) class used to compare keys and values
@param logic: (operator) class used to concatenate multiple comparisons
@return: GCondition tree
"""
checktype (condition, [list, dict, GCondition, None])
if isinstance (condition, list):
return GCondition (prefixList = condition)
elif isinstance (condition, dict):
return buildConditionFromDict (condition, comparison, logic)
elif isinstance (condition, GCondition):
return condition
else: # None
return GCondition ()
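# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the three accepted
# argument forms of buildCondition.
# -----------------------------------------------------------------------------
def _exampleBuildCondition ():
  # from a prefix list
  c1 = buildCondition (['eq', ['field', 'name'], ['const', 'Smith']])
  # from a dictionary (using the default GCeq/GCand operators)
  c2 = buildCondition ({'name': 'Smith'})
  # an existing tree is passed through unchanged
  c3 = buildCondition (c1)
  return c1, c2, c3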
# -----------------------------------------------------------------------------
# Create a condition tree from an element sequence in prefix notation
# -----------------------------------------------------------------------------
def buildConditionFromPrefix (prefixList):
"""
This function creates a new condition tree from the given element sequence,
which must be in prefix notation.
@param prefixList: sequence of condition elements in prefix notation
@return: GCondition tree
"""
checktype (prefixList, list)
return GCondition (prefixList = prefixList)
# -----------------------------------------------------------------------------
# Create a condition tree from a dictionary
# -----------------------------------------------------------------------------
def buildConditionFromDict (dictionary, comparison = GCeq, logic = GCand):
"""
This function creates a new condition tree using the given comparison as
operation between keys and values and a given logic as concatenation for all
keys.
@param dictionary: dictionary with (key, value) pairs to convert into a
condition tree
@param comparison: (operator) class used to compare keys and values
@param logic: (operator) class used to concatenate multiple comparisons
@return: GCondition tree
"""
checktype (dictionary, dict)
c = comparison ()._type [2:]
pList = [[c, ['field', f], ['const', v]] for (f, v) in dictionary.items ()]
if pList:
pList.insert (0, logic ()._type [2:])
return GCondition (prefixList = pList)
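# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a dictionary like
# {'name': 'Smith'} becomes the prefix list
# ['and', ['eq', ['field', 'name'], ['const', 'Smith']]], so the resulting
# tree matches records whose 'name' field equals 'Smith'.
# -----------------------------------------------------------------------------
def _exampleConditionFromDict ():
  condition = buildConditionFromDict ({'name': 'Smith'})
  assert condition.evaluate ({'name': 'Smith'})
  assert not condition.evaluate ({'name': 'Jones'})
  return condition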
# -----------------------------------------------------------------------------
# Combine two conditions with an AND clause
# -----------------------------------------------------------------------------
def combineConditions (cond1, cond2):
"""
Combine two conditions using an AND operator. Both arguments can be given as
condition trees (GCondition), dictionaries or prefix sequences. The resulting
combination is a *new* condition tree. None of the arguments will be changed.
@param cond1: condition-tree, -dictionary or -sequence (prefix list)
@param cond2: condition-tree, -dictionary or -sequence (prefix list)
@return: new GCondition instance with an AND-combination of both conditions
"""
# First check for the trivial cases. If there is only one part defined we can
# return a *copy* of that condition
if not cond1:
return buildCondition (buildCondition (cond2).prefixNotation ())
elif not cond2:
return buildCondition (buildCondition (cond1).prefixNotation ())
# otherwise make sure to have GCondition instances on both sides
cond1 = buildCondition (cond1)
cond2 = buildCondition (cond2)
# If the condition starts with an AND operator we start at that point,
# otherwise use the condition as it is
top1 = (cond1.findChildOfType ('GCand') or cond1).prefixNotation ()
top2 = (cond2.findChildOfType ('GCand') or cond2).prefixNotation ()
if top1 and top1 [0] == 'and': top1 = top1 [1:]
if top2 and top2 [0] == 'and': top2 = top2 [1:]
ncond = ['and']
if top1: ncond.append (top1)
if top2: ncond.append (top2)
return buildCondition (ncond)
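# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): combining a
# dictionary condition with a prefix-list condition.  Neither input is
# modified; the result is a new tree AND-ing both parts.
# -----------------------------------------------------------------------------
def _exampleCombineConditions ():
  combined = combineConditions (
      {'lastname': 'Smith'},
      ['gt', ['field', 'age'], ['const', 30]])
  return combined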
# -----------------------------------------------------------------------------
# Unify all elements in values to the same type
# -----------------------------------------------------------------------------
def unify (values):
"""
  Convert all items in a given sequence to a common type.
  @param values: sequence of items to be converted to a common type
  @return: sequence of converted items, all having the same datatype.
"""
checktype (values, list)
result = []
__unify (values, result)
return result
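# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): unify converts
# mixed-type value pairs to a common type before they are compared.
# -----------------------------------------------------------------------------
def _exampleUnify ():
  assert unify (['42', 42]) == [42, 42]          # string cast to integer
  assert unify (['T', False]) == [True, False]   # string cast to boolean
  return True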
# -----------------------------------------------------------------------------
def __unify (values, result):
if not len (values):
return
elif len (values) == 1:
result.append (values [0])
return
if isinstance (values [0], str):
values [0] = unicode (values [0])
if isinstance (values [1], str):
values [1] = unicode (values [1])
v1 = values [0]
v2 = values [1]
if v1 is None or v2 is None:
result.append (None)
values.remove (None)
__unify (values, result)
elif type (v1) == type (v2):
result.append (v1)
values.remove (v1)
__unify (values, result)
else:
# String-Conversions
if isinstance (v1, unicode) or isinstance (v2, unicode):
if isinstance (v1, unicode):
oldValue = v1
chkValue = v2
else:
oldValue = v2
chkValue = v1
# String to Boolean
if isinstance (chkValue, bool):
if oldValue.upper () in ['TRUE', 'T']:
newValue = True
elif oldValue.upper () in ['FALSE', 'F']:
newValue = False
else:
raise ConversionError, (oldValue, chkValue)
# String to Integer, Long or Float
elif isinstance (chkValue, int) or \
isinstance (chkValue, long) or \
isinstance (chkValue, float):
try:
if oldValue.upper () in ['TRUE', 'T']:
newValue = 1
elif oldValue.upper () in ['FALSE', 'F']:
newValue = 0
else:
newValue = int (oldValue)
except ValueError:
try:
newValue = float (oldValue)
except ValueError:
raise ConversionError, (oldValue, chkValue)
# String to DateTime
elif isinstance (chkValue, datetime.datetime) or \
isinstance (chkValue, datetime.time) or \
isinstance (chkValue, datetime.date):
try:
new = GDateTime.parseISO (oldValue)
if isinstance (chkValue, datetime.time):
newValue = datetime.time (new.hour, new.minute, new.second,
new.microsecond)
elif isinstance (chkValue, datetime.date):
newValue = datetime.date (new.year, new.month, new.day)
elif isinstance (chkValue, datetime.datetime):
newValue = datetime.datetime (new.year, new.month, new.day,
new.hour, new.minute, new.second, new.microsecond)
else:
newValue = new
except ValueError:
raise ConversionError, (oldValue, chkValue)
else:
raise ConversionRuleError, (oldValue, chkValue)
# Boolean conversions
elif isinstance (v1, bool) or isinstance (v2, bool):
if isinstance (v1, bool):
oldValue = v1
chkValue = v2
else:
oldValue = v2
chkValue = v1
# Boolean to Integer
if isinstance (chkValue, int):
if oldValue:
newValue = 1
else:
newValue = 0
# Boolean to Long
elif isinstance (chkValue, long):
if oldValue:
newValue = 1L
else:
newValue = 0L
# Boolean to Decimal
elif sys.hexversion >= 0x02040000 \
and isinstance (chkValue, decimal.Decimal):
if oldValue:
newValue = decimal.Decimal(1)
else:
newValue = decimal.Decimal(0)
else:
raise ConversionRuleError, (oldValue, chkValue)
# Integer conversions
elif isinstance (v1, int) or isinstance (v2, int):
if isinstance (v1, int):
oldValue = v1
chkValue = v2
else:
oldValue = v2
chkValue = v1
# Integer to Float
if isinstance (chkValue, float):
newValue = float (oldValue)
# Integer to Long
elif isinstance (chkValue, long):
newValue = long (oldValue)
# Integer to Decimal
elif sys.hexversion >= 0x02040000 \
and isinstance (chkValue, decimal.Decimal):
newValue = decimal.Decimal (oldValue)
else:
raise ConversionRuleError, (oldValue, chkValue)
# Long conversions
elif isinstance (v1, long) or isinstance (v2, long):
if isinstance (v1, long):
oldValue = v1
chkValue = v2
else:
oldValue = v2
chkValue = v1
# Long to Float
if isinstance (chkValue, float):
newValue = float (oldValue)
# Long to Decimal
elif sys.hexversion >= 0x02040000 \
and isinstance (chkValue, decimal.Decimal):
newValue = decimal.Decimal (oldValue)
else:
raise ConversionRuleError, (oldValue, chkValue)
# Decimal conversion (Python 2.4 or later)
elif sys.hexversion >= 0x02040000 \
and (isinstance (v1, decimal.Decimal) \
or isinstance (v2, decimal.Decimal)):
if isinstance (v1, decimal.Decimal):
oldValue = v1
chkValue = v2
else:
oldValue = v2
chkValue = v1
# Decimal into Float
if isinstance (chkValue, float):
newValue = float (oldValue)
else:
raise ConversionRuleError, (oldValue, chkValue)
elif isinstance (v1, datetime.datetime) or \
isinstance (v2, datetime.datetime):
if isinstance (v1, datetime.datetime):
oldValue = v1
chkValue = v2
else:
oldValue = v2
chkValue = v1
if isinstance (chkValue, datetime.date):
newValue = oldValue.date ()
else:
raise ConversionRuleError, (v1, v2)
elif isinstance (v1, datetime.timedelta) or \
isinstance (v2, datetime.timedelta):
if isinstance (v1, datetime.timedelta):
oldValue = v1
chkValue = v2
else:
oldValue = v2
chkValue = v1
if isinstance (chkValue, datetime.time):
newValue = datetime.time (oldValue.seconds / 3600,
oldValue.seconds % 3600 / 60, oldValue.seconds % 60,
oldValue.microseconds)
else:
raise ConversionRuleError, (v1, v2)
else:
raise ConversionRuleError, (v1, v2)
values [oldValue == v2] = newValue
__unify (values, result)
| gpl-2.0 | -955,720,464,379,371,300 | 29.783352 | 79 | 0.501728 | false |
yongshengwang/hue | desktop/core/ext-py/guppy-0.1.10/guppy/etc/KnuthBendix.py | 37 | 7764 | #._cv_part guppy.etc.KnuthBendix
"""
An implementation of the Knuth-Bendix algorithm, as described in (1), p. 143,
for determining whether two paths in a category are equal.
The algorithm as given here,
takes a set of equations in the form of a sequence:
E = [(a, b), (c, d) ...]
where a, b, c, d are 'paths'.
Paths are given as strings, for example:
E = [ ('fhk', 'gh'), ('m', 'kkm') ]
means that the path 'fhk' equals 'gh' and 'm' equals 'kkm'.
Each arrow in the path is here a single character. If longer arrow
names are required, a delimiter string can be specified as in:
kb(E, delim='.')
The paths must then be given by the delimiter between each arrow;
E = [ ('h_arrow.g_arrow', 'g_arrow.k_arrow') ... ]
The function kb(E) returns an object, say A, which is
o callable: A(a, b)->boolean determines if two paths given by a, b are equal.
o has a method A.reduce(a)->pathstring, which reduces a path to normal form.
An optional parameter to kb, max_iterations, determines the maximum
number of iterations the algorithm should try making the reduction
system 'confluent'. The algorithm is not guaranteed to terminate
with a confluent system in a finite number of iterations, so if the
number of iterations needed exceeds max_iterations an exception
(ValueError) will be raised. The default is 100.
References
(1)
@book{walters91categories,
title={Categories and Computer Science},
author={R. F. C. Walters},
publisher={Cambridge University Press},
location={Cambridge},
year=1991}
(2)
@book{grimaldi94discrete,
author="Ralph P. Grimaldi".
title="Discrete and Combinatorial Mathematics: An Applied Introduction",
publisher="Addison-Wesley",
location="Readin, Massachusetts",
year=1994
}
"""
class KnuthBendix:
def __init__(self, E, delim = '', max_iterations = 100):
self.reductions = []
self.delim = delim
for a, b in E:
if delim:
a = self.wrap_delim(a)
b = self.wrap_delim(b)
if self.gt(b, a):
a, b = b, a
self.reductions.append((a, b))
self.make_confluent(max_iterations)
self.sort()
def __call__(self, x, y):
return self.reduce(x) == self.reduce(y)
def gt(self, a, b):
delim = self.delim
if delim:
la = len(a)
lb = len(b)
else:
la = a.count(delim)
lb = b.count(delim)
if la > lb:
return 1
if la < lb:
return 0
return a > b
def make_confluent(self, max_iterations):
def add_reduction(p, q):
if p != q:
#pdb.set_trace()
if self.gt(p, q):
self.reductions.append((p, q))
else:
self.reductions.append((q, p))
self.confluent = 0
reds_tested = {}
for i in range(max_iterations):
#print 'iter', i
self.confluent = 1
reds = list(self.reductions)
for u1, v1 in reds:
for u2, v2 in reds:
red = (u1, u2, u2, v2)
if red in reds_tested:
continue
reds_tested[red] = 1
if u2 in u1:
p = self.freduce(v1)
i = u1.index(u2)
while i >= 0:
uuu = u1[:i]+v2+u1[i+len(u2):]
q = self.freduce(uuu)
add_reduction(p, q)
i = u1.find(u2, i+1)
if 0:
uuu = u1.replace(u2, v2)
q = self.freduce(uuu)
add_reduction(p, q)
lu1 = len(u1)
for i in range(1, lu1-len(self.delim)):
if u2[:lu1-i] == u1[i:]:
p = self.freduce(v1 + u2[lu1-i:])
q = self.freduce(u1[:i] + v2)
add_reduction(p, q)
assert ('', '') not in reds
# Remove redundant reductions
newr = []
nullred = (self.delim, self.delim)
for i, uv in enumerate(self.reductions):
u, v = uv
self.reductions[i] = nullred
ru = self.freduce(u)
rv = self.freduce(v)
if ru != v and ru != rv:
urv = (u, rv)
newr.append(urv)
self.reductions[i] = urv
else:
pass
#pdb.set_trace()
if len(newr) != self.reductions:
assert ('', '') not in newr
self.reductions = newr
assert ('', '') not in self.reductions
#assert ('', '') not in reds
if self.confluent:
break
else:
raise ValueError, """\
KnuthBendix.make_confluent did not terminate in %d iterations.
Check your equations or specify a higher max_iterations value.
"""%max_iterations
#print len(reds_tested)
def freduce(self, p):
# This (internal) variant of reduce:
# Uses the internal representaion:
# Assumes p is .surrounded. by the delimiter
# and returns the reduced value .surrounded. by it.
# This is primarily for internal use by make_confluent
while 1:
q = p
for uv in self.reductions:
p = p.replace(*uv)
if q == p:
break
return p
def reduce(self, p):
# This (external) variant of reduce:
# will add delim if not .surrounded. by delim
# but the return value will not be surrounded by it.
if self.delim:
p = self.wrap_delim(p)
p = self.freduce(p)
if self.delim:
p = p.strip(self.delim)
return p
def sort(self, reds = None):
if reds is None:
reds = self.reductions
def cmp((x, _), (y, __)):
if self.gt(x, y):
return 1
if x == y:
return 0
return -1
reds.sort(cmp)
def pp(self):
printreds(self.reductions)
def wrap_delim(self, p):
if not p.startswith(self.delim):
p = self.delim + p
if not p.endswith(self.delim):
p = p + self.delim
return p
def printreds(reds):
for i, uv in enumerate(reds):
print '%s\t'%(uv,),
if (i + 1) % 4 == 0:
print
if (i + 1) % 4 != 0:
print
def kb(E, *a, **k):
return KnuthBendix(E, *a, **k)
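# A small usage sketch (not part of the original module); the equations and
# the expected normal form follow the module docstring and test() below.
def _kb_example():
    E = [('fhk', 'gh'), ('m', 'kkm')]
    R = kb(E)
    assert R.reduce('fffghkkkm') == 'ffffhm'
    assert R('fhk', 'gh')   # both sides of the first equation are equal paths
    return R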
class _GLUECLAMP_:
pass
def test2():
#
# The group of complex numbers {1, -1, i, -i} under multiplication;
# generators and table from Example 16.13 in (2).
G = ['1', '-1', 'i', '-i']
E = [('1.i', 'i'),
('i.i', '-1'),
('i.i.i', '-i'),
('i.i.i.i', '1'),
]
R = kb(E, delim='.')
T = [['.']+G] + [[y]+[R.reduce('%s.%s'%(y, x)) for x in G] for y in G]
assert T == [
['.', '1', '-1', 'i', '-i'],
['1', '1', '-1', 'i', '-i'],
['-1', '-1', '1', '-i', 'i'],
['i', 'i', '-i', '-1', '1'],
['-i', '-i', 'i', '1', '-1']]
return R
def test():
E = [('.a.', '.b.')]
a = kb(E,delim='.')
assert a('.a.', '.b.')
E = [('fhk', 'gh'), ('m', 'kkm')]
a = kb(E)
p = a.reduce('fffghkkkm')
q = a.reduce('ffghkm')
assert p == 'ffffhm'
assert q == 'fffhm'
assert not a(p, q)
E = [('.a.', '.b.')]
a = kb(E, delim='.')
p = a.reduce('aa')
assert p == 'aa'
p = a.reduce('.bb.')
assert p == 'bb'
p = a.reduce('b')
assert p == 'a'
E = [('.f.h.k.', '.g.h.'), ('.m.', '.k.k.m.')]
a = kb(E, delim='.')
p = a.reduce('.f.f.f.g.h.k.k.k.m.')
q = a.reduce('.f.f.g.h.k.m.')
assert p, q == ('.f.f.f.f.h.m.', '.f.f.f.h.m.')
assert p == 'f.f.f.f.h.m'
assert q == 'f.f.f.h.m'
E = [('.f.ff.fff.', '.ffff.ff.'), ('.fffff.', '.fff.fff.fffff.')]
a = kb(E, delim='.')
p = a.reduce('.f.f.f.ffff.ff.fff.fff.fff.fffff.')
q = a.reduce('.f.f.ffff.ff.fff.fffff.')
#print p, q
assert p == 'f.f.f.f.ff.fffff'
assert q == 'f.f.f.ff.fffff'
def test3():
# From 9.3 in 251
E = [('Hcc', 'H'),
('aab','ba'),
('aac','ca'),
('cccb','abc'),
('caca','b')]
a = kb(E)
canon = [
('Hb','Ha'), ('Haa','Ha'), ('Hab','Ha'), ('Hca','Hac'),
('Hcb','Hac'), ('Hcc','H'), ('aab','ba'), ('aac','ca'),
('abb','bb'), ('abc','cb'), ('acb','cb'), ('baa','ba'),
('bab','bb'), ('bac','cb'), ('bba','bb'), ('bca','cb'),
('bcb','bbc'), ('cab','cb'), ('cba','cb'), ('cbb','bbc'),
('cbc','bb'), ('ccb','bb'), ('Haca','Hac'), ('Hacc','Ha'),
('bbbb','bb'), ('bbbc','cb'), ('bbcc','bbb'), ('bcca','bb'),
('caca','b'), ('ccaa','ba'), ('ccca','cb'), ('cacca','cb')
]
a.canon = canon
if 0:
for uv in canon:
if not uv in a.reductions:
print uv
return a
| apache-2.0 | -8,588,377,452,142,216,000 | 22.245509 | 77 | 0.551005 | false |
kenshay/ImageScripter | ProgramData/Android/ADB/platform-tools/systrace/catapult/telemetry/telemetry/testing/browser_test_case.py | 6 | 3561 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from functools import wraps
import logging
import os
import sys
import types
import unittest
from telemetry.internal.browser import browser_finder
from telemetry.internal.util import path
from telemetry.testing import options_for_unittests
current_browser_options = None
current_browser = None
class _MetaBrowserTestCase(type):
"""Metaclass for BrowserTestCase.
The metaclass wraps all test* methods of all subclasses of BrowserTestCase to
print browser standard output and log upon failure.
"""
def __new__(mcs, name, bases, dct):
new_dct = {}
for attributeName, attribute in dct.iteritems():
if (isinstance(attribute, types.FunctionType) and
attributeName.startswith('test')):
attribute = mcs._PrintBrowserStandardOutputAndLogOnFailure(attribute)
new_dct[attributeName] = attribute
return type.__new__(mcs, name, bases, new_dct)
@staticmethod
def _PrintBrowserStandardOutputAndLogOnFailure(method):
@wraps(method)
def WrappedMethod(self):
try: # pylint: disable=broad-except
method(self)
except Exception:
exc_info = sys.exc_info()
if self._browser:
self._browser.DumpStateUponFailure()
else:
logging.warning('Cannot dump browser state: No browser.')
# Re-raise the original exception. Note that we can't just use 'raise'
# without any arguments because an exception might have been thrown when
# dumping the state of the browser.
raise exc_info[0], exc_info[1], exc_info[2]
return WrappedMethod
def teardown_browser():
global current_browser
global current_browser_options
if current_browser:
current_browser.Close()
current_browser.platform.network_controller.Close()
current_browser = None
current_browser_options = None
class BrowserTestCase(unittest.TestCase):
__metaclass__ = _MetaBrowserTestCase
@classmethod
def setUpClass(cls):
cls._platform = None
global current_browser
global current_browser_options
options = options_for_unittests.GetCopy()
cls.CustomizeBrowserOptions(options.browser_options)
if not current_browser or (current_browser_options !=
options.browser_options):
if current_browser:
teardown_browser()
browser_to_create = browser_finder.FindBrowser(options)
if not browser_to_create:
raise Exception('No browser found, cannot continue test.')
cls._platform = browser_to_create.platform
cls._platform.network_controller.InitializeIfNeeded()
try:
current_browser = browser_to_create.Create(options)
current_browser_options = options.browser_options
except:
cls.tearDownClass()
raise
cls._browser = current_browser
cls._device = options.remote_platform_options.device
@classmethod
def tearDownClass(cls):
if cls._platform:
cls._platform.StopAllLocalServers()
cls._platform.network_controller.Close()
@classmethod
def CustomizeBrowserOptions(cls, options):
"""Override to add test-specific options to the BrowserOptions object"""
pass
@classmethod
def UrlOfUnittestFile(cls, filename):
cls._platform.SetHTTPServerDirectories(path.GetUnittestDataDir())
file_path = os.path.join(path.GetUnittestDataDir(), filename)
return cls._platform.http_server.UrlOf(file_path)
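# An illustrative subclass (sketch only, not part of this module), kept as a
# comment so it is not collected by test runners.  The extra browser argument,
# the tab/Navigate calls and the page file name are assumptions made for the
# example rather than guaranteed telemetry APIs.
#
# class ExampleBrowserTest(BrowserTestCase):
#
#   @classmethod
#   def CustomizeBrowserOptions(cls, options):
#     options.AppendExtraBrowserArgs('--enable-logging')
#
#   def testCanNavigateToUnittestPage(self):
#     tab = self._browser.tabs[0]
#     tab.Navigate(self.UrlOfUnittestFile('blank.html'))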
| gpl-3.0 | 7,389,350,027,619,998,000 | 29.965217 | 80 | 0.708509 | false |
jameswatt2008/jameswatt2008.github.io | python/Python核心编程/网络编程/截图和代码/概述、SOCKET/多进程copy文件/test/nntplib.py | 11 | 43078 | """An NNTP client class based on:
- RFC 977: Network News Transfer Protocol
- RFC 2980: Common NNTP Extensions
- RFC 3977: Network News Transfer Protocol (version 2)
Example:
>>> from nntplib import NNTP
>>> s = NNTP('news')
>>> resp, count, first, last, name = s.group('comp.lang.python')
>>> print('Group', name, 'has', count, 'articles, range', first, 'to', last)
Group comp.lang.python has 51 articles, range 5770 to 5821
>>> resp, subs = s.xhdr('subject', '{0}-{1}'.format(first, last))
>>> resp = s.quit()
>>>
Here 'resp' is the server response line.
Error responses are turned into exceptions.
To post an article from a file:
>>> f = open(filename, 'rb') # file containing article, including header
>>> resp = s.post(f)
>>>
For descriptions of all methods, read the comments in the code below.
Note that all arguments and return values representing article numbers
are strings, not numbers, since they are rarely used for calculations.
"""
# RFC 977 by Brian Kantor and Phil Lapsley.
# xover, xgtitle, xpath, date methods by Kevan Heydon
# Incompatible changes from the 2.x nntplib:
# - all commands are encoded as UTF-8 data (using the "surrogateescape"
# error handler), except for raw message data (POST, IHAVE)
# - all responses are decoded as UTF-8 data (using the "surrogateescape"
# error handler), except for raw message data (ARTICLE, HEAD, BODY)
# - the `file` argument to various methods is keyword-only
#
# - NNTP.date() returns a datetime object
# - NNTP.newgroups() and NNTP.newnews() take a datetime (or date) object,
# rather than a pair of (date, time) strings.
# - NNTP.newgroups() and NNTP.list() return a list of GroupInfo named tuples
# - NNTP.descriptions() returns a dict mapping group names to descriptions
# - NNTP.xover() returns a list of dicts mapping field names (header or metadata)
# to field values; each dict representing a message overview.
# - NNTP.article(), NNTP.head() and NNTP.body() return a (response, ArticleInfo)
# tuple.
# - the "internal" methods have been marked private (they now start with
# an underscore)
# Other changes from the 2.x/3.1 nntplib:
# - automatic querying of capabilities at connect
# - New method NNTP.getcapabilities()
# - New method NNTP.over()
# - New helper function decode_header()
# - NNTP.post() and NNTP.ihave() accept file objects, bytes-like objects and
# arbitrary iterables yielding lines.
# - An extensive test suite :-)
# TODO:
# - return structured data (GroupInfo etc.) everywhere
# - support HDR
# Imports
import re
import socket
import collections
import datetime
import warnings
try:
import ssl
except ImportError:
_have_ssl = False
else:
_have_ssl = True
from email.header import decode_header as _email_decode_header
from socket import _GLOBAL_DEFAULT_TIMEOUT
__all__ = ["NNTP",
"NNTPError", "NNTPReplyError", "NNTPTemporaryError",
"NNTPPermanentError", "NNTPProtocolError", "NNTPDataError",
"decode_header",
]
# maximal line length when calling readline(). This is to prevent
# reading arbitrary length lines. RFC 3977 limits NNTP line length to
# 512 characters, including CRLF. We have selected 2048 just to be on
# the safe side.
_MAXLINE = 2048
# Exceptions raised when an error or invalid response is received
class NNTPError(Exception):
"""Base class for all nntplib exceptions"""
def __init__(self, *args):
Exception.__init__(self, *args)
try:
self.response = args[0]
except IndexError:
self.response = 'No response given'
class NNTPReplyError(NNTPError):
"""Unexpected [123]xx reply"""
pass
class NNTPTemporaryError(NNTPError):
"""4xx errors"""
pass
class NNTPPermanentError(NNTPError):
"""5xx errors"""
pass
class NNTPProtocolError(NNTPError):
"""Response does not begin with [1-5]"""
pass
class NNTPDataError(NNTPError):
"""Error in response data"""
pass
# Standard port used by NNTP servers
NNTP_PORT = 119
NNTP_SSL_PORT = 563
# Response numbers that are followed by additional text (e.g. article)
_LONGRESP = {
'100', # HELP
'101', # CAPABILITIES
'211', # LISTGROUP (also not multi-line with GROUP)
'215', # LIST
'220', # ARTICLE
'221', # HEAD, XHDR
'222', # BODY
'224', # OVER, XOVER
'225', # HDR
'230', # NEWNEWS
'231', # NEWGROUPS
'282', # XGTITLE
}
# Default decoded value for LIST OVERVIEW.FMT if not supported
_DEFAULT_OVERVIEW_FMT = [
"subject", "from", "date", "message-id", "references", ":bytes", ":lines"]
# Alternative names allowed in LIST OVERVIEW.FMT response
_OVERVIEW_FMT_ALTERNATIVES = {
'bytes': ':bytes',
'lines': ':lines',
}
# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
_CRLF = b'\r\n'
GroupInfo = collections.namedtuple('GroupInfo',
['group', 'last', 'first', 'flag'])
ArticleInfo = collections.namedtuple('ArticleInfo',
['number', 'message_id', 'lines'])
# Helper function(s)
def decode_header(header_str):
"""Takes a unicode string representing a munged header value
and decodes it as a (possibly non-ASCII) readable value."""
parts = []
for v, enc in _email_decode_header(header_str):
if isinstance(v, bytes):
parts.append(v.decode(enc or 'ascii'))
else:
parts.append(v)
return ''.join(parts)
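
# Usage sketch (added for illustration; the RFC 2047 encoded-word below is a
# sample value, decoded via email.header.decode_header under the hood):
#
# >>> decode_header('=?iso-8859-1?q?p=F6stal?=')
# 'pöstal'
# >>> decode_header('plain ascii subject')
# 'plain ascii subject'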
def _parse_overview_fmt(lines):
"""Parse a list of string representing the response to LIST OVERVIEW.FMT
and return a list of header/metadata names.
Raises NNTPDataError if the response is not compliant
(cf. RFC 3977, section 8.4)."""
fmt = []
for line in lines:
if line[0] == ':':
# Metadata name (e.g. ":bytes")
name, _, suffix = line[1:].partition(':')
name = ':' + name
else:
# Header name (e.g. "Subject:" or "Xref:full")
name, _, suffix = line.partition(':')
name = name.lower()
name = _OVERVIEW_FMT_ALTERNATIVES.get(name, name)
# Should we do something with the suffix?
fmt.append(name)
defaults = _DEFAULT_OVERVIEW_FMT
if len(fmt) < len(defaults):
raise NNTPDataError("LIST OVERVIEW.FMT response too short")
if fmt[:len(defaults)] != defaults:
raise NNTPDataError("LIST OVERVIEW.FMT redefines default fields")
return fmt
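
# Example (illustrative; the response lines are made up but follow the
# RFC 3977 section 8.4 shape this parser expects):
#
# >>> _parse_overview_fmt(['Subject:', 'From:', 'Date:', 'Message-ID:',
# ...                      'References:', ':bytes', ':lines', 'Xref:full'])
# ['subject', 'from', 'date', 'message-id', 'references', ':bytes', ':lines', 'xref']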
def _parse_overview(lines, fmt, data_process_func=None):
"""Parse the response to an OVER or XOVER command according to the
overview format `fmt`."""
n_defaults = len(_DEFAULT_OVERVIEW_FMT)
overview = []
for line in lines:
fields = {}
article_number, *tokens = line.split('\t')
article_number = int(article_number)
for i, token in enumerate(tokens):
if i >= len(fmt):
# XXX should we raise an error? Some servers might not
# support LIST OVERVIEW.FMT and still return additional
# headers.
continue
field_name = fmt[i]
is_metadata = field_name.startswith(':')
if i >= n_defaults and not is_metadata:
# Non-default header names are included in full in the response
# (unless the field is totally empty)
h = field_name + ": "
if token and token[:len(h)].lower() != h:
raise NNTPDataError("OVER/XOVER response doesn't include "
"names of additional headers")
token = token[len(h):] if token else None
fields[fmt[i]] = token
overview.append((article_number, fields))
return overview
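
# Example (illustrative; the article line is fabricated, tab-separated as a
# server would send it for the default overview format):
#
# >>> line = '42\tRe: hello\tbob@example.org\t01 Jan 2016 10:00:00 GMT\t' \
# ...        '<a@example.org>\t<b@example.org>\t1234\t17'
# >>> _parse_overview([line], _DEFAULT_OVERVIEW_FMT)
# [(42, {'subject': 'Re: hello', 'from': 'bob@example.org',
#        'date': '01 Jan 2016 10:00:00 GMT', 'message-id': '<a@example.org>',
#        'references': '<b@example.org>', ':bytes': '1234', ':lines': '17'})]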
def _parse_datetime(date_str, time_str=None):
"""Parse a pair of (date, time) strings, and return a datetime object.
If only the date is given, it is assumed to be date and time
concatenated together (e.g. response to the DATE command).
"""
if time_str is None:
time_str = date_str[-6:]
date_str = date_str[:-6]
hours = int(time_str[:2])
minutes = int(time_str[2:4])
seconds = int(time_str[4:])
year = int(date_str[:-4])
month = int(date_str[-4:-2])
day = int(date_str[-2:])
# RFC 3977 doesn't say how to interpret 2-char years. Assume that
# there are no dates before 1970 on Usenet.
if year < 70:
year += 2000
elif year < 100:
year += 1900
return datetime.datetime(year, month, day, hours, minutes, seconds)
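
# Examples (illustrative):
#
# >>> _parse_datetime('20130418120521')        # e.g. a DATE response payload
# datetime.datetime(2013, 4, 18, 12, 5, 21)
# >>> _parse_datetime('990623', '121212')      # legacy two-digit year
# datetime.datetime(1999, 6, 23, 12, 12, 12)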
def _unparse_datetime(dt, legacy=False):
"""Format a date or datetime object as a pair of (date, time) strings
in the format required by the NEWNEWS and NEWGROUPS commands. If a
date object is passed, the time is assumed to be midnight (00h00).
The returned representation depends on the legacy flag:
* if legacy is False (the default):
date has the YYYYMMDD format and time the HHMMSS format
* if legacy is True:
date has the YYMMDD format and time the HHMMSS format.
RFC 3977 compliant servers should understand both formats; therefore,
legacy is only needed when talking to old servers.
"""
if not isinstance(dt, datetime.datetime):
time_str = "000000"
else:
time_str = "{0.hour:02d}{0.minute:02d}{0.second:02d}".format(dt)
y = dt.year
if legacy:
y = y % 100
date_str = "{0:02d}{1.month:02d}{1.day:02d}".format(y, dt)
else:
date_str = "{0:04d}{1.month:02d}{1.day:02d}".format(y, dt)
return date_str, time_str
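
# Examples (illustrative):
#
# >>> import datetime
# >>> _unparse_datetime(datetime.datetime(2013, 4, 18, 22, 5, 35))
# ('20130418', '220535')
# >>> _unparse_datetime(datetime.datetime(2013, 4, 18, 22, 5, 35), legacy=True)
# ('130418', '220535')
# >>> _unparse_datetime(datetime.date(2013, 4, 18))
# ('20130418', '000000')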
if _have_ssl:
def _encrypt_on(sock, context, hostname):
"""Wrap a socket in SSL/TLS. Arguments:
- sock: Socket to wrap
- context: SSL context to use for the encrypted connection
Returns:
- sock: New, encrypted socket.
"""
# Generate a default SSL context if none was passed.
if context is None:
context = ssl._create_stdlib_context()
return context.wrap_socket(sock, server_hostname=hostname)
# The classes themselves
class _NNTPBase:
# UTF-8 is the character set for all NNTP commands and responses: they
    # are automatically encoded (when sending) and decoded (when receiving)
# by this class.
# However, some multi-line data blocks can contain arbitrary bytes (for
# example, latin-1 or utf-16 data in the body of a message). Commands
# taking (POST, IHAVE) or returning (HEAD, BODY, ARTICLE) raw message
# data will therefore only accept and produce bytes objects.
# Furthermore, since there could be non-compliant servers out there,
# we use 'surrogateescape' as the error handler for fault tolerance
# and easy round-tripping. This could be useful for some applications
# (e.g. NNTP gateways).
encoding = 'utf-8'
errors = 'surrogateescape'
def __init__(self, file, host,
readermode=None, timeout=_GLOBAL_DEFAULT_TIMEOUT):
"""Initialize an instance. Arguments:
- file: file-like object (open for read/write in binary mode)
- host: hostname of the server
- readermode: if true, send 'mode reader' command after
connecting.
- timeout: timeout (in seconds) used for socket connections
readermode is sometimes necessary if you are connecting to an
NNTP server on the local machine and intend to call
reader-specific commands, such as `group'. If you get
unexpected NNTPPermanentErrors, you might need to set
readermode.
"""
self.host = host
self.file = file
self.debugging = 0
self.welcome = self._getresp()
# Inquire about capabilities (RFC 3977).
self._caps = None
self.getcapabilities()
# 'MODE READER' is sometimes necessary to enable 'reader' mode.
# However, the order in which 'MODE READER' and 'AUTHINFO' need to
# arrive differs between some NNTP servers. If _setreadermode() fails
# with an authorization failed error, it will set this to True;
# the login() routine will interpret that as a request to try again
# after performing its normal function.
# Enable only if we're not already in READER mode anyway.
self.readermode_afterauth = False
if readermode and 'READER' not in self._caps:
self._setreadermode()
if not self.readermode_afterauth:
# Capabilities might have changed after MODE READER
self._caps = None
self.getcapabilities()
# RFC 4642 2.2.2: Both the client and the server MUST know if there is
# a TLS session active. A client MUST NOT attempt to start a TLS
# session if a TLS session is already active.
self.tls_on = False
# Log in and encryption setup order is left to subclasses.
self.authenticated = False
def __enter__(self):
return self
def __exit__(self, *args):
is_connected = lambda: hasattr(self, "file")
if is_connected():
try:
self.quit()
except (OSError, EOFError):
pass
finally:
if is_connected():
self._close()
def getwelcome(self):
"""Get the welcome message from the server
(this is read and squirreled away by __init__()).
If the response code is 200, posting is allowed;
        if it is 201, posting is not allowed."""
if self.debugging: print('*welcome*', repr(self.welcome))
return self.welcome
def getcapabilities(self):
"""Get the server capabilities, as read by __init__().
If the CAPABILITIES command is not supported, an empty dict is
returned."""
if self._caps is None:
self.nntp_version = 1
self.nntp_implementation = None
try:
resp, caps = self.capabilities()
except (NNTPPermanentError, NNTPTemporaryError):
# Server doesn't support capabilities
self._caps = {}
else:
self._caps = caps
if 'VERSION' in caps:
# The server can advertise several supported versions,
# choose the highest.
self.nntp_version = max(map(int, caps['VERSION']))
if 'IMPLEMENTATION' in caps:
self.nntp_implementation = ' '.join(caps['IMPLEMENTATION'])
return self._caps
def set_debuglevel(self, level):
"""Set the debugging level. Argument 'level' means:
0: no debugging output (default)
1: print commands and responses but not body text etc.
2: also print raw lines read and sent before stripping CR/LF"""
self.debugging = level
debug = set_debuglevel
def _putline(self, line):
"""Internal: send one line to the server, appending CRLF.
The `line` must be a bytes-like object."""
line = line + _CRLF
if self.debugging > 1: print('*put*', repr(line))
self.file.write(line)
self.file.flush()
def _putcmd(self, line):
"""Internal: send one command to the server (through _putline()).
The `line` must be a unicode string."""
if self.debugging: print('*cmd*', repr(line))
line = line.encode(self.encoding, self.errors)
self._putline(line)
def _getline(self, strip_crlf=True):
"""Internal: return one line from the server, stripping _CRLF.
Raise EOFError if the connection is closed.
Returns a bytes object."""
        line = self.file.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise NNTPDataError('line too long')
if self.debugging > 1:
print('*get*', repr(line))
if not line: raise EOFError
if strip_crlf:
if line[-2:] == _CRLF:
line = line[:-2]
elif line[-1:] in _CRLF:
line = line[:-1]
return line
def _getresp(self):
"""Internal: get a response from the server.
Raise various errors if the response indicates an error.
Returns a unicode string."""
resp = self._getline()
if self.debugging: print('*resp*', repr(resp))
resp = resp.decode(self.encoding, self.errors)
c = resp[:1]
if c == '4':
raise NNTPTemporaryError(resp)
if c == '5':
raise NNTPPermanentError(resp)
if c not in '123':
raise NNTPProtocolError(resp)
return resp
def _getlongresp(self, file=None):
"""Internal: get a response plus following text from the server.
Raise various errors if the response indicates an error.
Returns a (response, lines) tuple where `response` is a unicode
string and `lines` is a list of bytes objects.
If `file` is a file-like object, it must be open in binary mode.
"""
openedFile = None
try:
# If a string was passed then open a file with that name
if isinstance(file, (str, bytes)):
openedFile = file = open(file, "wb")
resp = self._getresp()
if resp[:3] not in _LONGRESP:
raise NNTPReplyError(resp)
lines = []
if file is not None:
# XXX lines = None instead?
terminators = (b'.' + _CRLF, b'.\n')
while 1:
line = self._getline(False)
if line in terminators:
break
if line.startswith(b'..'):
line = line[1:]
file.write(line)
else:
terminator = b'.'
while 1:
line = self._getline()
if line == terminator:
break
if line.startswith(b'..'):
line = line[1:]
lines.append(line)
finally:
# If this method created the file, then it must close it
if openedFile:
openedFile.close()
return resp, lines
def _shortcmd(self, line):
"""Internal: send a command and get the response.
Same return value as _getresp()."""
self._putcmd(line)
return self._getresp()
def _longcmd(self, line, file=None):
"""Internal: send a command and get the response plus following text.
Same return value as _getlongresp()."""
self._putcmd(line)
return self._getlongresp(file)
def _longcmdstring(self, line, file=None):
"""Internal: send a command and get the response plus following text.
Same as _longcmd() and _getlongresp(), except that the returned `lines`
are unicode strings rather than bytes objects.
"""
self._putcmd(line)
resp, list = self._getlongresp(file)
return resp, [line.decode(self.encoding, self.errors)
for line in list]
def _getoverviewfmt(self):
"""Internal: get the overview format. Queries the server if not
already done, else returns the cached value."""
try:
return self._cachedoverviewfmt
except AttributeError:
pass
try:
resp, lines = self._longcmdstring("LIST OVERVIEW.FMT")
except NNTPPermanentError:
# Not supported by server?
fmt = _DEFAULT_OVERVIEW_FMT[:]
else:
fmt = _parse_overview_fmt(lines)
self._cachedoverviewfmt = fmt
return fmt
def _grouplist(self, lines):
# Parse lines into "group last first flag"
return [GroupInfo(*line.split()) for line in lines]
def capabilities(self):
"""Process a CAPABILITIES command. Not supported by all servers.
Return:
- resp: server response if successful
- caps: a dictionary mapping capability names to lists of tokens
(for example {'VERSION': ['2'], 'OVER': [], LIST: ['ACTIVE', 'HEADERS'] })
"""
caps = {}
resp, lines = self._longcmdstring("CAPABILITIES")
for line in lines:
name, *tokens = line.split()
caps[name] = tokens
return resp, caps
def newgroups(self, date, *, file=None):
"""Process a NEWGROUPS command. Arguments:
- date: a date or datetime object
Return:
- resp: server response if successful
- list: list of newsgroup names
"""
        if not isinstance(date, (datetime.date, datetime.datetime)):
raise TypeError(
"the date parameter must be a date or datetime object, "
"not '{:40}'".format(date.__class__.__name__))
date_str, time_str = _unparse_datetime(date, self.nntp_version < 2)
cmd = 'NEWGROUPS {0} {1}'.format(date_str, time_str)
resp, lines = self._longcmdstring(cmd, file)
return resp, self._grouplist(lines)
def newnews(self, group, date, *, file=None):
"""Process a NEWNEWS command. Arguments:
- group: group name or '*'
- date: a date or datetime object
Return:
- resp: server response if successful
- list: list of message ids
"""
        if not isinstance(date, (datetime.date, datetime.datetime)):
raise TypeError(
"the date parameter must be a date or datetime object, "
"not '{:40}'".format(date.__class__.__name__))
date_str, time_str = _unparse_datetime(date, self.nntp_version < 2)
cmd = 'NEWNEWS {0} {1} {2}'.format(group, date_str, time_str)
return self._longcmdstring(cmd, file)
def list(self, group_pattern=None, *, file=None):
"""Process a LIST or LIST ACTIVE command. Arguments:
- group_pattern: a pattern indicating which groups to query
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of (group, last, first, flag) (strings)
"""
if group_pattern is not None:
command = 'LIST ACTIVE ' + group_pattern
else:
command = 'LIST'
resp, lines = self._longcmdstring(command, file)
return resp, self._grouplist(lines)
def _getdescriptions(self, group_pattern, return_all):
line_pat = re.compile('^(?P<group>[^ \t]+)[ \t]+(.*)$')
# Try the more std (acc. to RFC2980) LIST NEWSGROUPS first
resp, lines = self._longcmdstring('LIST NEWSGROUPS ' + group_pattern)
if not resp.startswith('215'):
# Now the deprecated XGTITLE. This either raises an error
# or succeeds with the same output structure as LIST
# NEWSGROUPS.
resp, lines = self._longcmdstring('XGTITLE ' + group_pattern)
groups = {}
for raw_line in lines:
match = line_pat.search(raw_line.strip())
if match:
name, desc = match.group(1, 2)
if not return_all:
return desc
groups[name] = desc
if return_all:
return resp, groups
else:
# Nothing found
return ''
def description(self, group):
"""Get a description for a single group. If more than one
group matches ('group' is a pattern), return the first. If no
group matches, return an empty string.
This elides the response code from the server, since it can
only be '215' or '285' (for xgtitle) anyway. If the response
code is needed, use the 'descriptions' method.
NOTE: This neither checks for a wildcard in 'group' nor does
it check whether the group actually exists."""
return self._getdescriptions(group, False)
def descriptions(self, group_pattern):
"""Get descriptions for a range of groups."""
return self._getdescriptions(group_pattern, True)
def group(self, name):
"""Process a GROUP command. Argument:
- group: the group name
Returns:
- resp: server response if successful
- count: number of articles
- first: first article number
- last: last article number
- name: the group name
"""
resp = self._shortcmd('GROUP ' + name)
if not resp.startswith('211'):
raise NNTPReplyError(resp)
words = resp.split()
count = first = last = 0
n = len(words)
if n > 1:
count = words[1]
if n > 2:
first = words[2]
if n > 3:
last = words[3]
if n > 4:
name = words[4].lower()
return resp, int(count), int(first), int(last), name
def help(self, *, file=None):
"""Process a HELP command. Argument:
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of strings returned by the server in response to the
HELP command
"""
return self._longcmdstring('HELP', file)
def _statparse(self, resp):
"""Internal: parse the response line of a STAT, NEXT, LAST,
ARTICLE, HEAD or BODY command."""
if not resp.startswith('22'):
raise NNTPReplyError(resp)
words = resp.split()
art_num = int(words[1])
message_id = words[2]
return resp, art_num, message_id
def _statcmd(self, line):
"""Internal: process a STAT, NEXT or LAST command."""
resp = self._shortcmd(line)
return self._statparse(resp)
def stat(self, message_spec=None):
"""Process a STAT command. Argument:
- message_spec: article number or message id (if not specified,
the current article is selected)
Returns:
- resp: server response if successful
- art_num: the article number
- message_id: the message id
"""
if message_spec:
return self._statcmd('STAT {0}'.format(message_spec))
else:
return self._statcmd('STAT')
def next(self):
"""Process a NEXT command. No arguments. Return as for STAT."""
return self._statcmd('NEXT')
def last(self):
"""Process a LAST command. No arguments. Return as for STAT."""
return self._statcmd('LAST')
def _artcmd(self, line, file=None):
"""Internal: process a HEAD, BODY or ARTICLE command."""
resp, lines = self._longcmd(line, file)
resp, art_num, message_id = self._statparse(resp)
return resp, ArticleInfo(art_num, message_id, lines)
def head(self, message_spec=None, *, file=None):
"""Process a HEAD command. Argument:
- message_spec: article number or message id
- file: filename string or file object to store the headers in
Returns:
- resp: server response if successful
- ArticleInfo: (article number, message id, list of header lines)
"""
if message_spec is not None:
cmd = 'HEAD {0}'.format(message_spec)
else:
cmd = 'HEAD'
return self._artcmd(cmd, file)
def body(self, message_spec=None, *, file=None):
"""Process a BODY command. Argument:
- message_spec: article number or message id
- file: filename string or file object to store the body in
Returns:
- resp: server response if successful
- ArticleInfo: (article number, message id, list of body lines)
"""
if message_spec is not None:
cmd = 'BODY {0}'.format(message_spec)
else:
cmd = 'BODY'
return self._artcmd(cmd, file)
def article(self, message_spec=None, *, file=None):
"""Process an ARTICLE command. Argument:
- message_spec: article number or message id
- file: filename string or file object to store the article in
Returns:
- resp: server response if successful
- ArticleInfo: (article number, message id, list of article lines)
"""
if message_spec is not None:
cmd = 'ARTICLE {0}'.format(message_spec)
else:
cmd = 'ARTICLE'
return self._artcmd(cmd, file)
def slave(self):
"""Process a SLAVE command. Returns:
- resp: server response if successful
"""
return self._shortcmd('SLAVE')
def xhdr(self, hdr, str, *, file=None):
"""Process an XHDR command (optional server extension). Arguments:
- hdr: the header type (e.g. 'subject')
- str: an article nr, a message id, or a range nr1-nr2
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of (nr, value) strings
"""
pat = re.compile('^([0-9]+) ?(.*)\n?')
resp, lines = self._longcmdstring('XHDR {0} {1}'.format(hdr, str), file)
def remove_number(line):
m = pat.match(line)
return m.group(1, 2) if m else line
return resp, [remove_number(line) for line in lines]
def xover(self, start, end, *, file=None):
"""Process an XOVER command (optional server extension) Arguments:
- start: start of range
- end: end of range
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of dicts containing the response fields
"""
resp, lines = self._longcmdstring('XOVER {0}-{1}'.format(start, end),
file)
fmt = self._getoverviewfmt()
return resp, _parse_overview(lines, fmt)
def over(self, message_spec, *, file=None):
"""Process an OVER command. If the command isn't supported, fall
back to XOVER. Arguments:
- message_spec:
- either a message id, indicating the article to fetch
information about
- or a (start, end) tuple, indicating a range of article numbers;
if end is None, information up to the newest message will be
retrieved
- or None, indicating the current article number must be used
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of dicts containing the response fields
NOTE: the "message id" form isn't supported by XOVER
"""
cmd = 'OVER' if 'OVER' in self._caps else 'XOVER'
if isinstance(message_spec, (tuple, list)):
start, end = message_spec
cmd += ' {0}-{1}'.format(start, end or '')
elif message_spec is not None:
cmd = cmd + ' ' + message_spec
resp, lines = self._longcmdstring(cmd, file)
fmt = self._getoverviewfmt()
return resp, _parse_overview(lines, fmt)
def xgtitle(self, group, *, file=None):
"""Process an XGTITLE command (optional server extension) Arguments:
- group: group name wildcard (i.e. news.*)
Returns:
- resp: server response if successful
- list: list of (name,title) strings"""
warnings.warn("The XGTITLE extension is not actively used, "
"use descriptions() instead",
DeprecationWarning, 2)
line_pat = re.compile('^([^ \t]+)[ \t]+(.*)$')
resp, raw_lines = self._longcmdstring('XGTITLE ' + group, file)
lines = []
for raw_line in raw_lines:
match = line_pat.search(raw_line.strip())
if match:
lines.append(match.group(1, 2))
return resp, lines
def xpath(self, id):
"""Process an XPATH command (optional server extension) Arguments:
- id: Message id of article
Returns:
resp: server response if successful
path: directory path to article
"""
warnings.warn("The XPATH extension is not actively used",
DeprecationWarning, 2)
resp = self._shortcmd('XPATH {0}'.format(id))
if not resp.startswith('223'):
raise NNTPReplyError(resp)
try:
[resp_num, path] = resp.split()
except ValueError:
raise NNTPReplyError(resp)
else:
return resp, path
def date(self):
"""Process the DATE command.
Returns:
- resp: server response if successful
- date: datetime object
"""
resp = self._shortcmd("DATE")
if not resp.startswith('111'):
raise NNTPReplyError(resp)
elem = resp.split()
if len(elem) != 2:
raise NNTPDataError(resp)
date = elem[1]
if len(date) != 14:
raise NNTPDataError(resp)
return resp, _parse_datetime(date, None)
def _post(self, command, f):
resp = self._shortcmd(command)
# Raises a specific exception if posting is not allowed
if not resp.startswith('3'):
raise NNTPReplyError(resp)
if isinstance(f, (bytes, bytearray)):
f = f.splitlines()
# We don't use _putline() because:
# - we don't want additional CRLF if the file or iterable is already
# in the right format
# - we don't want a spurious flush() after each line is written
for line in f:
if not line.endswith(_CRLF):
line = line.rstrip(b"\r\n") + _CRLF
if line.startswith(b'.'):
line = b'.' + line
self.file.write(line)
self.file.write(b".\r\n")
self.file.flush()
return self._getresp()
def post(self, data):
"""Process a POST command. Arguments:
- data: bytes object, iterable or file containing the article
Returns:
- resp: server response if successful"""
return self._post('POST', data)
def ihave(self, message_id, data):
"""Process an IHAVE command. Arguments:
- message_id: message-id of the article
- data: file containing the article
Returns:
- resp: server response if successful
Note that if the server refuses the article an exception is raised."""
return self._post('IHAVE {0}'.format(message_id), data)
def _close(self):
self.file.close()
del self.file
def quit(self):
"""Process a QUIT command and close the socket. Returns:
- resp: server response if successful"""
try:
resp = self._shortcmd('QUIT')
finally:
self._close()
return resp
def login(self, user=None, password=None, usenetrc=True):
if self.authenticated:
raise ValueError("Already logged in.")
if not user and not usenetrc:
raise ValueError(
"At least one of `user` and `usenetrc` must be specified")
# If no login/password was specified but netrc was requested,
# try to get them from ~/.netrc
# Presume that if .netrc has an entry, NNRP authentication is required.
try:
if usenetrc and not user:
import netrc
credentials = netrc.netrc()
auth = credentials.authenticators(self.host)
if auth:
user = auth[0]
password = auth[2]
except OSError:
pass
# Perform NNTP authentication if needed.
if not user:
return
resp = self._shortcmd('authinfo user ' + user)
if resp.startswith('381'):
if not password:
raise NNTPReplyError(resp)
else:
resp = self._shortcmd('authinfo pass ' + password)
if not resp.startswith('281'):
raise NNTPPermanentError(resp)
# Capabilities might have changed after login
self._caps = None
self.getcapabilities()
# Attempt to send mode reader if it was requested after login.
# Only do so if we're not in reader mode already.
if self.readermode_afterauth and 'READER' not in self._caps:
self._setreadermode()
# Capabilities might have changed after MODE READER
self._caps = None
self.getcapabilities()
def _setreadermode(self):
try:
self.welcome = self._shortcmd('mode reader')
except NNTPPermanentError:
# Error 5xx, probably 'not implemented'
pass
except NNTPTemporaryError as e:
if e.response.startswith('480'):
# Need authorization before 'mode reader'
self.readermode_afterauth = True
else:
raise
if _have_ssl:
def starttls(self, context=None):
"""Process a STARTTLS command. Arguments:
- context: SSL context to use for the encrypted connection
"""
# Per RFC 4642, STARTTLS MUST NOT be sent after authentication or if
# a TLS session already exists.
if self.tls_on:
raise ValueError("TLS is already enabled.")
if self.authenticated:
raise ValueError("TLS cannot be started after authentication.")
resp = self._shortcmd('STARTTLS')
if resp.startswith('382'):
self.file.close()
self.sock = _encrypt_on(self.sock, context, self.host)
self.file = self.sock.makefile("rwb")
self.tls_on = True
# Capabilities may change after TLS starts up, so ask for them
# again.
self._caps = None
self.getcapabilities()
else:
raise NNTPError("TLS failed to start.")
class NNTP(_NNTPBase):
def __init__(self, host, port=NNTP_PORT, user=None, password=None,
readermode=None, usenetrc=False,
timeout=_GLOBAL_DEFAULT_TIMEOUT):
"""Initialize an instance. Arguments:
- host: hostname to connect to
- port: port to connect to (default the standard NNTP port)
- user: username to authenticate with
- password: password to use with username
- readermode: if true, send 'mode reader' command after
connecting.
- usenetrc: allow loading username and password from ~/.netrc file
if not specified explicitly
- timeout: timeout (in seconds) used for socket connections
readermode is sometimes necessary if you are connecting to an
NNTP server on the local machine and intend to call
reader-specific commands, such as `group'. If you get
unexpected NNTPPermanentErrors, you might need to set
readermode.
"""
self.host = host
self.port = port
self.sock = socket.create_connection((host, port), timeout)
file = None
try:
file = self.sock.makefile("rwb")
_NNTPBase.__init__(self, file, host,
readermode, timeout)
if user or usenetrc:
self.login(user, password, usenetrc)
except:
if file:
file.close()
self.sock.close()
raise
def _close(self):
try:
_NNTPBase._close(self)
finally:
self.sock.close()
if _have_ssl:
class NNTP_SSL(_NNTPBase):
def __init__(self, host, port=NNTP_SSL_PORT,
user=None, password=None, ssl_context=None,
readermode=None, usenetrc=False,
timeout=_GLOBAL_DEFAULT_TIMEOUT):
"""This works identically to NNTP.__init__, except for the change
in default port and the `ssl_context` argument for SSL connections.
"""
self.sock = socket.create_connection((host, port), timeout)
file = None
try:
self.sock = _encrypt_on(self.sock, ssl_context, host)
file = self.sock.makefile("rwb")
_NNTPBase.__init__(self, file, host,
readermode=readermode, timeout=timeout)
if user or usenetrc:
self.login(user, password, usenetrc)
except:
if file:
file.close()
self.sock.close()
raise
def _close(self):
try:
_NNTPBase._close(self)
finally:
self.sock.close()
__all__.append("NNTP_SSL")
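
# Quick usage sketch (illustrative; the hostname and group name are
# placeholders). Both NNTP and NNTP_SSL support the context-manager protocol
# defined in _NNTPBase, so QUIT and the socket close happen automatically:
#
# with NNTP('news.example.com') as s:
#     resp, count, first, last, name = s.group('comp.lang.python')
#     resp, overviews = s.over((last - 9, last))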
# Test retrieval when run as a script.
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description="""\
nntplib built-in demo - display the latest articles in a newsgroup""")
parser.add_argument('-g', '--group', default='gmane.comp.python.general',
help='group to fetch messages from (default: %(default)s)')
parser.add_argument('-s', '--server', default='news.gmane.org',
help='NNTP server hostname (default: %(default)s)')
parser.add_argument('-p', '--port', default=-1, type=int,
help='NNTP port number (default: %s / %s)' % (NNTP_PORT, NNTP_SSL_PORT))
parser.add_argument('-n', '--nb-articles', default=10, type=int,
help='number of articles to fetch (default: %(default)s)')
parser.add_argument('-S', '--ssl', action='store_true', default=False,
help='use NNTP over SSL')
args = parser.parse_args()
port = args.port
if not args.ssl:
if port == -1:
port = NNTP_PORT
s = NNTP(host=args.server, port=port)
else:
if port == -1:
port = NNTP_SSL_PORT
s = NNTP_SSL(host=args.server, port=port)
caps = s.getcapabilities()
if 'STARTTLS' in caps:
s.starttls()
resp, count, first, last, name = s.group(args.group)
print('Group', name, 'has', count, 'articles, range', first, 'to', last)
def cut(s, lim):
if len(s) > lim:
s = s[:lim - 4] + "..."
return s
first = str(int(last) - args.nb_articles + 1)
resp, overviews = s.xover(first, last)
for artnum, over in overviews:
author = decode_header(over['from']).split('<', 1)[0]
subject = decode_header(over['subject'])
lines = int(over[':lines'])
print("{:7} {:20} {:42} ({})".format(
artnum, cut(author, 20), cut(subject, 42), lines)
)
s.quit()
| gpl-2.0 | 3,845,756,644,645,159,000 | 36.557105 | 96 | 0.579089 | false |