| repo_name (string, len 5-100) | path (string, len 4-299) | copies (string, 990 classes) | size (string, len 4-7) | content (string, len 666-1.03M) | license (string, 15 classes) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
maas/maas | src/metadataserver/builtin_scripts/tests/test_builtin_scripts.py | 1 | 7811 | # Copyright 2017 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
import copy
from datetime import timedelta
import random
from testtools.matchers import ContainsAll
from maasserver.models import ControllerInfo, VersionedTextFile
from maasserver.testing.factory import factory
from maasserver.testing.testcase import MAASServerTestCase
from maasserver.utils.orm import reload_object
from metadataserver.builtin_scripts import (
BUILTIN_SCRIPTS,
load_builtin_scripts,
)
from metadataserver.enum import SCRIPT_TYPE_CHOICES
from metadataserver.models import Script
from provisioningserver.refresh.node_info_scripts import NODE_INFO_SCRIPTS
class TestBuiltinScripts(MAASServerTestCase):
"""Test that builtin scripts get properly added and updated."""
def setUp(self):
super().setUp()
self.controller = factory.make_RegionRackController()
ControllerInfo.objects.set_version(self.controller, "3.0.0")
def test_creates_scripts(self):
load_builtin_scripts()
for script in BUILTIN_SCRIPTS:
script_in_db = Script.objects.get(name=script.name)
# While MAAS allows user scripts to leave these fields blank,
# builtin scripts should always have values.
self.assertTrue(script_in_db.title, script.name)
self.assertTrue(script_in_db.description, script.name)
self.assertTrue(script_in_db.script.data, script.name)
self.assertNotEqual([], script_in_db.tags, script.name)
# These values should always be set by the script loader.
self.assertEqual(
"Created by maas-3.0.0",
script_in_db.script.comment,
script.name,
)
self.assertTrue(script_in_db.default, script.name)
if (
script.name in NODE_INFO_SCRIPTS
and NODE_INFO_SCRIPTS[script.name]["run_on_controller"]
):
self.assertIn("deploy-info", script_in_db.tags)
else:
self.assertNotIn("deploy-info", script_in_db.tags)
def test_update_script(self):
load_builtin_scripts()
update_script_values = random.choice(BUILTIN_SCRIPTS)
script = Script.objects.get(name=update_script_values.name)
# Fields which we can update
orig_title = script.title
orig_description = script.description
orig_script_type = script.script_type
orig_results = script.results
orig_parameters = script.parameters
script.title = factory.make_string()
script.description = factory.make_string()
script.script_type = factory.pick_choice(SCRIPT_TYPE_CHOICES)
script.results = [factory.make_name("result")]
script.script.parameters = {
factory.make_name("param"): {"type": "storage"}
}
# Put fake old data in to simulate updating a script.
old_script = VersionedTextFile.objects.create(
data=factory.make_string()
)
script.script = old_script
# Change maas version
ControllerInfo.objects.set_version(self.controller, "3.0.1")
# User changeable fields.
user_tags = [factory.make_name("tag") for _ in range(3)]
script.tags = copy.deepcopy(user_tags)
user_timeout = timedelta(random.randint(0, 1000))
script.timeout = user_timeout
script.save()
load_builtin_scripts()
script = reload_object(script)
self.assertEqual(orig_title, script.title, script.name)
self.assertEqual(orig_description, script.description, script.name)
self.assertEqual(orig_script_type, script.script_type, script.name)
self.assertDictEqual(orig_results, script.results, script.name)
self.assertDictEqual(orig_parameters, script.parameters, script.name)
self.assertThat(script.tags, ContainsAll(user_tags))
self.assertEqual(user_timeout, script.timeout)
self.assertEqual(old_script, script.script.previous_version)
self.assertEqual("Updated by maas-3.0.1", script.script.comment)
self.assertTrue(script.default)
def test_update_removes_deploy_info_tag(self):
load_builtin_scripts()
script = (
Script.objects.filter(default=True)
.exclude(tags__contains=["deploy-info"])
.first()
)
script.add_tag("deploy-info")
# Put fake old data in to simulate updating a script.
old_script = VersionedTextFile.objects.create(
data=factory.make_string()
)
script.script = old_script
script.save()
load_builtin_scripts()
script = reload_object(script)
self.assertNotIn("deploy-info", script.tags)
def test_update_doesnt_revert_script(self):
load_builtin_scripts()
update_script_index = random.randint(0, len(BUILTIN_SCRIPTS) - 2)
update_script_values = BUILTIN_SCRIPTS[update_script_index]
script = Script.objects.get(name=update_script_values.name)
# Put fake new data in to simulate another MAAS region updating
# to a newer version.
new_script = factory.make_string()
script.script = script.script.update(new_script)
# Change maas version
ControllerInfo.objects.set_version(self.controller, "3.0.1")
# Fake user updates
user_tags = [factory.make_name("tag") for _ in range(3)]
script.tags = user_tags
user_timeout = timedelta(random.randint(0, 1000))
script.timeout = user_timeout
script.save()
# Test that subsequent scripts still get updated
second_update_script_values = BUILTIN_SCRIPTS[update_script_index + 1]
second_script = Script.objects.get(
name=second_update_script_values.name
)
# Put fake old data in to simulate updating a script.
orig_title = second_script.title
orig_description = second_script.description
orig_script_type = second_script.script_type
orig_results = second_script.results
orig_parameters = second_script.parameters
second_script.title = factory.make_string()
second_script.description = factory.make_string()
second_script.script_type = factory.pick_choice(SCRIPT_TYPE_CHOICES)
second_script.results = [factory.make_name("result")]
second_script.script.parameters = {
factory.make_name("param"): {"type": "storage"}
}
# Put fake old data in to simulate updating a script.
old_script = VersionedTextFile.objects.create(
data=factory.make_string()
)
second_script.script = old_script
second_script.save()
load_builtin_scripts()
script = reload_object(script)
self.assertEqual(update_script_values.name, script.name)
self.assertEqual(new_script, script.script.data)
self.assertTrue(min([tag in script.tags for tag in user_tags]))
self.assertEqual(user_timeout, script.timeout)
self.assertTrue(script.default)
second_script = reload_object(second_script)
self.assertEqual(orig_title, second_script.title)
self.assertEqual(orig_description, second_script.description)
self.assertEqual(orig_script_type, second_script.script_type)
self.assertDictEqual(orig_results, second_script.results)
self.assertDictEqual(orig_parameters, second_script.parameters)
self.assertEqual(old_script, second_script.script.previous_version)
self.assertEqual("Updated by maas-3.0.1", second_script.script.comment)
self.assertTrue(second_script.default)
| agpl-3.0 | 4,398,223,400,142,624,300 | 38.852041 | 79 | 0.656894 | false |
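Editor's note: the row above stores a MAAS test module that exercises `load_builtin_scripts()`. As a minimal, hypothetical usage sketch (not part of the dataset row, and assuming a configured MAAS/Django environment), the create-and-inspect pattern the tests rely on looks like this:

```python
# Minimal sketch, assuming a configured MAAS/Django environment is available.
from metadataserver.builtin_scripts import BUILTIN_SCRIPTS, load_builtin_scripts
from metadataserver.models import Script

load_builtin_scripts()                      # creates (or updates) every builtin script
for builtin in BUILTIN_SCRIPTS:
    script = Script.objects.get(name=builtin.name)
    print(script.name, script.script_type, script.tags)
```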
runt18/nupic | src/nupic/support/configuration_base.py | 1 | 13853 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from __future__ import with_statement
import os
import logging
from xml.etree import ElementTree
from pkg_resources import resource_string
# Turn on additional print statements
DEBUG = False
DEFAULT_CONFIG = "nupic-default.xml"
USER_CONFIG = "nupic-site.xml"
CUSTOM_CONFIG = "nupic-custom.xml"
def _getLogger():
logger = logging.getLogger("com.numenta.nupic.tools.configuration_base")
if DEBUG:
logger.setLevel(logging.DEBUG)
return logger
class Configuration(object):
""" This class can be used to fetch NuPic configuration settings which are
stored in one or more XML files.
If the environment variable 'NTA_CONF_PATH' is defined, then the configuration
files are expected to be in the NTA_CONF_PATH search path, which is a ':'
separated list of directories (on Windows the separator is a ';').
If NTA_CONF_PATH is not defined, then it is loaded via pkg_resources.
"""
# Once we read in the properties, they are stored in this dict
_properties = None
# This stores the paths we search for config files. It can be modified through
# the setConfigPaths() method.
_configPaths = None
# Any environment variable prefixed with this string serves as an override
# to property defined in the current configuration
envPropPrefix = 'NTA_CONF_PROP_'
@classmethod
def getString(cls, prop):
""" Retrieve the requested property as a string. If property does not exist,
then KeyError will be raised.
Parameters:
----------------------------------------------------------------
prop: name of the property
retval: property value as a string
"""
if cls._properties is None:
cls._readStdConfigFiles()
# Allow configuration properties to be overridden via environment variables
envValue = os.environ.get("{0!s}{1!s}".format(cls.envPropPrefix,
prop.replace('.', '_')), None)
if envValue is not None:
return envValue
return cls._properties[prop]
@classmethod
def getBool(cls, prop):
""" Retrieve the requested property and return it as a bool. If property
does not exist, then KeyError will be raised. If the property value is
neither 0 nor 1, then ValueError will be raised
Parameters:
----------------------------------------------------------------
prop: name of the property
retval: property value as bool
"""
value = cls.getInt(prop)
if value not in (0, 1):
raise ValueError("Expected 0 or 1, but got {0!r} in config property {1!s}".format(
value, prop))
return bool(value)
@classmethod
def getInt(cls, prop):
""" Retrieve the requested property and return it as an int. If property
does not exist, then KeyError will be raised.
Parameters:
----------------------------------------------------------------
prop: name of the property
retval: property value as int
"""
return int(cls.getString(prop))
@classmethod
def getFloat(cls, prop):
""" Retrieve the requested property and return it as a float. If property
does not exist, then KeyError will be raised.
Parameters:
----------------------------------------------------------------
prop: name of the property
retval: property value as float
"""
return float(cls.getString(prop))
@classmethod
def get(cls, prop, default=None):
""" Get the value of the given configuration property as string. This
returns a string which is the property value, or the value of "default" arg
if the property is not found. Use Configuration.getString() instead.
NOTE: it's atypical for our configuration properties to be missing - a
missing configuration property is usually a very serious error. Because
of this, it's preferable to use one of the getString, getInt, getFloat,
etc. variants instead of get(). Those variants will raise KeyError when
an expected property is missing.
Parameters:
----------------------------------------------------------------
prop: name of the property
default: default value to return if property does not exist
retval: property value (as a string), or default if the property does
not exist.
"""
try:
return cls.getString(prop)
except KeyError:
return default
@classmethod
def set(cls, prop, value):
""" Set the value of the given configuration property.
Parameters:
----------------------------------------------------------------
prop: name of the property
value: value to set
"""
if cls._properties is None:
cls._readStdConfigFiles()
cls._properties[prop] = str(value)
@classmethod
def dict(cls):
""" Return a dict containing all of the configuration properties
Parameters:
----------------------------------------------------------------
retval: dict containing all configuration properties.
"""
if cls._properties is None:
cls._readStdConfigFiles()
# Make a copy so we can update any current values obtained from environment
# variables
result = dict(cls._properties)
keys = os.environ.keys()
replaceKeys = filter(lambda x: x.startswith(cls.envPropPrefix),
keys)
for envKey in replaceKeys:
key = envKey[len(cls.envPropPrefix):]
key = key.replace('_', '.')
result[key] = os.environ[envKey]
return result
@classmethod
def readConfigFile(cls, filename, path=None):
""" Parse the given XML file and store all properties it describes.
Parameters:
----------------------------------------------------------------
filename: name of XML file to parse (no path)
path: path of the XML file. If None, then use the standard
configuration search path.
"""
properties = cls._readConfigFile(filename, path)
# Create properties dict if necessary
if cls._properties is None:
cls._properties = dict()
for name in properties:
if 'value' in properties[name]:
cls._properties[name] = properties[name]['value']
@classmethod
def _readConfigFile(cls, filename, path=None):
""" Parse the given XML file and return a dict describing the file.
Parameters:
----------------------------------------------------------------
filename: name of XML file to parse (no path)
path: path of the XML file. If None, then use the standard
configuration search path.
retval: returns a dict with each property as a key and a dict of all
the property's attributes as value
"""
outputProperties = dict()
# Get the path to the config files.
if path is None:
filePath = cls.findConfigFile(filename)
else:
filePath = os.path.join(path, filename)
# ------------------------------------------------------------------
# Read in the config file
try:
if filePath is not None:
try:
# Log at debug level; the console log level is set to warning
_getLogger().debug("Loading config file: %s", filePath)
with open(filePath, 'r') as inp:
contents = inp.read()
except Exception:
raise RuntimeError("Expected configuration file at {0!s}".format(filePath))
else:
# If the file was not found in the normal search paths, which includes
# checking the NTA_CONF_PATH, we'll try loading it from pkg_resources.
try:
contents = resource_string("nupic.support", filename)
except Exception as resourceException:
# We expect these to be read, and if they don't exist we'll just use
# an empty configuration string.
if filename in [USER_CONFIG, CUSTOM_CONFIG]:
contents = '<configuration/>'
else:
raise resourceException
elements = ElementTree.XML(contents)
if elements.tag != 'configuration':
raise RuntimeError("Expected top-level element to be 'configuration' "
"but got '%s'" % (elements.tag))
# ------------------------------------------------------------------
# Add in each property found
propertyElements = elements.findall('./property')
for propertyItem in propertyElements:
propInfo = dict()
# Parse this property element
propertyAttributes = list(propertyItem)
for propertyAttribute in propertyAttributes:
propInfo[propertyAttribute.tag] = propertyAttribute.text
# Get the name
name = propInfo.get('name', None)
# value is allowed to be empty string
if 'value' in propInfo and propInfo['value'] is None:
value = ''
else:
value = propInfo.get('value', None)
if value is None:
if 'novalue' in propInfo:
# Placeholder "novalue" properties are intended to be overridden
# via dynamic configuration or another configuration layer.
continue
else:
raise RuntimeError("Missing 'value' element within the property "
"element: => %s " % (str(propInfo)))
# The value is allowed to contain substitution tags of the form
# ${env.VARNAME}, which should be substituted with the corresponding
# environment variable values
restOfValue = value
value = ''
while True:
# Find the beginning of substitution tag
pos = restOfValue.find('${env.')
if pos == -1:
# No more environment variable substitutions
value += restOfValue
break
# Append prefix to value accumulator
value += restOfValue[0:pos]
# Find the end of current substitution tag
varTailPos = restOfValue.find('}', pos)
if varTailPos == -1:
raise RuntimeError("Trailing environment variable tag delimiter '}'"
" not found in %r" % (restOfValue))
# Extract environment variable name from tag
varname = restOfValue[pos+6:varTailPos]
if varname not in os.environ:
raise RuntimeError("Attempting to use the value of the environment"
" variable %r, which is not defined" % (varname))
envVarValue = os.environ[varname]
value += envVarValue
restOfValue = restOfValue[varTailPos+1:]
# Check for errors
if name is None:
raise RuntimeError("Missing 'name' element within following property "
"element:\n => %s " % (str(propInfo)))
propInfo['value'] = value
outputProperties[name] = propInfo
return outputProperties
except Exception:
_getLogger().exception("Error while parsing configuration file: %s.",
filePath)
raise
@classmethod
def clear(cls):
""" Clear out the entire configuration.
"""
cls._properties = None
cls._configPaths = None
@classmethod
def findConfigFile(cls, filename):
""" Search the configuration path (specified via the NTA_CONF_PATH
environment variable) for the given filename. If found, return the complete
path to the file.
Parameters:
----------------------------------------------------------------
filename: name of file to locate
"""
paths = cls.getConfigPaths()
for p in paths:
testPath = os.path.join(p, filename)
if os.path.isfile(testPath):
return os.path.join(p, filename)
@classmethod
def getConfigPaths(cls):
""" Return the list of paths to search for configuration files.
Parameters:
----------------------------------------------------------------
retval: list of paths.
"""
configPaths = []
if cls._configPaths is not None:
return cls._configPaths
else:
if 'NTA_CONF_PATH' in os.environ:
configVar = os.environ['NTA_CONF_PATH']
# Return as a list of paths
configPaths = configVar.split(os.pathsep)
return configPaths
@classmethod
def setConfigPaths(cls, paths):
""" Modify the paths we use to search for configuration files.
Parameters:
----------------------------------------------------------------
paths: list of paths to search for config files.
"""
cls._configPaths = list(paths)
@classmethod
def _readStdConfigFiles(cls):
""" Read in all standard configuration files
"""
# Default one first
cls.readConfigFile(DEFAULT_CONFIG)
# Site specific one can override properties defined in default
cls.readConfigFile(USER_CONFIG)
| agpl-3.0 | 9,052,683,665,392,102,000 | 31.141531 | 88 | 0.591208 | false |
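Editor's note: the row above is NuPIC's `Configuration` helper. Below is a minimal sketch of how its typed getters and the `NTA_CONF_PROP_` environment override interact; the property name is invented for illustration, and a working nupic installation with its default XML files is assumed:

```python
# Minimal sketch; 'example.retryCount' is a made-up property name.
import os
from nupic.support.configuration_base import Configuration

Configuration.set('example.retryCount', 3)
print(Configuration.getInt('example.retryCount'))      # -> 3

# Dots become underscores in the override variable name.
os.environ['NTA_CONF_PROP_example_retryCount'] = '5'
print(Configuration.getInt('example.retryCount'))      # -> 5, environment wins
```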
MayuPuter/pimouse_ros | test/travis_test_buzzer.py | 1 | 2076 | #!/usr/bin/env python
#encoding: utf8
import rospy, unittest, rostest, actionlib
from pimouse_ros.msg import MusicAction, MusicResult, MusicFeedback, MusicGoal
import rosnode
import time
from std_msgs.msg import UInt16
class BuzzerTest(unittest.TestCase):
def setUp(self):
self.client = actionlib.SimpleActionClient("music", MusicAction)
self.device_values = []
def test_node_exist(self):
nodes = rosnode.get_node_names()
self.assertIn('/buzzer', nodes, "node does not exist")
def test_put_values(self):
pub = rospy.Publisher('/buzzer', UInt16)
for i in range(10):
pub.publish(1234)
time.sleep(0.1)
with open("/dev/rtbuzzer0", "r") as f:
data = f.readline()
self.assertEqual(data, "1234\n", "value was not written to rtbuzzer0")
def test_music(self):
goal = MusicGoal()
goal.freqs = [100, 200, 300, 0]
goal.durations = [2, 2, 2, 2]
self.client.wait_for_server()
self.client.send_goal(goal, feedback_cb=self.feedback_cb)
self.client.wait_for_result()
self.assertTrue(self.client.get_result(), "invalid result")
self.assertEqual(goal.freqs, self.device_values, "invalid feedback:" + ",".join([str(e) for e in self.device_values]))
### preemption ###
self.device_values = []
self.client.send_goal(goal, feedback_cb=self.feedback_cb)
self.client.wait_for_result(rospy.Duration.from_sec(0.5))
self.assertFalse(self.client.get_result(), "stop was requested but a result was returned")
self.assertFalse(goal.freqs == self.device_values, "not stopped")
def feedback_cb(self, feedback):
with open("/dev/rtbuzzer0", "r") as f:
data = f.readline()
self.device_values.append(int(data.rstrip()))
if __name__ == '__main__':
time.sleep(3)
rospy.init_node('travis_test_buzzer')
rostest.rosrun('pimouse_ros', 'travis_test_buzzer', BuzzerTest)
| gpl-3.0 | -2,155,820,610,549,270,000 | 29.985075 | 120 | 0.694123 | false |
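Editor's note: the row above tests the pimouse buzzer node. A minimal sketch of driving the same `music` action server outside the test harness follows; the node name and the note values are chosen for the example, and a running ROS master plus the buzzer node are assumed:

```python
# Minimal sketch, assuming roscore and the pimouse buzzer/music node are running.
import rospy
import actionlib
from pimouse_ros.msg import MusicAction, MusicGoal

rospy.init_node('buzzer_demo')                  # node name chosen for the example
client = actionlib.SimpleActionClient('music', MusicAction)
client.wait_for_server()

goal = MusicGoal()
goal.freqs = [440, 880, 0]                      # frequencies in Hz, as in the test's goal
goal.durations = [1, 1, 1]                      # seconds per entry
client.send_goal(goal)
client.wait_for_result()
```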
misterwindupbird/IBO | ego/gaussianprocess/__init__.py | 1 | 18612 | #!/usr/bin/python
# Copyright (C) 2010, 2011 by Eric Brochu
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# from numpy import array, sqrt, nan, exp, arange
# from matplotlib.pylab import figure, subplot, plot, clf, poly_between, axvline
# import numpy.random as numpyrandom
# from random import sample, random
import ctypes
import ctypes.util
from ctypes import cdll, POINTER, c_int, c_double, c_char_p
from time import time
# from copy import copy
import pdb
from numpy import *
from numpy.linalg import inv, LinAlgError
from scipy.optimize import fmin_bfgs
from .kernel import GaussianKernel_ard, GaussianKernel_iso, MaternKernel3, MaternKernel5
#############################################################################
# this implementation of erf, cdf and pdf is substantially faster than
# the scipy implementation (a C implementation would probably be faster yet)
#############################################################################
#
# from: http://www.cs.princeton.edu/introcs/21function/ErrorFunction.java.html
# Implements the Gauss error function.
# erf(z) = 2 / sqrt(pi) * integral(exp(-t*t), t = 0..z)
#
# fractional error in math formula less than 1.2 * 10 ^ -7.
# although subject to catastrophic cancellation when z in very close to 0
# from Chebyshev fitting formula for erf(z) from Numerical Recipes, 6.2
def erf(z):
t = 1.0 / (1.0 + 0.5 * abs(z))
# use Horner's method
ans = 1 - t * exp( -z*z - 1.26551223 +
t * ( 1.00002368 +
t * ( 0.37409196 +
t * ( 0.09678418 +
t * (-0.18628806 +
t * ( 0.27886807 +
t * (-1.13520398 +
t * ( 1.48851587 +
t * (-0.82215223 +
t * ( 0.17087277))))))))))
if z >= 0.0:
return ans
else:
return -ans
def CDF(x):
return 0.5 * (1 + erf((x) * 0.707106))
def PDF(x):
return exp(-(x**2/2)) * 0.398942
class GaussianProcess(object):
def __init__(self, kernel, X=None, Y=None, prior=None, noise=.1, gnoise=1e-4, G=None):
"""
Initialize a Gaussian Process.
@param kernel: kernel object to use
@param prior: object defining the GP prior on the mean. must
be a descendant of GPMeanPrior
@param noise: noise hyperparameter sigma^2_n
@param X: initial training data
@param Y: initial observations
"""
self.kernel = kernel
self.prior = prior
self.noise = noise
self.gnoise = array(gnoise, ndmin=1)
self.R = None
if (X is None and Y is not None) or (X is not None and Y is None):
raise ValueError
self.X = zeros((0,0))
self.Y = zeros((0))
self.G = None
self.name = 'GP' # for legend
self.starttime = time() # for later analysis
if X is not None:
self.addData(X, Y)
self.augR = None
self.augL = None
self.augX = None
# mostly for testing/logging
self.selected = None
self.endtime = None
# if prior is None:
# print 'prior is None'
# else:
# print 'prior is NOT None'
#
# if self.prior is None:
# print 'self.prior is None'
# else:
# print 'self.prior is NOT None'
def _computeCorrelations(self, X):
""" compute correlations between data """
M, (N,D) = len(self.X), X.shape
r = eye(N, dtype=float) + self.noise
m = empty((M,N))
for i in xrange(N):
for j in xrange(i):
r[i,j] = r[j,i] = self.kernel.cov(X[i], X[j])
for i in xrange(M):
for j in xrange(N):
m[i,j] = self.kernel.cov(self.X[i], X[j])
return r, m
def _computeAugCorrelations(self, X):
""" compute correlations between data """
M, (N,D) = len(self.augX), X.shape
r = eye(N, dtype=float) + self.noise
m = empty((M,N))
for i in xrange(N):
for j in xrange(i):
r[i,j] = r[j,i] = self.kernel.cov(X[i], X[j])
for i in xrange(M):
for j in xrange(N):
m[i,j] = self.kernel.cov(self.augX[i], X[j])
return r, m
def posterior(self, X, getvar=True):
""" Get posterior mean and variance for a point X. """
if len(self.X)==0:
if self.prior is None:
if getvar:
return 0.0, 1.0
else:
return 0.0
else:
if getvar:
return self.prior.mu(X), 1.0
else:
return self.prior.mu(X)
X = array(X, copy=False, dtype=float, ndmin=2)
M, (N,D) = len(self.X), X.shape
m = 0.0
if self.prior is not None:
m = self.prior.mu(X)
assert isscalar(m)
if self.G is None:
# NO GRADIENT DATA.
d = self.Y-m
r = empty((M, N))
for i in xrange(M):
for j in xrange(N):
r[i,j] = self.kernel.cov(self.X[i], X[j])
else:
# WITH GRADIENT DATA.
d = hstack(map(hstack, zip(self.Y-m, self.G)))
r = empty((M*(D+1), N))
for i in xrange(M):
for j in xrange(N):
A = i*(D+1)
cov = self.kernel.covWithGradients(self.X[i], X[j])
r[A:A+D+1,j] = cov[:,0]
# calculate the mean.
Lr = linalg.solve(self.L, r)
mu = m + dot(Lr.T, linalg.solve(self.L,d))
if getvar:
# calculate the variance.
if self.augL is None:
sigma2 = (1 + self.noise) - sum(Lr**2, axis=0)
else:
M, (N,D) = len(self.augX), X.shape
r = empty((M, N))
for i in xrange(M):
for j in xrange(N):
r[i,j] = self.kernel.cov(self.augX[i], X[j])
Lr = linalg.solve(self.augL, r)
sigma2 = (1 + self.noise) - sum(Lr**2, axis=0)
sigma2 = clip(sigma2, 10e-8, 10)
return mu[0], sigma2[0]
else:
return mu[0]
def posteriors(self, X):
"""
get arrays of posterior values for the array in X
"""
M = []
V = []
for x in X:
if isscalar(x):
m, v = self.posterior(array([x]))
else:
m, v = self.posterior(x)
M.append(m)
V.append(v)
return array(M), array(V)
def mu(self, x):
"""
get posterior mean for a point x
NOTE: if you are getting the variance as well, this is less efficient
than using self.posterior()
"""
return self.posterior(x, getvar=False)
def negmu(self, x):
"""
needed occasionally for optimization
"""
nm = -self.mu(x)
# if self.prior is not None and len(self.X)==0:
# print 'no data, using prior = %.4f'%nm
return nm
def addData(self, X, Y, G=None):
"""
Add new data to model and update.
We assume that X is an (N,D)-array, Y is an N-vector, and G is either
an (N,D)-array or None. Further, if X or G are a single D-dimensional
vector these will be interpreted as (1,D)-arrays, i.e. one observation.
"""
X = array(X, copy=False, dtype=float, ndmin=2)
Y = array(Y, copy=False, dtype=float, ndmin=1).flatten()
G = array(G, copy=False, dtype=float, ndmin=2) if (G is not None) else None
assert len(Y) == len(X), 'wrong number of Y-observations given'
assert G is None or G.shape == X.shape, 'wrong number (or dimensionality) of gradient-observations given'
# print '(', len(self.X), self.G, G, ')'
# assert not (len(self.X) > 0 and self.G is not None and G is None), 'gradients must either be always or never given'
# this just makes sure that if we used the default gradient noise for
# each dimension it gets lengthened to the proper size.
if len(self.X) == 0 and len(self.gnoise) == 1:
self.gnoise = tile(self.gnoise, X.shape[1])
# compute the correlations between our data points.
r, m = \
self._computeCorrelations(X) if (G is None) else \
self._computeCorrelationsWithGradients(X)
if len(self.X) == 0:
self.X = copy(X)
self.Y = copy(Y)
self.G = copy(G) if (G is not None) else None
self.R = r
self.L = linalg.cholesky(self.R)
else:
self.X = r_[self.X, X]
self.Y = r_[self.Y, Y]
self.G = r_[self.G, G] if (G is not None) else None
self.R = r_[c_[self.R, m], c_[m.T, r]]
z = linalg.solve(self.L, m)
d = linalg.cholesky(r - dot(z.T, z))
self.L = r_[c_[self.L, zeros(z.shape)], c_[z.T, d]]
# print '\nself.G =', G, ', for which selfG is None is', (self.G is None)
def getYfromX(self, qx):
"""
get the (first) Y value for a given training datum X. return None if x
is not found.
"""
for x, y in zip(self.X, self.Y):
if all(qx==x):
return y
return None
def done(self, x):
"""
indication that the GP has been terminated and that a final point has
been selected (mostly relevant for logging)
"""
self.selected = x
self.endtime = time()
class PrefGaussianProcess(GaussianProcess):
"""
Like a regular Gaussian Process, but trained on preference data. Note
that you cannot (currently) add non-preference data. This is because I
haven't gotten around to it, not because it's impossible.
"""
def __init__(self, kernel, prefs=None, **kwargs):
super(PrefGaussianProcess, self).__init__(kernel, **kwargs)
self.preferences = []
self.C = None
if prefs is not None:
self.addPreferences(prefs)
def addPreferences(self, prefs, useC=True, showPrefLikelihood=False):
"""
Add a set of preferences to the GP and update.
@param prefs: sequence of preference triples (xv, xu, d) where xv
is a datum preferred to xu and d is the degree of
preference (0 = 'standard', 1 = 'greatly preferred')
"""
def S(x, prefinds, L, useC=False):
"""
the MAP functional to be minimized
"""
# print '***** x =',
# for xx in x:
# print '%.3f'%xx,
if useC:
lpath = ctypes.util.find_library('ego')
lib = cdll[lpath]
lib.logCDFs.restype = c_double
lib.logCDFs.argtypes = [c_int, POINTER(c_int), POINTER(c_double)]
pf = array(prefinds[:], dtype=c_int).reshape(-1)
cx = array(x.copy(), dtype=c_double)
result = lib.logCDFs(c_int(len(pf)), pf.ctypes.data_as(POINTER(c_int)), cx.ctypes.data_as(POINTER(c_double)))
logCDFs = result
else:
logCDFs = 0.
sigma = 1
epsilon = 1e-10
Z = sqrt(2) * sigma
for v, u, d in prefinds:
logCDFs += (d+1) * log(CDF((x[v]-x[u])/Z)+epsilon)
Lx = linalg.solve(L, x)
val = -logCDFs + dot(Lx, Lx)/2
if not isfinite(val):
print 'non-finite val!'
pdb.set_trace()
# print '\n***** val =', val
return val
# add new preferences
self.preferences.extend(prefs)
x2ind = {}
ind = 0
prefinds = []
vs = set()
for v, u, d in self.preferences:
v = tuple(v)
vs.add(v)
u = tuple(u)
if v not in x2ind:
x2ind[v] = ind
ind += 1
if u not in x2ind:
x2ind[u] = ind
ind += 1
prefinds.append((x2ind[v], x2ind[u], d))
newX = array([x for x, _ in sorted(x2ind.items(), key=lambda x:x[1])])
# use existing Ys as starting point for optimizer
lastY = {}
for x, y in zip(self.X, self.Y):
lastY[tuple(x)] = y
if len(self.Y) > 0:
ymax = max(self.Y)
ymin = min(self.Y)
else:
ymax = .5
ymin = -.5
start = []
for x in newX:
if tuple(x) in lastY:
start.append(lastY[tuple(x)])
else:
if tuple(x) in vs:
start.append(ymax)
else:
start.append(ymin)
# update X, R
self.X = zeros((0,0))
r, m = \
self._computeCorrelations(newX)
self.X = newX
self.R = r
self.L = linalg.cholesky(self.R)
# optimize S to find a good Y
# self.Y = fmin_bfgs(S, start, args=(prefinds, self.L), epsilon=0.1, maxiter=30, disp=0)
self.Y = fmin_bfgs(S, start, args=(prefinds, self.L), disp=0)
# print '[addPreferences] checking pref pairs'
for r, c, _ in self.preferences:
r = tuple(r)
c = tuple(c)
# print '%d %d' % (x2ind[r], x2ind[c]),
if self.Y[x2ind[r]] <= self.Y[x2ind[c]]:
# print ' FAILED! %.2f ! > %.2f' % (self.Y[x2ind[r]], self.Y[x2ind[c]])
# print ' can we fix it?'
# if there is nothing preferred to this item, bump it up
for r1, c1, _ in self.preferences:
if all(c1==r):
break
else:
self.Y[x2ind[r]] = self.Y[x2ind[c]] + .1
# print ' changed Y to %.2f' % self.Y[x2ind[r]]
# now we can learn the C matrix
self.C = eye(len(self.X), dtype=float) * 5
# self.C = zeros((len(self.X), len(self.X)))
for i in xrange(len(self.X)):
for j in xrange(len(self.X)):
for r, c, _ in self.preferences:
# print '******', r,c
alpha = 0
if all(r==self.X[i]) and all(c==self.X[j]):
alpha = -1
elif all(r==self.X[j]) and all(c==self.X[i]):
alpha = -1
elif all(r==self.X[i]) and i==j:
alpha = 1
elif all(c==self.X[i]) and i==j:
alpha = 1
if alpha != 0:
# print 'have an entry for %d, %d!' % (i,j)
d = (self.mu(r)-self.mu(c)) / (sqrt(2)*sqrt(self.noise))
# print '\td=',d
cdf = CDF(d)
pdf = PDF(d)
if cdf < 1e-10:
cdf = 1e-10
if pdf < 1e-10:
pdf = 1e-10
self.C[i,j] += alpha / (2*self.noise) * (pdf**2/cdf**2 + d * pdf/cdf)
try:
self.L = linalg.cholesky(self.R+linalg.inv(self.C))
except LinAlgError:
print '[addPreferences] GP.C matrix is ill-conditioned, adding regularizer delta = 1'
for i in xrange(10):
self.C += eye(len(self.X))
try:
self.L = linalg.cholesky(self.R+linalg.inv(self.C))
except LinAlgError:
print '[addPreferences] GP.C matrix is ill-conditioned, adding regularizer delta = %d' % (i+2)
else:
break
def addObservationPoint(self, X):
"""
Add a point at which we will observe, but for which we don't have the
observation yet. (Used by the gallery selection algorithms.)
"""
X = array(X, copy=False, dtype=float, ndmin=2)
if self.augR is None:
self.augR = self.R.copy()
self.augX = self.X.copy()
r, m = self._computeAugCorrelations(X)
self.augR = r_[c_[self.augR, m], c_[m.T, r]]
invC = zeros_like(self.augR)
invC[:self.C.shape[0], :self.C.shape[0]] = linalg.inv(self.C)
self.augL = linalg.cholesky(self.augR+invC)
self.augX = r_[self.augX, X]
def addData(self, X, Y, G=None):
"""
I have an idea about how to do this... (see notebook).
"""
raise NotImplementedError("can't (yet) add explicit ratings to preference GP")
| mit | -5,203,679,369,636,270,000 | 34.183365 | 125 | 0.481356 | false |
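Editor's note: the row above is the `GaussianProcess` implementation from the IBO package. A minimal sketch of fitting it to a few observations and querying the posterior is shown below; the kernel hyperparameter format is an assumption:

```python
# Minimal sketch; the GaussianKernel_iso hyperparameter array is an assumed format.
from numpy import array
from ego.gaussianprocess import GaussianProcess, GaussianKernel_iso

kernel = GaussianKernel_iso(array([1.0]))        # isotropic length-scale parameter
gp = GaussianProcess(kernel, noise=0.1)

gp.addData(array([[0.0], [1.0], [2.0]]),         # three 1-D training points
           array([0.0, 1.0, 0.5]))               # their observed values
mu, sigma2 = gp.posterior(array([1.5]))          # posterior mean and variance at x = 1.5
print(mu, sigma2)
```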
Robbie1977/AlignmentPipe | warp.py | 1 | 4075 | import datetime
import gc
import time
import cmtk
from cmtk import cur, tempfolder, active, run_stage, template, checkDir, host, templatedir
def warpRec(record, template=template, bgfile='image_Ch1.nrrd',
warpSet='--grid-spacing 80 --exploration 30 --coarsest 4 --accuracy 0.2 --refine 4 --energy-weight 1e-1'):
start = datetime.datetime.now()
record = checkDir(record)
print 'Staring warping alignment for: ' + record['name']
# bgfile = record['original_nrrd'][('Ch' + str(record['background_channel']) + '_file')]
warp, r = cmtk.warp(bgfile, template=template, settings=warpSet)
totaltime = datetime.datetime.now() - start
record['alignment_stage'] = 5
if r > 0:
record['alignment_stage'] = 1004
else:
if record['notes'] is None:
record['notes'] = time.strftime(
"%c") + ' Warp alignment performed by ' + host + ' in ' + str(totaltime)
else:
record['notes'] = record['notes'] + '\n' + time.strftime(
"%c") + ' Warp alignment performed by ' + host + ' in ' + str(totaltime)
if r == 99: record['alignment_stage'] = 2
record['max_stage'] = 5
record['last_host'] = host
return record
def warp(name, template=template, bgfile='image_Ch1.nrrd',
warpSet='--grid-spacing 80 --exploration 30 --coarsest 4 --accuracy 0.2 --refine 4 --energy-weight 1e-1'):
cur.execute("SELECT * FROM images_alignment WHERE alignment_stage = 4 AND name like %s", [name])
records = cur.fetchall()
key = []
for desc in cur.description:
key.append(desc[0])
for line in records:
record = dict(zip(key, line))
# clear old failed alignments:
cur.execute("UPDATE images_alignment SET alignment_stage = 4 WHERE last_host = %s AND alignment_stage = 1004",
[str(host)])
cur.connection.commit()
# remove image from stack before processing:
cur.execute("UPDATE images_alignment SET alignment_stage = 1004, last_host = %s WHERE id = %s ",
[str(host), str(record['id'])])
cur.connection.commit()
record = warpRec(record, template, bgfile, warpSet)
u = str(record['id']) + ' -> '
for k, v in record.items():
if not (k == 'id' or v == None or v == 'None'):
cur.execute("UPDATE images_alignment SET " + str(k) + "=%s WHERE id = %s ", [v, record['id']])
u = u + str(k) + '=' + str(v) + ', '
print u
cur.connection.commit()
gc.collect()
if __name__ == "__main__":
if active and '4' in run_stage:
cur.execute(
"SELECT images_alignment.name, system_template.file, images_original_nrrd.file, system_setting.cmtk_warp_var FROM images_alignment, system_template, system_setting, images_original_nrrd WHERE alignment_stage = 4 AND images_original_nrrd.channel = images_alignment.background_channel AND images_original_nrrd.image_id = images_alignment.id AND images_alignment.settings_id = system_setting.id AND system_setting.template_id = system_template.id ORDER BY images_alignment.id")
records = cur.fetchall()
total = len(records)
if total == 0:
cur.execute(
"UPDATE images_alignment SET alignment_stage = 4 FROM (SELECT id FROM images_alignment WHERE alignment_stage = 2004 ORDER BY id LIMIT 2) s WHERE s.id = images_alignment.id")
cur.connection.commit()
gc.collect()
count = 0
print records
for line in records:
count += 1
print 'Warp alignment: ' + str(count) + ' of ' + str(total)
warp(line[0], template=(templatedir + line[1]), bgfile=(tempfolder + line[2]), warpSet=line[3])
# clear old failed alignments:
cur.execute("UPDATE images_alignment SET alignment_stage = 4 WHERE last_host = %s AND alignment_stage = 1004",
[str(host)])
cur.connection.commit()
print 'done'
else:
print 'inactive or stage 4 not selected'
| mit | 6,924,095,669,354,803,000 | 47.511905 | 486 | 0.605644 | false |
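Editor's note: the row above is a pipeline stage that wraps CMTK warp registration. A minimal sketch of the underlying call that `warpRec()` makes is given below; the file and template paths are placeholders, and `cmtk` here is the project's own wrapper module rather than the CMTK binaries themselves:

```python
# Minimal sketch; paths are placeholders and cmtk is the project's wrapper module.
import cmtk

settings = ('--grid-spacing 80 --exploration 30 --coarsest 4 '
            '--accuracy 0.2 --refine 4 --energy-weight 1e-1')
warped, returncode = cmtk.warp('image_Ch1.nrrd',
                               template='template.nrrd',
                               settings=settings)
if returncode > 0:
    print('warp alignment failed')
```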
ashgan-dev/mailthon | docs/conf.py | 2 | 9934 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Mailthon documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 5 17:52:58 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'mailthonext',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Mailthon'
copyright = '2015, Eeo Jun'
author = 'Eeo Jun'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'mailthonext.JinjaStyle'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'alabaster'
sys.path.append(os.path.abspath('_themes'))
html_theme_path = ['_themes']
html_theme = 'mailthon'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {
# 'github_button': 'true',
# 'github_user': 'eugene-eeo',
# 'github_repo': 'mailthon',
# 'travis_button': 'true',
# 'font_family': 'Tahoma, Arial, sans-serif',
# 'head_font_family': 'Tahoma, Arial, sans-serif',
# 'link': '#aa0000',
# 'link_hover': '#dd0000',
# 'sidebar_text': 'Mailthon is an email library for Python',
# 'show_powered_by': False,
# 'logo_name': 'true',
#}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Mailthondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Mailthon.tex', 'Mailthon Documentation',
'Eeo Jun', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'mailthon', 'Mailthon Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Mailthon', 'Mailthon Documentation',
author, 'Mailthon', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| mit | -1,864,183,566,260,073,200 | 31.148867 | 79 | 0.701228 | false |
thekingofkings/urban-flow-analysis | python/featureCorrelations.py | 2 | 9774 | # -*- coding: utf-8 -*-
"""
Feature correlation analysis
Created on Sun Jan 31 21:09:18 2016
@author: kok
"""
import numpy as np
from FeatureUtils import *
from Crime import Tract
from foursquarePOI import getFourSquarePOIDistribution, getFourSquarePOIDistributionHeader
from scipy.stats import pearsonr
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from pylab import rcParams
rcParams['figure.figsize'] = (4*1.6,3*1.7)
rcParams['pdf.fonttype'] = 42
rcParams['ps.fonttype'] = 42
def correlation_POIdist_crime():
"""
we calculate the correlation between POI distribution and crime for each
community area(CA).
Within each CA, the crime count is number of crime in each tract.
The POI count is number of POIs in each tract.
"""
tracts = Tract.createAllTractObjects()
ordkey = sorted(tracts.keys())
CAs = {}
for key, val in tracts.items():
if val.CA not in CAs:
CAs[val.CA] = [key]
else:
CAs[val.CA].append(key)
Y = retrieve_crime_count(2010, col=['total'], region='tract')
poi_dist = getFourSquarePOIDistribution(gridLevel='tract')
Pearson = {}
for cakey, calist in CAs.items():
crime = []
pois = []
for tractkey in calist:
crime.append(Y[tractkey])
pois.append(poi_dist[ordkey.index(tractkey)])
# calculate correlation
pois = np.array(pois)
crime = np.array(crime)
pearson = []
for i in range(pois.shape[1]):
r = np.vstack( (pois[:,i], crime) )
pearson.append( np.corrcoef(r)[0,1] )
Pearson[cakey] = np.nan_to_num( pearson )
P = []
for key in range(1, 78):
P.append(Pearson[key])
np.savetxt("../R/poi_correlation_ca.csv", P, delimiter=",")
return np.array(P)
def correlation_POI_crime(gridLevel='tract', poiRatio=False):
"""
calculate correlation for different POI category
"""
Y = retrieve_crime_count(2010, col=['total'], region=gridLevel)
h, D = generate_corina_features(region='ca')
popul = D[:,0].reshape(D.shape[0],1)
poi_dist = getFourSquarePOIDistribution(gridLevel=gridLevel, useRatio=poiRatio)
cate_label = ['Food', 'Residence', 'Travel', 'Arts & Entertainment',
'Outdoors & Recreation', 'College & Education', 'Nightlife',
'Professional', 'Shops', 'Event']
if gridLevel == 'tract':
tracts = Tract.createAllTractObjects()
ordkey = sorted(tracts.keys())
crime = []
pois = []
for tractkey in ordkey:
crime.append(Y[tractkey])
pois.append(poi_dist[ordkey.index(tractkey)])
pois = np.array(pois)
crime = np.array(crime)
for i in range(pois.shape[1]):
r = np.vstack( (pois[:,i], crime) )
pcc = np.corrcoef(r)[0,1]
print pcc
elif gridLevel == 'ca':
Y = np.divide(Y, popul) * 10000
Y = Y.reshape( (len(Y),) )
poi_dist = np.transpose(poi_dist)
for i in range(poi_dist.shape[0]):
poi = np.reshape(poi_dist[i,:], Y.shape )
r, p = pearsonr(poi, Y)
print cate_label[i], r, p
def line_POI_crime():
d = getFourSquarePOIDistribution(gridLevel='ca')
y = retrieve_crime_count(2010, col=['total'], region='ca')
h, D = generate_corina_features(region='ca')
popul = D[:,0].reshape(D.shape[0],1)
hd = getFourSquarePOIDistributionHeader()
yhat = np.divide(y, popul) * 10000
for i in range(6,8):
plt.figure()
plt.scatter(d[:,i], y)
plt.xlim(0, 1000)
plt.xlabel('POI count -- {0} category'.format(hd[i]))
plt.ylabel('Crime count')
plt.figure()
plt.scatter(d[:,i], yhat)
plt.xlim(0, 1000)
plt.xlabel('POI count -- {0} category'.format(hd[i]))
plt.ylabel('Crime rate (per 10,000)')
def correlation_demo_crime():
"""
demographics correlation with crime
"""
Y = retrieve_crime_count(year=2010, col=['total'], region='ca')
h, D = generate_corina_features(region='ca')
print h
popul = D[:,0].reshape(D.shape[0],1)
Y = np.divide(Y, popul) * 10000
Y = Y.reshape( (len(Y),) )
D = D.transpose()
for i in range(D.shape[0]):
demo = D[i,:].reshape( (Y.shape ) )
r, p = pearsonr(demo, Y)
print r, p
def correlation_socialflow_crime(region='tract', useRate=False,
weightSocialFlow=False):
"""
calculate the correlation between social flow and crime count.
"""
if region == 'ca':
W = generate_transition_SocialLag(region='ca')
W2 = generate_geographical_SpatialLag_ca()
Y = retrieve_crime_count(2010, region='ca')
elif region == 'tract':
W = generate_transition_SocialLag(region='tract')
W2, tractkey = generate_geographical_SpatialLag()
Ymap = retrieve_crime_count(2010, col=['total'], region='tract')
Y = np.array( [Ymap[k] for k in tractkey] ).reshape(len(Ymap), 1)
U = generate_corina_features(region)
if useRate:
print 'Use crime rate per 10,000 population'
if region == 'tract':
C_mtx = []
cnt = 0
for k in tractkey:
if k in U[1]:
C_mtx.append(U[1][k])
else:
cnt += 1
C_mtx.append( [1] + [0 for i in range(6)] ) # population 1
U = ( U[0], np.array( C_mtx ) )
print len(tractkey), cnt
popul = U[1][:,0].reshape(U[1].shape[0],1)
Y = np.divide(Y, popul) * 10000
if weightSocialFlow:
wC = 130.0 if useRate else 32.0 # constant parameter
poverty = U[1][:,2]
for i in range(W.shape[0]):
for j in range (W.shape[1]):
s = np.exp( - np.abs(poverty[i] - poverty[j]) / wC )
W[i][j] *= s
f1 = np.dot(W, Y)
r = np.transpose( np.hstack( (Y, f1) ) )
pcc1 = np.corrcoef(r)
f2 = np.dot(W2, Y)
r = np.transpose( np.hstack( (Y, f2) ) )
pcc2 = np.corrcoef(r)
print '{0}: social lag {1}, spatial lag {2}'.format(region, pcc1[0,1], pcc2[0,1])
def line_socialflow_crime():
W = generate_transition_SocialLag(region='ca')
C = generate_corina_features()
poverty = C[1][:,2]
for i in range(W.shape[0]):
for j in range (W.shape[1]):
W[i][j] *= np.exp( - np.abs(poverty[i] - poverty[j]) / 32 )
Y = retrieve_crime_count(2010, col=['total'], region='ca')
h, D = generate_corina_features(region='ca')
popul = D[:,0].reshape(D.shape[0],1)
Y = np.divide(Y, popul) * 10000
f1 = np.dot(W, Y)
plt.scatter(f1, Y)
plt.xlabel('Social lag weighted by demographic similarity')
plt.ylabel('crime rate')
def line_spatialflow_crime():
W = generate_geographical_SpatialLag_ca()
Y = retrieve_crime_count(2010, col=['total'], region='ca')
h, D = generate_corina_features(region='ca')
popul = D[:,0].reshape(D.shape[0],1)
Y = np.divide(Y, popul) * 10000
f1 = np.dot(W, Y)
plt.figure()
plt.scatter(f1, Y)
plt.axis([0,700000, 0, 6000])
idx = [31, 75, 37]
sf1 = f1[idx]
sY = Y[idx]
plt.scatter(sf1, sY, edgecolors='red', s=50, linewidths=2 )
plt.figtext(0.43, 0.78, '#32', fontsize='large')
plt.figtext(0.15, 0.37, '#76', fontsize='large')
plt.figtext(0.79, 0.33, '#38', fontsize='large')
plt.xlabel('Geographical influence feature value', fontsize='x-large')
plt.ylabel('Crime rate', fontsize='x-large')
plt.savefig('spatial-crime-rate.pdf', format='pdf')
return Y
from taxiFlow import getTaxiFlow
def line_taxiflow_crime():
s = getTaxiFlow(normalization='bydestination')
Y = retrieve_crime_count(2010, col=['total'], region='ca')
h, D = generate_corina_features(region='ca')
popul = D[:,0].reshape(D.shape[0],1)
Y = np.divide(Y, popul) * 10000
f1 = np.dot(s, Y)
plt.figure()
plt.scatter(f1, Y)
plt.axis([0, 6000, 0, 6000])
idx = [31, 46]
sf1 = f1[idx]
sY = Y[idx]
plt.scatter(sf1, sY, edgecolors='red', s=50, linewidths=2 )
plt.figtext(0.33, 0.8, '#32', fontsize='large')
plt.figtext(0.75, 0.34, '#47', fontsize='large')
plt.xlabel('Hyperlink by taxi flow feature value', fontsize='x-large')
plt.ylabel('Crime rate', fontsize='x-large')
plt.savefig('taxi-flow-percent.pdf', format='pdf')
return f1
def correlation_taxiflow_crime(flowPercentage=True, crimeRate=True):
"""
correlation between taxi flow and crime
"""
s = getTaxiFlow(usePercentage=flowPercentage)
Y = retrieve_crime_count(2010, region='ca')
if crimeRate:
h, D = generate_corina_features(region='ca')
popul = D[:,0].reshape(D.shape[0],1)
Y = np.divide(Y, popul) * 10000
f1 = np.dot(s, Y)
r = np.hstack( (f1, Y) )
r = np.transpose(r)
pcc = np.corrcoef(r)
print pcc
if __name__ == '__main__':
# correlation_POIdist_crime()
# correlation_POI_crime('ca')
# r = line_taxiflow_crime()
# line_POI_crime()
# line_socialflow_crime()
# r = line_spatialflow_crime()
# correlation_socialflow_crime(region='ca', useRate=True, weightSocialFlow=True)
r = correlation_demo_crime()
# correlation_taxiflow_crime(flowPercentage=True, crimeRate=True)
| mit | -3,759,780,219,904,268,300 | 27.086207 | 90 | 0.56108 | false |
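Editor's note: the row above computes feature/crime correlations. The recurring pattern is a plain Pearson correlation between a per-region feature vector and the crime-rate vector; here is a minimal self-contained sketch with made-up numbers:

```python
# Minimal sketch with made-up numbers; mirrors the pearsonr usage in the file above.
import numpy as np
from scipy.stats import pearsonr

crime_rate = np.array([12.0, 30.5, 7.2, 18.9])   # per-region crime rates (invented)
poi_count = np.array([40.0, 95.0, 22.0, 60.0])   # per-region POI counts (invented)
r, p = pearsonr(poi_count, crime_rate)
print(r, p)                                      # correlation coefficient and p-value
```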
msbeta/apollo | modules/tools/mapshow/mapshow.py | 1 | 3418 | #!/usr/bin/env python
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import argparse
import matplotlib.pyplot as plt
from libs.map import Map
from libs.localization import Localization
from libs.path import Path
def draw(map):
lane_ids = args.laneid
if lane_ids is None:
lane_ids = []
map.draw_lanes(plt, args.showlaneids, lane_ids, args.showlanedetails)
if args.showsignals:
map.draw_signal_lights(plt)
if args.showstopsigns:
map.draw_stop_signs(plt)
if args.showjunctions:
map.draw_pnc_junctions(plt)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Mapshow is a tool to display hdmap info on a map.",
prog="mapshow.py")
parser.add_argument(
"-m", "--map", action="store", type=str, required=True,
help="Specify the map file in txt or binary format")
parser.add_argument(
"-m2", "--map2", action="store", type=str, required=False,
help="Specify the map file in txt or binary format")
parser.add_argument(
"-sl", "--showlaneids", action="store_const", const=True,
help="Show all lane ids in map")
parser.add_argument(
"-sld", "--showlanedetails", action="store_const", const=True,
help="Show all lane ids in map")
parser.add_argument(
"-l", "--laneid", nargs='+',
help="Show specific lane id(s) in map")
parser.add_argument(
"-signal", "--showsignals", action="store_const", const=True,
help="Show all signal light stop lines with ids in map")
parser.add_argument(
"-stopsign", "--showstopsigns", action="store_const", const=True,
help="Show all stop sign stop lines with ids in map")
parser.add_argument(
"-junction", "--showjunctions", action="store_const", const=True,
help="Show all pnc-junctions with ids in map")
parser.add_argument(
"--loc", action="store", type=str, required=False,
help="Specify the localization pb file in txt format")
# driving path data files are text files with data format of
# t,x,y,heading,speed
parser.add_argument(
"-dp", "--drivingpath", nargs='+',
help="Show driving paths in map")
args = parser.parse_args()
map = Map()
map.load(args.map)
draw(map)
if args.map2 is not None:
map2 = Map()
map2.load(args.map2)
draw(map2)
if args.drivingpath is not None:
path = Path(args.drivingpath)
path.draw(plt)
if args.loc is not None:
localization = Localization()
localization.load(args.loc)
localization.plot_vehicle(plt)
plt.axis('equal')
plt.show()
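# Example invocations (the map and localization file names below are
# placeholders, not files shipped with this tool; the flags match the
# arguments registered above):
#   python mapshow.py -m base_map.txt -sl -signal -stopsign -junction
#   python mapshow.py -m base_map.txt -l lane_a lane_b --loc localization.txt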
| apache-2.0 | 1,469,060,692,143,007,500 | 34.237113 | 79 | 0.61732 | false |
yingyun001/ovirt-engine | packaging/setup/plugins/ovirt-engine-setup/ovirt-engine/system/image_upload.py | 8 | 5681 | #
# image_upload plugin -- ovirt engine setup
#
# Copyright (C) 2014-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
ISO image uploader plugin.
"""
import datetime
import gettext
import os
import shutil
from otopi import plugin, util
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup import util as osetuputil
from ovirt_engine_setup.engine import constants as oenginecons
from ovirt_engine_setup.engine_common import constants as oengcommcons
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
@util.export
class Plugin(plugin.PluginBase):
"""
ISO image uploader plugin.
"""
def __init__(self, context):
super(Plugin, self).__init__(context=context)
self._enabled = False
@plugin.event(
stage=plugin.Stages.STAGE_INIT,
)
def _init(self):
self.environment.setdefault(
osetupcons.ConfigEnv.ISO_PATHS_TO_UPLOAD,
[]
)
@plugin.event(
stage=plugin.Stages.STAGE_SETUP,
)
def _setup(self):
self.environment[osetupcons.ConfigEnv.ISO_PATHS_TO_UPLOAD] = [
os.path.join(
osetupcons.FileLocations.VIRTIO_WIN_DIR,
'virtio-win_x86.vfd',
),
os.path.join(
osetupcons.FileLocations.VIRTIO_WIN_DIR,
'virtio-win_amd64.vfd',
),
os.path.join(
osetupcons.FileLocations.VIRTIO_WIN_DIR,
'virtio-win.iso',
),
os.path.join(
osetupcons.FileLocations.OVIRT_GUEST_TOOLS_DIR,
'ovirt-tools-setup.iso',
),
os.path.join(
osetupcons.FileLocations.RHEV_GUEST_TOOLS_DIR,
'rhev-tools-setup.iso',
),
]
@plugin.event(
stage=plugin.Stages.STAGE_VALIDATION,
condition=lambda self: (
not self.environment[
osetupcons.CoreEnv.DEVELOPER_MODE
] and
self.environment[
oenginecons.EngineDBEnv.NEW_DATABASE
] and
self.environment[
oenginecons.SystemEnv.NFS_CONFIG_ENABLED
]
),
)
def _validation(self):
for filename in self.environment[
osetupcons.ConfigEnv.ISO_PATHS_TO_UPLOAD
]:
if os.path.exists(filename):
self._enabled = True
@plugin.event(
stage=plugin.Stages.STAGE_MISC,
condition=lambda self: self._enabled,
after=(
oenginecons.Stages.CONFIG_ISO_DOMAIN_AVAILABLE,
),
)
def _misc(self):
"""
Load files (iso, vfd) from existing rpms to the NFS ISO domain
TODO: use engine-iso-uploader when it will support local destinations
"""
uninstall_files = []
self.environment[
osetupcons.CoreEnv.REGISTER_UNINSTALL_GROUPS
].createGroup(
group='iso_images',
description='Uploaded ISO images',
optional=True
).addFiles(
group='iso_images',
fileList=uninstall_files,
)
targetDir = self.environment[
oenginecons.ConfigEnv.ISO_DOMAIN_STORAGE_DIR
]
# Iterate the list and copy all the files.
for filename in self.environment[
osetupcons.ConfigEnv.ISO_PATHS_TO_UPLOAD
]:
if os.path.exists(filename):
try:
targetFile = os.path.join(
targetDir,
os.path.basename(filename)
)
if os.path.exists(targetFile):
shutil.move(
targetFile,
'%s.%s' % (
targetFile,
datetime.datetime.now().strftime(
'%Y%m%d%H%M%S'
)
)
)
shutil.copyfile(filename, targetFile)
uninstall_files.append(targetFile)
os.chmod(targetFile, 0o644)
os.chown(
targetFile,
osetuputil.getUid(
self.environment[oengcommcons.SystemEnv.USER_VDSM]
),
osetuputil.getGid(
self.environment[oengcommcons.SystemEnv.GROUP_KVM]
)
)
except (OSError, shutil.Error) as e:
self.logger.warning(
_(
"Cannot copy '{filename}' to iso domain "
"'{directory}', error: {error}"
).format(
filename=filename,
directory=targetDir,
error=e,
)
)
# vim: expandtab tabstop=4 shiftwidth=4
| apache-2.0 | 4,499,539,830,241,498,000 | 30.38674 | 78 | 0.51593 | false |
cloudera/hue | desktop/libs/notebook/src/notebook/api.py | 2 | 37207 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from future import standard_library
standard_library.install_aliases()
import json
import logging
import sqlparse
import sys
from django.urls import reverse
from django.db.models import Q
from django.views.decorators.http import require_GET, require_POST
from rest_framework.decorators import api_view
import opentracing.tracer
from azure.abfs.__init__ import abfspath
from desktop.conf import TASK_SERVER, ENABLE_CONNECTORS
from desktop.lib.i18n import smart_str
from desktop.lib.django_util import JsonResponse
from desktop.lib.exceptions_renderable import PopupException
from desktop.models import Document2, Document, __paginate, _get_gist_document, FilesystemException
from indexer.file_format import HiveFormat
from indexer.fields import Field
from metadata.conf import OPTIMIZER
from notebook.conf import EXAMPLES
from notebook.connectors.base import Notebook, QueryExpired, SessionExpired, QueryError, _get_snippet_name, patch_snippet_for_connector
from notebook.connectors.hiveserver2 import HS2Api
from notebook.decorators import api_error_handler, check_document_access_permission, check_document_modify_permission
from notebook.models import escape_rows, make_notebook, upgrade_session_properties, get_api, _get_dialect_example
if sys.version_info[0] > 2:
from urllib.parse import unquote as urllib_unquote
from django.utils.translation import gettext as _
else:
from django.utils.translation import ugettext as _
from urllib import unquote as urllib_unquote
LOG = logging.getLogger(__name__)
DEFAULT_HISTORY_NAME = ''
@require_POST
@api_error_handler
def create_notebook(request):
response = {'status': -1}
editor_type = request.POST.get('type', 'notebook')
gist_id = request.POST.get('gist')
directory_uuid = request.POST.get('directory_uuid')
is_blank = request.POST.get('blank', 'false') == 'true'
if gist_id:
gist_doc = _get_gist_document(uuid=gist_id)
statement = json.loads(gist_doc.data)['statement']
editor = make_notebook(
name='',
description='',
editor_type=editor_type,
statement=statement,
is_presentation_mode=True
)
else:
editor = Notebook()
if EXAMPLES.AUTO_OPEN.get() and not is_blank:
document = _get_dialect_example(dialect=editor_type)
if document:
editor = Notebook(document=document)
editor = upgrade_session_properties(request, editor)
data = editor.get_data()
if editor_type != 'notebook':
data['name'] = ''
data['type'] = 'query-%s' % editor_type # TODO: Add handling for non-SQL types
data['directoryUuid'] = directory_uuid
editor.data = json.dumps(data)
response['notebook'] = editor.get_data()
response['status'] = 0
return JsonResponse(response)
@api_view(["POST"]) # To fully port when Web Components are decoupled
@require_POST
@check_document_access_permission
@api_error_handler
def create_session(request):
response = {'status': -1}
session = json.loads(request.POST.get('session', '{}'))
properties = session.get('properties', [])
response['session'] = get_api(request, session).create_session(lang=session['type'], properties=properties)
response['status'] = 0
return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def close_session(request):
response = {'status': -1}
session = json.loads(request.POST.get('session', '{}'))
response['session'] = get_api(request, {'type': session['type']}).close_session(session=session)
response['status'] = 0
return JsonResponse(response)
def _execute_notebook(request, notebook, snippet):
response = {'status': -1}
result = None
history = None
active_executable = None
historify = (notebook['type'] != 'notebook' or snippet.get('wasBatchExecuted')) and not notebook.get('skipHistorify')
try:
try:
sessions = notebook.get('sessions') and notebook['sessions'] # Session reference for snippet execution without persisting it
active_executable = json.loads(request.POST.get('executable', '{}')) # Editor v2
# TODO: Use statement, database etc. from active_executable
if historify:
history = _historify(notebook, request.user)
notebook = Notebook(document=history).get_data()
interpreter = get_api(request, snippet)
if snippet.get('interface') == 'sqlalchemy':
interpreter.options['session'] = sessions[0]
with opentracing.tracer.start_span('interpreter') as span:
# interpreter.execute needs the sessions, but we don't want to persist them
pre_execute_sessions = notebook['sessions']
notebook['sessions'] = sessions
response['handle'] = interpreter.execute(notebook, snippet)
notebook['sessions'] = pre_execute_sessions
# Retrieve and remove the result from the handle
if response['handle'].get('sync'):
result = response['handle'].pop('result')
finally:
if historify:
_snippet = [s for s in notebook['snippets'] if s['id'] == snippet['id']][0]
if 'id' in active_executable: # Editor v2
# notebook_executable is the 1-to-1 match of active_executable in the notebook structure
notebook_executable = [e for e in _snippet['executor']['executables'] if e['id'] == active_executable['id']][0]
if 'handle' in response:
notebook_executable['handle'] = response['handle']
if history:
notebook_executable['history'] = {
'id': history.id,
'uuid': history.uuid
}
notebook_executable['operationId'] = history.uuid
if 'handle' in response: # No failure
if 'result' not in _snippet: # Editor v2
_snippet['result'] = {}
_snippet['result']['handle'] = response['handle']
_snippet['result']['statements_count'] = response['handle'].get('statements_count', 1)
_snippet['result']['statement_id'] = response['handle'].get('statement_id', 0)
_snippet['result']['handle']['statement'] = response['handle'].get(
'statement', snippet['statement']
).strip() # For non HS2, as non multi query yet
else:
_snippet['status'] = 'failed'
if history: # If _historify failed, history will be None.
# If we get Atomic block exception, something underneath interpreter.execute() crashed and is not handled.
history.update_data(notebook)
history.save()
response['history_id'] = history.id
response['history_uuid'] = history.uuid
if notebook['isSaved']: # Keep track of history of saved queries
response['history_parent_uuid'] = history.dependencies.filter(type__startswith='query-').latest('last_modified').uuid
except QueryError as ex: # We inject the history information from _historify() to the failed queries
if response.get('history_id'):
ex.extra['history_id'] = response['history_id']
if response.get('history_uuid'):
ex.extra['history_uuid'] = response['history_uuid']
if response.get('history_parent_uuid'):
ex.extra['history_parent_uuid'] = response['history_parent_uuid']
raise ex
# Inject and HTML escape results
if result is not None:
response['result'] = result
response['result']['data'] = escape_rows(result['data'])
response['status'] = 0
return response
@require_POST
@check_document_access_permission
@api_error_handler
def execute(request, dialect=None):
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
if dialect:
notebook['dialect'] = dialect
with opentracing.tracer.start_span('notebook-execute') as span:
span.set_tag('user-id', request.user.username)
response = _execute_notebook(request, notebook, snippet)
span.set_tag('query-id', response.get('handle', {}).get('guid'))
return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def check_status(request):
response = {'status': -1}
operation_id = request.POST.get('operationId')
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
with opentracing.tracer.start_span('notebook-check_status') as span:
span.set_tag('user-id', request.user.username)
span.set_tag(
'query-id',
snippet.get('result', {}).get('handle', {}).get('guid')
)
response = _check_status(request, notebook=notebook, snippet=snippet, operation_id=operation_id)
return JsonResponse(response)
def _check_status(request, notebook=None, snippet=None, operation_id=None):
response = {'status': -1}
if operation_id or not snippet: # To unify with _get_snippet
nb_doc = Document2.objects.get_by_uuid(user=request.user, uuid=operation_id or notebook['uuid'])
notebook = Notebook(document=nb_doc).get_data() # Used below
snippet = notebook['snippets'][0]
try:
response['query_status'] = get_api(request, snippet).check_status(notebook, snippet)
response['status'] = 0
except SessionExpired:
response['status'] = 'expired'
raise
except QueryExpired:
response['status'] = 'expired'
raise
finally:
if response['status'] == 0 and snippet['status'] != response['query_status']:
status = response['query_status']['status']
elif response['status'] == 'expired':
status = 'expired'
else:
status = 'failed'
if response.get('query_status'):
has_result_set = response['query_status'].get('has_result_set')
else:
has_result_set = None
if notebook.get('dialect') or notebook['type'].startswith('query') or notebook.get('isManaged'):
nb_doc = Document2.objects.get_by_uuid(user=request.user, uuid=operation_id or notebook['uuid'])
if nb_doc.can_write(request.user):
nb = Notebook(document=nb_doc).get_data()
if status != nb['snippets'][0]['status'] or has_result_set != nb['snippets'][0].get('has_result_set'):
nb['snippets'][0]['status'] = status
if has_result_set is not None:
nb['snippets'][0]['has_result_set'] = has_result_set
nb_doc.update_data(nb)
nb_doc.save()
return response
@require_POST
@check_document_access_permission
@api_error_handler
def fetch_result_data(request):
operation_id = request.POST.get('operationId')
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
rows = json.loads(request.POST.get('rows', '100'))
start_over = json.loads(request.POST.get('startOver', 'false'))
with opentracing.tracer.start_span('notebook-fetch_result_data') as span:
span.set_tag('user-id', request.user.username)
span.set_tag(
'query-id',
snippet['result']['handle']['guid'] if snippet['result'].get('handle') and snippet['result']['handle'].get('guid') else None
)
response = _fetch_result_data(request, notebook, snippet, operation_id, rows=rows, start_over=start_over)
response['status'] = 0
return JsonResponse(response)
def _fetch_result_data(request, notebook=None, snippet=None, operation_id=None, rows=100, start_over=False, nulls_only=False):
snippet = _get_snippet(request.user, notebook, snippet, operation_id)
response = {
'result': get_api(request, snippet).fetch_result(notebook, snippet, rows, start_over)
}
# Materialize and HTML escape results
if response['result'].get('data') and response['result'].get('type') == 'table' and not response['result'].get('isEscaped'):
response['result']['data'] = escape_rows(response['result']['data'], nulls_only=nulls_only)
response['result']['isEscaped'] = True
return response
@require_POST
@check_document_access_permission
@api_error_handler
def fetch_result_metadata(request):
response = {'status': -1}
operation_id = request.POST.get('operationId')
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
snippet = _get_snippet(request.user, notebook, snippet, operation_id)
with opentracing.tracer.start_span('notebook-fetch_result_metadata') as span:
response['result'] = get_api(request, snippet).fetch_result_metadata(notebook, snippet)
span.set_tag('user-id', request.user.username)
span.set_tag(
'query-id',
snippet['result']['handle']['guid'] if snippet['result'].get('handle') and snippet['result']['handle'].get('guid') else None
)
response['status'] = 0
return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def fetch_result_size(request):
response = {'status': -1}
operation_id = request.POST.get('operationId')
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
snippet = _get_snippet(request.user, notebook, snippet, operation_id)
with opentracing.tracer.start_span('notebook-fetch_result_size') as span:
response['result'] = get_api(request, snippet).fetch_result_size(notebook, snippet)
span.set_tag('user-id', request.user.username)
span.set_tag(
'query-id',
snippet['result']['handle']['guid'] if snippet['result'].get('handle') and snippet['result']['handle'].get('guid') else None
)
response['status'] = 0
return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def cancel_statement(request):
response = {'status': -1}
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = None
operation_id = request.POST.get('operationId') or notebook['uuid']
snippet = _get_snippet(request.user, notebook, snippet, operation_id)
with opentracing.tracer.start_span('notebook-cancel_statement') as span:
response['result'] = get_api(request, snippet).cancel(notebook, snippet)
span.set_tag('user-id', request.user.username)
span.set_tag(
'query-id',
snippet['result']['handle']['guid'] if snippet['result'].get('handle') and snippet['result']['handle'].get('guid') else None
)
response['status'] = 0
return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def get_logs(request):
response = {'status': -1}
operation_id = request.POST.get('operationId')
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
if operation_id:
notebook['uuid'] = operation_id
startFrom = request.POST.get('from')
startFrom = int(startFrom) if startFrom else None
size = request.POST.get('size')
size = int(size) if size else None
full_log = smart_str(request.POST.get('full_log', ''))
snippet = _get_snippet(request.user, notebook, snippet, operation_id)
db = get_api(request, snippet)
with opentracing.tracer.start_span('notebook-get_logs') as span:
logs = smart_str(db.get_log(notebook, snippet, startFrom=startFrom, size=size))
span.set_tag('user-id', request.user.username)
span.set_tag(
'query-id',
snippet['result']['handle']['guid'] if snippet['result'].get('handle') and snippet['result']['handle'].get('guid') else None
)
full_log += logs
jobs = db.get_jobs(notebook, snippet, full_log)
response['logs'] = logs.strip()
response['progress'] = min(
db.progress(notebook, snippet, logs=full_log),
99
) if snippet['status'] != 'available' and snippet['status'] != 'success' else 100
response['jobs'] = jobs
response['isFullLogs'] = db.get_log_is_full_log(notebook, snippet)
response['status'] = 0
return JsonResponse(response)
def _save_notebook(notebook, user):
if notebook['snippets'][0].get('connector') and notebook['snippets'][0]['connector'].get('dialect'): # TODO Connector unification
notebook_type = 'query-%(dialect)s' % notebook['snippets'][0]['connector']
if notebook['snippets'][0] and notebook['snippets'][0].get('executor'):
notebook['snippets'][0]['executor']['executables'] = []
else:
notebook_type = notebook.get('type', 'notebook')
save_as = False
if notebook.get('parentSavedQueryUuid'): # We save into the original saved query, not into the query history
notebook_doc = Document2.objects.get_by_uuid(user=user, uuid=notebook['parentSavedQueryUuid'])
elif notebook.get('id'):
notebook_doc = Document2.objects.get(id=notebook['id'])
else:
notebook_doc = Document2.objects.create(name=notebook['name'], uuid=notebook['uuid'], type=notebook_type, owner=user)
Document.objects.link(
notebook_doc, owner=notebook_doc.owner, name=notebook_doc.name, description=notebook_doc.description, extra=notebook_type
)
save_as = True
if notebook.get('directoryUuid'):
notebook_doc.parent_directory = Document2.objects.get_by_uuid(user=user, uuid=notebook.get('directoryUuid'), perm_type='write')
else:
notebook_doc.parent_directory = Document2.objects.get_home_directory(user)
notebook['isSaved'] = True
notebook['isHistory'] = False
notebook['id'] = notebook_doc.id
_clear_sessions(notebook)
notebook_doc1 = notebook_doc._get_doc1(doc2_type=notebook_type)
if ENABLE_CONNECTORS.get():
notebook_doc.connector_id = int(notebook['snippets'][0]['connector']['type'])
notebook_doc.update_data(notebook)
notebook_doc.search = _get_statement(notebook)
notebook_doc.name = notebook_doc1.name = notebook['name']
notebook_doc.description = notebook_doc1.description = notebook['description']
notebook_doc.save()
notebook_doc1.save()
return notebook_doc, save_as
@api_error_handler
@require_POST
@check_document_modify_permission()
def save_notebook(request):
response = {'status': -1}
notebook = json.loads(request.POST.get('notebook', '{}'))
notebook_doc, save_as = _save_notebook(notebook, request.user)
response['status'] = 0
response['save_as'] = save_as
response.update(notebook_doc.to_dict())
response['message'] = request.POST.get('editorMode') == 'true' and _('Query saved successfully') or _('Notebook saved successfully')
return JsonResponse(response)
def _clear_sessions(notebook):
notebook['sessions'] = [_s for _s in notebook['sessions'] if _s['type'] in ('scala', 'spark', 'pyspark', 'sparkr', 'r')]
def _historify(notebook, user):
query_type = 'query-%(dialect)s' % notebook if ENABLE_CONNECTORS.get() else notebook['type']
name = notebook['name'] if (notebook['name'] and notebook['name'].strip() != '') else DEFAULT_HISTORY_NAME
is_managed = notebook.get('isManaged') == True # Prevents None
if is_managed and Document2.objects.filter(uuid=notebook['uuid']).exists():
history_doc = Document2.objects.get(uuid=notebook['uuid'])
else:
history_doc = Document2.objects.create(
name=name,
type=query_type,
owner=user,
is_history=True,
is_managed=is_managed,
)
# Link history of saved query
if notebook['isSaved']:
# From previous history query or initial saved query
parent_doc = Document2.objects.get(uuid=notebook.get('parentSavedQueryUuid') or notebook['uuid'])
notebook['parentSavedQueryUuid'] = parent_doc.uuid
history_doc.dependencies.add(parent_doc)
if not is_managed:
Document.objects.link(
history_doc,
name=history_doc.name,
owner=history_doc.owner,
description=history_doc.description,
extra=query_type
)
notebook['uuid'] = history_doc.uuid
_clear_sessions(notebook)
if ENABLE_CONNECTORS.get():
history_doc.connector_id = int(notebook['type'].split('-')[1])
history_doc.update_data(notebook)
history_doc.search = _get_statement(notebook)
history_doc.save()
return history_doc
def _get_statement(notebook):
if notebook['snippets'] and len(notebook['snippets']) > 0:
snippet = notebook['snippets'][0]
try:
if snippet.get('executor', {}).get('executables', []): # With Connectors/Editor 2
executable = snippet['executor']['executables'][0]
if executable.get('handle'):
return executable['handle']['statement']
else:
return executable['parsedStatement']['statement']
return Notebook.statement_with_variables(snippet)
except KeyError as e:
LOG.warning('Could not get statement from query history: %s' % e)
return ''
@require_GET
@api_error_handler
@check_document_access_permission
def get_history(request):
response = {'status': -1}
doc_type = request.GET.get('doc_type')
doc_text = request.GET.get('doc_text')
connector_id = request.GET.get('doc_connector')
page = min(int(request.GET.get('page', 1)), 100)
limit = min(int(request.GET.get('limit', 50)), 100)
is_notification_manager = request.GET.get('is_notification_manager', 'false') == 'true'
if is_notification_manager:
docs = Document2.objects.get_tasks_history(user=request.user)
else:
docs = Document2.objects.get_history(doc_type='query-%s' % doc_type, connector_id=connector_id, user=request.user)
if doc_text:
docs = docs.filter(Q(name__icontains=doc_text) | Q(description__icontains=doc_text) | Q(search__icontains=doc_text))
# Paginate
docs = docs.order_by('-last_modified')
response['count'] = docs.count()
docs = __paginate(page, limit, queryset=docs)['documents']
history = []
for doc in docs:
notebook = Notebook(document=doc).get_data()
if 'snippets' in notebook:
statement = notebook['description'] if is_notification_manager else _get_statement(notebook)
history.append({
'name': doc.name,
'id': doc.id,
'uuid': doc.uuid,
'type': doc.type,
'data': {
'statement': statement[:1001] if statement else '',
'lastExecuted': notebook['snippets'][0].get('lastExecuted', -1),
'status': notebook['snippets'][0].get('status', ''),
'parentSavedQueryUuid': notebook.get('parentSavedQueryUuid', '')
} if notebook['snippets'] else {},
'absoluteUrl': doc.get_absolute_url(),
})
else:
LOG.error('Incomplete History Notebook: %s' % notebook)
response['history'] = sorted(history, key=lambda row: row['data']['lastExecuted'], reverse=True)
response['message'] = _('History fetched')
response['status'] = 0
return JsonResponse(response)
@require_POST
@api_error_handler
@check_document_modify_permission()
def clear_history(request):
response = {'status': -1}
notebook = json.loads(request.POST.get('notebook', '{}'))
doc_type = request.POST.get('doc_type')
is_notification_manager = request.POST.get('is_notification_manager', 'false') == 'true'
if is_notification_manager:
history = Document2.objects.get_tasks_history(user=request.user, allow_distinct=False)
else:
history = Document2.objects.get_history(doc_type='query-%s' % doc_type, user=request.user, allow_distinct=False)
response['updated'] = history.delete()
response['message'] = _('History cleared !')
response['status'] = 0
return JsonResponse(response)
@require_GET
@check_document_access_permission
def open_notebook(request):
response = {'status': -1}
notebook_id = request.GET.get('notebook')
notebook = Notebook(document=Document2.objects.get(id=notebook_id))
notebook = upgrade_session_properties(request, notebook)
response['status'] = 0
response['notebook'] = notebook.get_json()
  response['message'] = _('Notebook loaded successfully')
  return JsonResponse(response)
@require_POST
@check_document_access_permission
def close_notebook(request):
response = {'status': -1, 'result': []}
notebook = json.loads(request.POST.get('notebook', '{}'))
for session in [_s for _s in notebook['sessions']]:
try:
api = get_api(request, session)
if hasattr(api, 'close_session_idle'):
response['result'].append(api.close_session_idle(notebook, session))
else:
response['result'].append(api.close_session(session))
except QueryExpired:
pass
except Exception as e:
LOG.exception('Error closing session %s' % str(e))
return JsonResponse(response)
@require_POST
@check_document_access_permission
def close_statement(request):
response = {'status': -1}
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = None
operation_id = request.POST.get('operationId')
if operation_id and not notebook.get('uuid'):
notebook['uuid'] = operation_id
try:
snippet = _get_snippet(request.user, notebook, snippet, operation_id)
with opentracing.tracer.start_span('notebook-close_statement') as span:
response['result'] = get_api(request, snippet).close_statement(notebook, snippet)
span.set_tag('user-id', request.user.username)
span.set_tag(
'query-id',
snippet['result']['handle']['guid'] if snippet['result'].get('handle') and snippet['result']['handle'].get('guid') else None
)
except QueryExpired:
response['message'] = _('Query already expired.')
except FilesystemException:
response['message'] = _('Query id could not be found.')
else:
response['message'] = _('Query closed.')
response['status'] = 0
return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def autocomplete(request, server=None, database=None, table=None, column=None, nested=None):
response = {'status': -1}
# Passed by check_document_access_permission but unused by APIs
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
action = request.POST.get('operation', 'schema')
try:
autocomplete_data = get_api(request, snippet).autocomplete(snippet, database, table, column, nested, action)
response.update(autocomplete_data)
except QueryExpired as e:
LOG.warning('Expired query seen: %s' % e)
response['status'] = 0
return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def get_sample_data(request, server=None, database=None, table=None, column=None):
response = {'status': -1}
# Passed by check_document_access_permission but unused by APIs
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
is_async = json.loads(request.POST.get('async', 'false'))
operation = json.loads(request.POST.get('operation', '"default"'))
sample_data = get_api(request, snippet).get_sample_data(snippet, database, table, column, is_async=is_async, operation=operation)
response.update(sample_data)
response['status'] = 0
return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def explain(request):
response = {'status': -1}
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
response = get_api(request, snippet).explain(notebook, snippet)
return JsonResponse(response)
@require_POST
@api_error_handler
def format(request):
response = {'status': 0}
statements = request.POST.get('statements', '')
response['formatted_statements'] = sqlparse.format(statements, reindent=True, keyword_case='upper') # SQL only currently
return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def export_result(request):
response = {'status': -1, 'message': _('Success')}
# Passed by check_document_access_permission but unused by APIs
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
data_format = json.loads(request.POST.get('format', '"hdfs-file"'))
destination = urllib_unquote(json.loads(request.POST.get('destination', '""')))
overwrite = json.loads(request.POST.get('overwrite', 'false'))
is_embedded = json.loads(request.POST.get('is_embedded', 'false'))
start_time = json.loads(request.POST.get('start_time', '-1'))
api = get_api(request, snippet)
if data_format == 'hdfs-file': # Blocking operation, like downloading
if request.fs.isdir(destination):
if notebook.get('name'):
destination += '/%(name)s.csv' % notebook
else:
destination += '/%(type)s-%(id)s.csv' % notebook
if overwrite and request.fs.exists(destination):
request.fs.do_as_user(request.user.username, request.fs.rmtree, destination)
response['watch_url'] = api.export_data_as_hdfs_file(snippet, destination, overwrite)
response['status'] = 0
request.audit = {
'operation': 'EXPORT',
'operationText': 'User %s exported to HDFS destination: %s' % (request.user.username, destination),
'allowed': True
}
elif data_format == 'hive-table':
if is_embedded:
sql, success_url = api.export_data_as_table(notebook, snippet, destination)
task = make_notebook(
name=_('Export %s query to table %s') % (snippet['type'], destination),
description=_('Query %s to %s') % (_get_snippet_name(notebook), success_url),
editor_type=snippet['type'],
statement=sql,
status='ready',
database=snippet['database'],
on_success_url=success_url,
last_executed=start_time,
is_task=True
)
response = task.execute(request)
else:
notebook_id = notebook['id'] or request.GET.get('editor', request.GET.get('notebook'))
      response['watch_url'] = reverse('notebook:execute_and_watch') + '?action=save_as_table&notebook=' + str(notebook_id) + \
'&snippet=0&destination=' + destination
response['status'] = 0
request.audit = {
'operation': 'EXPORT',
'operationText': 'User %s exported to Hive table: %s' % (request.user.username, destination),
'allowed': True
}
elif data_format == 'hdfs-directory':
if destination.lower().startswith("abfs"):
destination = abfspath(destination)
if request.fs.exists(destination) and request.fs.listdir_stats(destination):
raise PopupException(_('The destination is not an empty directory!'))
if is_embedded:
sql, success_url = api.export_large_data_to_hdfs(notebook, snippet, destination)
task = make_notebook(
name=_('Export %s query to directory') % snippet['type'],
description=_('Query %s to %s') % (_get_snippet_name(notebook), success_url),
editor_type=snippet['type'],
statement=sql,
status='ready-execute',
database=snippet['database'],
on_success_url=success_url,
last_executed=start_time,
is_task=True
)
response = task.execute(request)
else:
notebook_id = notebook['id'] or request.GET.get('editor', request.GET.get('notebook'))
      response['watch_url'] = reverse('notebook:execute_and_watch') + '?action=insert_as_query&notebook=' + str(notebook_id) + \
'&snippet=0&destination=' + destination
response['status'] = 0
request.audit = {
'operation': 'EXPORT',
'operationText': 'User %s exported to HDFS directory: %s' % (request.user.username, destination),
'allowed': True
}
elif data_format in ('search-index', 'dashboard'):
# Open the result in the Dashboard via a SQL sub-query or the Import wizard (quick vs scalable)
if is_embedded:
notebook_id = notebook['id'] or request.GET.get('editor', request.GET.get('notebook'))
if data_format == 'dashboard':
engine = notebook['type'].replace('query-', '')
response['watch_url'] = reverse(
'dashboard:browse',
kwargs={'name': notebook_id}
) + '?source=query&engine=%(engine)s' % {'engine': engine}
response['status'] = 0
else:
sample = get_api(request, snippet).fetch_result(notebook, snippet, rows=4, start_over=True)
for col in sample['meta']:
col['type'] = HiveFormat.FIELD_TYPE_TRANSLATE.get(col['type'], 'string')
response['status'] = 0
response['id'] = notebook_id
response['name'] = _get_snippet_name(notebook)
response['source_type'] = 'query'
response['target_type'] = 'index'
response['target_path'] = destination
response['sample'] = list(sample['data'])
response['columns'] = [
Field(col['name'], col['type']).to_dict() for col in sample['meta']
]
else:
notebook_id = notebook['id'] or request.GET.get('editor', request.GET.get('notebook'))
      response['watch_url'] = reverse('notebook:execute_and_watch') + '?action=index_query&notebook=' + str(notebook_id) + \
'&snippet=0&destination=' + destination
response['status'] = 0
if response.get('status') != 0:
response['message'] = _('Exporting result failed.')
return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def statement_risk(request):
response = {'status': -1, 'message': ''}
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
interface = request.POST.get('interface', OPTIMIZER.INTERFACE.get())
api = get_api(request, snippet)
response['query_complexity'] = api.statement_risk(interface, notebook, snippet)
response['status'] = 0
return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def statement_compatibility(request):
response = {'status': -1, 'message': ''}
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
interface = request.POST.get('interface', OPTIMIZER.INTERFACE.get())
source_platform = request.POST.get('sourcePlatform')
target_platform = request.POST.get('targetPlatform')
api = get_api(request, snippet)
response['query_compatibility'] = api.statement_compatibility(
interface,
notebook,
snippet,
source_platform=source_platform,
target_platform=target_platform
)
response['status'] = 0
return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def statement_similarity(request):
response = {'status': -1, 'message': ''}
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
interface = request.POST.get('interface', OPTIMIZER.INTERFACE.get())
source_platform = request.POST.get('sourcePlatform')
api = get_api(request, snippet)
response['statement_similarity'] = api.statement_similarity(interface, notebook, snippet, source_platform=source_platform)
response['status'] = 0
return JsonResponse(response)
@require_POST
@check_document_access_permission
@api_error_handler
def get_external_statement(request):
response = {'status': -1, 'message': ''}
notebook = json.loads(request.POST.get('notebook', '{}'))
snippet = json.loads(request.POST.get('snippet', '{}'))
if snippet.get('statementType') == 'file':
response['statement'] = _get_statement_from_file(request.user, request.fs, snippet)
elif snippet.get('statementType') == 'document':
notebook = Notebook(Document2.objects.get_by_uuid(user=request.user, uuid=snippet['associatedDocumentUuid'], perm_type='read'))
response['statement'] = notebook.get_str()
response['status'] = 0
return JsonResponse(response)
def _get_statement_from_file(user, fs, snippet):
script_path = snippet['statementPath']
if script_path:
script_path = script_path.replace('hdfs://', '')
if fs.do_as_user(user, fs.isfile, script_path):
return fs.do_as_user(user, fs.read, script_path, 0, 16 * 1024 ** 2)
@require_POST
@api_error_handler
def describe(request, database, table=None, column=None):
response = {'status': -1, 'message': ''}
notebook = json.loads(request.POST.get('notebook', '{}'))
source_type = request.POST.get('source_type', '')
connector = json.loads(request.POST.get('connector', '{}'))
snippet = {'type': source_type, 'connector': connector}
patch_snippet_for_connector(snippet)
describe = get_api(request, snippet).describe(notebook, snippet, database, table, column=column)
response.update(describe)
return JsonResponse(response)
def _get_snippet(user, notebook, snippet, operation_id):
if operation_id or not snippet:
nb_doc = Document2.objects.get_by_uuid(user=user, uuid=operation_id or notebook.get('uuid'))
notebook = Notebook(document=nb_doc).get_data()
snippet = notebook['snippets'][0]
return snippet
| apache-2.0 | -3,048,649,456,593,295,000 | 34.435238 | 135 | 0.676781 | false |
Rogentos/legacy-anaconda | cmdline.py | 3 | 6311 | #
# cmdline.py - non-interactive, very very simple frontend to anaconda
#
# Copyright (C) 2003, 2004, 2005, 2006, 2007 Red Hat, Inc.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Jeremy Katz <[email protected]>
#
import time
import signal
import parted
from constants import *
from flags import flags
from iutil import strip_markup
from installinterfacebase import InstallInterfaceBase
import gettext
_ = lambda x: gettext.ldgettext("anaconda", x)
import logging
log = logging.getLogger("anaconda")
stepToClasses = { "install" : "setupProgressDisplay" }
class WaitWindow:
def pop(self):
pass
def refresh(self):
pass
def __init__(self, title, text):
print(text)
class ProgressWindow:
def pop(self):
print("")
def pulse(self):
pass
def set(self, amount):
if amount == self.total:
print(_("Completed"))
def refresh(self):
pass
def __init__(self, title, text, total, updpct = 0.05, pulse = False):
self.total = total
print(text)
print(_("In progress"))
class InstallInterface(InstallInterfaceBase):
def __init__(self):
InstallInterfaceBase.__init__(self)
# signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTSTP, signal.SIG_DFL)
self.instProgress = None
def __del__(self):
pass
def shutdown(self):
pass
def suspend(self):
pass
def resume(self):
pass
def progressWindow(self, title, text, total, updpct = 0.05, pulse = False):
return ProgressWindow(title, text, total, updpct, pulse)
def kickstartErrorWindow(self, text):
s = _("The following error was found while parsing the "
"kickstart configuration file:\n\n%s") %(text,)
print(s)
while 1:
time.sleep(5)
def messageWindow(self, title, text, type="ok", default = None,
custom_icon = None, custom_buttons = []):
if type == "ok":
print(text)
else:
print(_("Command line mode requires all choices to be specified in a kickstart configuration file."))
print(title)
print(text)
print(type, custom_buttons)
# don't exit
while 1:
time.sleep(5)
def detailedMessageWindow(self, title, text, longText=None, type="ok",
default=None, custom_buttons=None,
custom_icon=None):
if longText:
text += "\n\n%s" % longText
self.messageWindow(title, text, type=type, default=default,
custom_buttons=custom_buttons, custom_icon=custom_icon)
def passphraseEntryWindow(self, device):
print(_("Can't have a question in command line mode!"))
print("(passphraseEntryWindow: '%s')" % device)
# don't exit
while 1:
time.sleep(5)
def getLUKSPassphrase(self, passphrase = "", isglobal = False):
print(_("Can't have a question in command line mode!"))
print("(getLUKSPassphrase)")
# don't exit
while 1:
time.sleep(5)
def enableNetwork(self):
print(_("Can't have a question in command line mode!"))
print("(enableNetwork)")
# don't exit
while 1:
time.sleep(5)
def resetInitializeDiskQuestion(self):
pass
def questionInitializeDisk(self, path, description, size, details=""):
print(_("Can't have a question in command line mode!"))
print("(questionInitializeDisk)")
# don't exit
while 1:
time.sleep(5)
def resetReinitInconsistentLVMQuestion(self):
pass
def questionReinitInconsistentLVM(self, pv_names=None, lv_name=None, vg_name=None):
print(_("Can't have a question in command line mode!"))
print("(questionReinitInconsistentLVM)")
# don't exit
while 1:
time.sleep(5)
def mainExceptionWindow(self, shortText, longTextFile):
print(shortText)
def waitWindow(self, title, text):
return WaitWindow(title, text)
def beep(self):
pass
def run(self, anaconda):
(step, instance) = anaconda.dispatch.currentStep()
while step:
if stepToClasses.has_key(step):
s = "nextWin = %s" %(stepToClasses[step],)
exec s
nextWin(instance)
else:
print("In interactive step %s, can't continue" %(step,))
while 1:
time.sleep(1)
anaconda.dispatch.gotoNext()
(step, instance) = anaconda.dispatch.currentStep()
def setInstallProgressClass(self, c):
self.instProgress = c
def setSteps(self, anaconda):
pass
class progressDisplay:
def __init__(self):
self.pct = 0
self.display = ""
def __del__(self):
pass
def processEvents(self):
pass
def setShowPercentage(self, val):
pass
def get_fraction(self):
return self.pct
def set_fraction(self, pct):
self.pct = pct
def set_text(self, txt):
pass
def set_label(self, txt):
stripped = strip_markup(txt)
if stripped != self.display:
self.display = stripped
print(self.display)
def setupProgressDisplay(anaconda):
if anaconda.dir == DISPATCH_BACK:
anaconda.intf.setInstallProgressClass(None)
return DISPATCH_BACK
else:
anaconda.intf.setInstallProgressClass(progressDisplay())
return DISPATCH_FORWARD
| gpl-2.0 | -6,505,053,441,133,871,000 | 27.556561 | 113 | 0.601965 | false |
bparzella/secsgem | secsgem/hsms/connectionstatemachine.py | 1 | 3401 | #####################################################################
# connectionstatemachine.py
#
# (c) Copyright 2013-2016, Benjamin Parzella. All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#####################################################################
"""Contains the state machine for the connection state."""
from transitions.extensions import HierarchicalMachine as Machine
from transitions.extensions.nesting import NestedState
NestedState.separator = '_'
STATE_NOT_CONNECTED = "NOT-CONNECTED"
STATE_CONNECTED = "CONNECTED"
STATE_NOT_SELECTED = "NOT-SELECTED"
STATE_CONNECTED_NOT_SELECTED = "{}{}{}".format(STATE_CONNECTED, NestedState.separator, STATE_NOT_SELECTED)
STATE_SELECTED = "SELECTED"
STATE_CONNECTED_SELECTED = "{}{}{}".format(STATE_CONNECTED, NestedState.separator, STATE_SELECTED)
class ConnectionStateMachine:
"""HSMS Connection state machine."""
def __init__(self, callbacks=None):
"""
Initialize the hsms connection state machine.
:param callbacks: callbacks for the state machine
"""
self.callbacks = {}
self.states = [STATE_NOT_CONNECTED,
{
'name': STATE_CONNECTED,
'on_enter': self._on_enter_CONNECTED,
'on_exit': self._on_exit_CONNECTED,
'children': [
STATE_NOT_SELECTED,
{
'name': STATE_SELECTED,
'on_enter': self._on_enter_CONNECTED_SELECTED
}
]
}]
# transition 1
self.machine = Machine(model=self, states=self.states, initial=STATE_NOT_CONNECTED, auto_transitions=False)
if callbacks:
self.callbacks = callbacks
self.machine.add_transition('connect', STATE_NOT_CONNECTED, STATE_CONNECTED_NOT_SELECTED) # transition 2
self.machine.add_transition('disconnect', STATE_CONNECTED, STATE_NOT_CONNECTED) # transition 3
self.machine.add_transition('select', STATE_CONNECTED_NOT_SELECTED, STATE_CONNECTED_SELECTED) # transition 4
self.machine.add_transition('deselect', STATE_CONNECTED_SELECTED, STATE_CONNECTED_NOT_SELECTED) # transition 5
self.machine.add_transition('timeoutT7', STATE_CONNECTED_NOT_SELECTED, STATE_NOT_CONNECTED) # transition 6
def _on_enter_CONNECTED(self):
if "on_enter_CONNECTED" in self.callbacks:
self.callbacks["on_enter_CONNECTED"]()
def _on_exit_CONNECTED(self):
if "on_exit_CONNECTED" in self.callbacks:
self.callbacks["on_exit_CONNECTED"]()
def _on_enter_CONNECTED_SELECTED(self):
if "on_enter_CONNECTED_SELECTED" in self.callbacks:
self.callbacks["on_enter_CONNECTED_SELECTED"]()
| lgpl-2.1 | 1,699,473,985,043,087,000 | 43.168831 | 119 | 0.609821 | false |
sbc100/native_client | pynacl/download_utils.py | 9 | 6570 | #!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A library to assist automatically downloading files.
This library is used by scripts that download tarballs, zipfiles, etc. as part
of the build process.
"""
import hashlib
import os.path
import re
import sys
import urllib2
import http_download
SOURCE_STAMP = 'SOURCE_URL'
HASH_STAMP = 'SOURCE_SHA1'
class HashError(Exception):
def __init__(self, download_url, expected_hash, actual_hash):
self.download_url = download_url
self.expected_hash = expected_hash
self.actual_hash = actual_hash
def __str__(self):
return 'Got hash "%s" but expected hash "%s" for "%s"' % (
self.actual_hash, self.expected_hash, self.download_url)
def EnsureFileCanBeWritten(filename):
directory = os.path.dirname(filename)
if not os.path.exists(directory):
os.makedirs(directory)
def WriteData(filename, data):
EnsureFileCanBeWritten(filename)
f = open(filename, 'wb')
f.write(data)
f.close()
def WriteDataFromStream(filename, stream, chunk_size, verbose=True):
EnsureFileCanBeWritten(filename)
dst = open(filename, 'wb')
try:
while True:
data = stream.read(chunk_size)
if len(data) == 0:
break
dst.write(data)
if verbose:
# Indicate that we're still writing.
sys.stdout.write('.')
sys.stdout.flush()
finally:
if verbose:
sys.stdout.write('\n')
dst.close()
def DoesStampMatch(stampfile, expected, index):
try:
f = open(stampfile, 'r')
stamp = f.read()
f.close()
if stamp.split('\n')[index] == expected:
return 'already up-to-date.'
elif stamp.startswith('manual'):
return 'manual override.'
return False
except IOError:
return False
def WriteStamp(stampfile, data):
EnsureFileCanBeWritten(stampfile)
f = open(stampfile, 'w')
f.write(data)
f.close()
def StampIsCurrent(path, stamp_name, stamp_contents, min_time=None, index=0):
stampfile = os.path.join(path, stamp_name)
stampmatch = DoesStampMatch(stampfile, stamp_contents, index)
# If toolchain was downloaded and/or created manually then keep it untouched
if stampmatch == 'manual override.':
return stampmatch
# Check if the stampfile is older than the minimum last mod time
if min_time:
try:
stamp_time = os.stat(stampfile).st_mtime
if stamp_time <= min_time:
return False
except OSError:
return False
return stampmatch
def WriteSourceStamp(path, url):
stampfile = os.path.join(path, SOURCE_STAMP)
WriteStamp(stampfile, url)
def WriteHashStamp(path, hash_val):
hash_stampfile = os.path.join(path, HASH_STAMP)
WriteStamp(hash_stampfile, hash_val)
def _HashFileHandle(fh):
"""sha1 of a file like object.
Arguments:
fh: file handle like object to hash.
Returns:
sha1 as a string.
"""
hasher = hashlib.sha1()
try:
while True:
data = fh.read(4096)
if not data:
break
hasher.update(data)
finally:
fh.close()
return hasher.hexdigest()
def HashFile(filename):
"""sha1 a file on disk.
Arguments:
filename: filename to hash.
Returns:
sha1 as a string.
"""
fh = open(filename, 'rb')
return _HashFileHandle(fh)
def HashUrlByDownloading(url):
"""sha1 the data at an url.
Arguments:
url: url to download from.
Returns:
sha1 of the data at the url.
"""
try:
fh = urllib2.urlopen(url)
except:
sys.stderr.write('Failed fetching URL: %s\n' % url)
raise
return _HashFileHandle(fh)
# Attempts to get the SHA1 hash of a file given a URL by looking for
# an adjacent file with a ".sha1hash" suffix. This saves having to
# download a large tarball just to get its hash. Otherwise, we fall
# back to downloading the main file.
def HashUrl(url):
hash_url = '%s.sha1hash' % url
try:
fh = urllib2.urlopen(hash_url)
data = fh.read(100)
fh.close()
except urllib2.HTTPError, exn:
if exn.code == 404:
return HashUrlByDownloading(url)
raise
else:
if not re.match('[0-9a-f]{40}\n?$', data):
raise AssertionError('Bad SHA1 hash file: %r' % data)
return data.strip()
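# Usage sketch for the hash helpers above. The URL is a placeholder and the
# calls perform file and network I/O, so they are shown as comments only:
#   remote_sha1 = HashUrl('https://example.com/toolchain.tar.bz2')
#   local_sha1 = HashFile('/tmp/toolchain.tar.bz2')
#   assert remote_sha1 == local_sha1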
def SyncURL(url, filename=None, stamp_dir=None, min_time=None,
hash_val=None, keep=False, verbose=False, stamp_index=0):
"""Synchronize a destination file with a URL
  If the URL does not match the URL stamp, then we must re-download it.
  Arguments:
    url: the url to compare against and download
    filename: the file to create on download
    stamp_dir: the directory containing the URL and hash stamps to check against
hash_val: if set, the expected hash which must be matched
verbose: prints out status as it runs
stamp_index: index within the stamp file to check.
Returns:
True if the file is replaced
False if the file is not replaced
Exception:
HashError: if the hash does not match
"""
assert url and filename
# If we are not keeping the tarball, or we already have it, we can
# skip downloading it for this reason. If we are keeping it,
# it must exist.
if keep:
tarball_ok = os.path.isfile(filename)
else:
tarball_ok = True
# If we don't need the tarball and the stamp_file matches the url, then
# we must be up to date. If the URL differs but the recorded hash matches
# the one we'll insist the tarball has, then that's good enough too.
# TODO(mcgrathr): Download the .sha1sum file first to compare with
# the cached hash, in case --file-hash options weren't used.
if tarball_ok and stamp_dir is not None:
if StampIsCurrent(stamp_dir, SOURCE_STAMP, url, min_time):
if verbose:
print '%s is already up to date.' % filename
return False
if (hash_val is not None and
StampIsCurrent(stamp_dir, HASH_STAMP, hash_val, min_time, stamp_index)):
if verbose:
print '%s is identical to the up to date file.' % filename
return False
if (os.path.isfile(filename)
and hash_val is not None
and hash_val == HashFile(filename)):
return True
if verbose:
print 'Updating %s\n\tfrom %s.' % (filename, url)
EnsureFileCanBeWritten(filename)
http_download.HttpDownload(url, filename)
if hash_val:
tar_hash = HashFile(filename)
if hash_val != tar_hash:
raise HashError(actual_hash=tar_hash, expected_hash=hash_val,
download_url=url)
return True
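# A hedged end-to-end sketch of how a build script might call SyncURL, based on
# the docstring above; the URL, hash and paths are placeholders and this helper
# is never invoked from this module.
def _sync_url_example():
    return SyncURL('https://example.com/toolchain.tar.bz2',
                   filename='out/toolchain.tar.bz2',
                   stamp_dir='out',        # SOURCE_URL / SOURCE_SHA1 stamps live here
                   hash_val='0' * 40,      # expected sha1; HashError raised on mismatch
                   keep=True,              # keep the downloaded tarball on disk
                   verbose=True)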
| bsd-3-clause | 8,852,930,381,967,876,000 | 25.491935 | 80 | 0.675342 | false |
PaloAltoNetworks-BD/SplunkforPaloAltoNetworks | Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py2/urllib3/contrib/securetransport.py | 20 | 32275 | """
SecureTranport support for urllib3 via ctypes.
This makes platform-native TLS available to urllib3 users on macOS without the
use of a compiler. This is an important feature because the Python Package
Index is moving to become a TLSv1.2-or-higher server, and the default OpenSSL
that ships with macOS is not capable of doing TLSv1.2. The only way to resolve
this is to give macOS users an alternative solution to the problem, and that
solution is to use SecureTransport.
We use ctypes here because this solution must not require a compiler. That's
because pip is not allowed to require a compiler either.
This is not intended to be a seriously long-term solution to this problem.
The hope is that PEP 543 will eventually solve this issue for us, at which
point we can retire this contrib module. But in the short term, we need to
solve the impending tire fire that is Python on Mac without this kind of
contrib module. So...here we are.
To use this module, simply import and inject it::
import urllib3.contrib.securetransport
urllib3.contrib.securetransport.inject_into_urllib3()
Happy TLSing!
This code is a bastardised version of the code found in Will Bond's oscrypto
library. An enormous debt is owed to him for blazing this trail for us. For
that reason, this code should be considered to be covered both by urllib3's
license and by oscrypto's:
Copyright (c) 2015-2016 Will Bond <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import
import contextlib
import ctypes
import errno
import os.path
import shutil
import socket
import ssl
import threading
import weakref
from .. import util
from ._securetransport.bindings import Security, SecurityConst, CoreFoundation
from ._securetransport.low_level import (
_assert_no_error,
_cert_array_from_pem,
_temporary_keychain,
_load_client_cert_chain,
)
try: # Platform-specific: Python 2
from socket import _fileobject
except ImportError: # Platform-specific: Python 3
_fileobject = None
from ..packages.backports.makefile import backport_makefile
__all__ = ["inject_into_urllib3", "extract_from_urllib3"]
# SNI always works
HAS_SNI = True
orig_util_HAS_SNI = util.HAS_SNI
orig_util_SSLContext = util.ssl_.SSLContext
# This dictionary is used by the read callback to obtain a handle to the
# calling wrapped socket. This is a pretty silly approach, but for now it'll
# do. I feel like I should be able to smuggle a handle to the wrapped socket
# directly in the SSLConnectionRef, but for now this approach will work I
# guess.
#
# We need to lock around this structure for inserts, but we don't do it for
# reads/writes in the callbacks. The reasoning here goes as follows:
#
# 1. It is not possible to call into the callbacks before the dictionary is
# populated, so once in the callback the id must be in the dictionary.
# 2. The callbacks don't mutate the dictionary, they only read from it, and
# so cannot conflict with any of the insertions.
#
# This is good: if we had to lock in the callbacks we'd drastically slow down
# the performance of this code.
_connection_refs = weakref.WeakValueDictionary()
_connection_ref_lock = threading.Lock()
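# For illustration only (the names below are local to this sketch): the sole
# mutation of _connection_refs happens under _connection_ref_lock, exactly as
# handshake() does further down, while the I/O callbacks merely read it:
#
#     with _connection_ref_lock:
#         handle = id(wrapped_socket) % 2147483647
#         while handle in _connection_refs:
#             handle = (handle + 1) % 2147483647
#         _connection_refs[handle] = wrapped_socket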
# Limit writes to 16kB. This is OpenSSL's limit, but we'll cargo-cult it over
# for no better reason than we need *a* limit, and this one is right there.
SSL_WRITE_BLOCKSIZE = 16384
# This is our equivalent of util.ssl_.DEFAULT_CIPHERS, but expanded out to
# individual cipher suites. We need to do this because this is how
# SecureTransport wants them.
CIPHER_SUITES = [
SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
SecurityConst.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
SecurityConst.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
SecurityConst.TLS_DHE_RSA_WITH_AES_256_GCM_SHA384,
SecurityConst.TLS_DHE_RSA_WITH_AES_128_GCM_SHA256,
SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
SecurityConst.TLS_AES_256_GCM_SHA384,
SecurityConst.TLS_AES_128_GCM_SHA256,
SecurityConst.TLS_RSA_WITH_AES_256_GCM_SHA384,
SecurityConst.TLS_RSA_WITH_AES_128_GCM_SHA256,
SecurityConst.TLS_AES_128_CCM_8_SHA256,
SecurityConst.TLS_AES_128_CCM_SHA256,
SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA256,
SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA256,
SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA,
SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA,
]
# Basically this is simple: for PROTOCOL_SSLv23 we turn it into a low of
# TLSv1 and a high of TLSv1.2. For everything else, we pin to that version.
# TLSv1 to 1.2 are supported on macOS 10.8+
_protocol_to_min_max = {
util.PROTOCOL_TLS: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12)
}
if hasattr(ssl, "PROTOCOL_SSLv2"):
_protocol_to_min_max[ssl.PROTOCOL_SSLv2] = (
SecurityConst.kSSLProtocol2,
SecurityConst.kSSLProtocol2,
)
if hasattr(ssl, "PROTOCOL_SSLv3"):
_protocol_to_min_max[ssl.PROTOCOL_SSLv3] = (
SecurityConst.kSSLProtocol3,
SecurityConst.kSSLProtocol3,
)
if hasattr(ssl, "PROTOCOL_TLSv1"):
_protocol_to_min_max[ssl.PROTOCOL_TLSv1] = (
SecurityConst.kTLSProtocol1,
SecurityConst.kTLSProtocol1,
)
if hasattr(ssl, "PROTOCOL_TLSv1_1"):
_protocol_to_min_max[ssl.PROTOCOL_TLSv1_1] = (
SecurityConst.kTLSProtocol11,
SecurityConst.kTLSProtocol11,
)
if hasattr(ssl, "PROTOCOL_TLSv1_2"):
_protocol_to_min_max[ssl.PROTOCOL_TLSv1_2] = (
SecurityConst.kTLSProtocol12,
SecurityConst.kTLSProtocol12,
)
def inject_into_urllib3():
"""
Monkey-patch urllib3 with SecureTransport-backed SSL-support.
"""
util.SSLContext = SecureTransportContext
util.ssl_.SSLContext = SecureTransportContext
util.HAS_SNI = HAS_SNI
util.ssl_.HAS_SNI = HAS_SNI
util.IS_SECURETRANSPORT = True
util.ssl_.IS_SECURETRANSPORT = True
def extract_from_urllib3():
"""
Undo monkey-patching by :func:`inject_into_urllib3`.
"""
util.SSLContext = orig_util_SSLContext
util.ssl_.SSLContext = orig_util_SSLContext
util.HAS_SNI = orig_util_HAS_SNI
util.ssl_.HAS_SNI = orig_util_HAS_SNI
util.IS_SECURETRANSPORT = False
util.ssl_.IS_SECURETRANSPORT = False
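# A minimal usage sketch of the two helpers above. Illustrative only: the
# context-manager wrapper is hypothetical and not part of urllib3's API.
#
#     import contextlib
#
#     @contextlib.contextmanager
#     def securetransport_patched():
#         inject_into_urllib3()
#         try:
#             yield
#         finally:
#             extract_from_urllib3()
#
#     with securetransport_patched():
#         ...  # urllib3 connections made here use SecureTransport-backed TLS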
def _read_callback(connection_id, data_buffer, data_length_pointer):
"""
SecureTransport read callback. This is called by ST to request that data
be returned from the socket.
"""
wrapped_socket = None
try:
wrapped_socket = _connection_refs.get(connection_id)
if wrapped_socket is None:
return SecurityConst.errSSLInternal
base_socket = wrapped_socket.socket
requested_length = data_length_pointer[0]
timeout = wrapped_socket.gettimeout()
error = None
read_count = 0
try:
while read_count < requested_length:
if timeout is None or timeout >= 0:
if not util.wait_for_read(base_socket, timeout):
raise socket.error(errno.EAGAIN, "timed out")
remaining = requested_length - read_count
buffer = (ctypes.c_char * remaining).from_address(
data_buffer + read_count
)
chunk_size = base_socket.recv_into(buffer, remaining)
read_count += chunk_size
if not chunk_size:
if not read_count:
return SecurityConst.errSSLClosedGraceful
break
except (socket.error) as e:
error = e.errno
if error is not None and error != errno.EAGAIN:
data_length_pointer[0] = read_count
if error == errno.ECONNRESET or error == errno.EPIPE:
return SecurityConst.errSSLClosedAbort
raise
data_length_pointer[0] = read_count
if read_count != requested_length:
return SecurityConst.errSSLWouldBlock
return 0
except Exception as e:
if wrapped_socket is not None:
wrapped_socket._exception = e
return SecurityConst.errSSLInternal
def _write_callback(connection_id, data_buffer, data_length_pointer):
"""
SecureTransport write callback. This is called by ST to request that data
actually be sent on the network.
"""
wrapped_socket = None
try:
wrapped_socket = _connection_refs.get(connection_id)
if wrapped_socket is None:
return SecurityConst.errSSLInternal
base_socket = wrapped_socket.socket
bytes_to_write = data_length_pointer[0]
data = ctypes.string_at(data_buffer, bytes_to_write)
timeout = wrapped_socket.gettimeout()
error = None
sent = 0
try:
while sent < bytes_to_write:
if timeout is None or timeout >= 0:
if not util.wait_for_write(base_socket, timeout):
raise socket.error(errno.EAGAIN, "timed out")
chunk_sent = base_socket.send(data)
sent += chunk_sent
# This has some needless copying here, but I'm not sure there's
# much value in optimising this data path.
data = data[chunk_sent:]
except (socket.error) as e:
error = e.errno
if error is not None and error != errno.EAGAIN:
data_length_pointer[0] = sent
if error == errno.ECONNRESET or error == errno.EPIPE:
return SecurityConst.errSSLClosedAbort
raise
data_length_pointer[0] = sent
if sent != bytes_to_write:
return SecurityConst.errSSLWouldBlock
return 0
except Exception as e:
if wrapped_socket is not None:
wrapped_socket._exception = e
return SecurityConst.errSSLInternal
# We need to keep references to these two callback objects alive: if they get GC'd while
# in use then SecureTransport could attempt to call a function that is in freed
# memory. That would be...uh...bad. Yeah, that's the word. Bad.
_read_callback_pointer = Security.SSLReadFunc(_read_callback)
_write_callback_pointer = Security.SSLWriteFunc(_write_callback)
class WrappedSocket(object):
"""
API-compatibility wrapper for Python's OpenSSL wrapped socket object.
Note: _makefile_refs, _drop(), and _reuse() are needed for the garbage
collector of PyPy.
"""
def __init__(self, socket):
self.socket = socket
self.context = None
self._makefile_refs = 0
self._closed = False
self._exception = None
self._keychain = None
self._keychain_dir = None
self._client_cert_chain = None
# We save off the previously-configured timeout and then set it to
# zero. This is done because we use select and friends to handle the
# timeouts, but if we leave the timeout set on the lower socket then
# Python will "kindly" call select on that socket again for us. Avoid
# that by forcing the timeout to zero.
self._timeout = self.socket.gettimeout()
self.socket.settimeout(0)
@contextlib.contextmanager
def _raise_on_error(self):
"""
A context manager that can be used to wrap calls that do I/O from
SecureTransport. If any of the I/O callbacks hit an exception, this
context manager will correctly propagate the exception after the fact.
This avoids silently swallowing those exceptions.
It also correctly forces the socket closed.
"""
self._exception = None
# We explicitly don't catch around this yield because in the unlikely
# event that an exception was hit in the block we don't want to swallow
# it.
yield
if self._exception is not None:
exception, self._exception = self._exception, None
self.close()
raise exception
def _set_ciphers(self):
"""
Sets up the allowed ciphers. By default this matches the set in
util.ssl_.DEFAULT_CIPHERS, at least as supported by macOS. This is done
custom and doesn't allow changing at this time, mostly because parsing
OpenSSL cipher strings is going to be a freaking nightmare.
"""
ciphers = (Security.SSLCipherSuite * len(CIPHER_SUITES))(*CIPHER_SUITES)
result = Security.SSLSetEnabledCiphers(
self.context, ciphers, len(CIPHER_SUITES)
)
_assert_no_error(result)
def _custom_validate(self, verify, trust_bundle):
"""
Called when we have set custom validation. We do this in two cases:
first, when cert validation is entirely disabled; and second, when
using a custom trust DB.
"""
# If we disabled cert validation, just say: cool.
if not verify:
return
# We want data in memory, so load it up.
if os.path.isfile(trust_bundle):
with open(trust_bundle, "rb") as f:
trust_bundle = f.read()
cert_array = None
trust = Security.SecTrustRef()
try:
# Get a CFArray that contains the certs we want.
cert_array = _cert_array_from_pem(trust_bundle)
# Ok, now the hard part. We want to get the SecTrustRef that ST has
# created for this connection, shove our CAs into it, tell ST to
# ignore everything else it knows, and then ask if it can build a
# chain. This is a buuuunch of code.
result = Security.SSLCopyPeerTrust(self.context, ctypes.byref(trust))
_assert_no_error(result)
if not trust:
raise ssl.SSLError("Failed to copy trust reference")
result = Security.SecTrustSetAnchorCertificates(trust, cert_array)
_assert_no_error(result)
result = Security.SecTrustSetAnchorCertificatesOnly(trust, True)
_assert_no_error(result)
trust_result = Security.SecTrustResultType()
result = Security.SecTrustEvaluate(trust, ctypes.byref(trust_result))
_assert_no_error(result)
finally:
if trust:
CoreFoundation.CFRelease(trust)
if cert_array is not None:
CoreFoundation.CFRelease(cert_array)
# Ok, now we can look at what the result was.
successes = (
SecurityConst.kSecTrustResultUnspecified,
SecurityConst.kSecTrustResultProceed,
)
if trust_result.value not in successes:
raise ssl.SSLError(
"certificate verify failed, error code: %d" % trust_result.value
)
def handshake(
self,
server_hostname,
verify,
trust_bundle,
min_version,
max_version,
client_cert,
client_key,
client_key_passphrase,
):
"""
Actually performs the TLS handshake. This is run automatically by
wrapped socket, and shouldn't be needed in user code.
"""
# First, we do the initial bits of connection setup. We need to create
# a context, set its I/O funcs, and set the connection reference.
self.context = Security.SSLCreateContext(
None, SecurityConst.kSSLClientSide, SecurityConst.kSSLStreamType
)
result = Security.SSLSetIOFuncs(
self.context, _read_callback_pointer, _write_callback_pointer
)
_assert_no_error(result)
# Here we need to compute the handle to use. We do this by taking the
# id of self modulo 2**31 - 1. If this is already in the dictionary, we
# just keep incrementing by one until we find a free space.
with _connection_ref_lock:
handle = id(self) % 2147483647
while handle in _connection_refs:
handle = (handle + 1) % 2147483647
_connection_refs[handle] = self
result = Security.SSLSetConnection(self.context, handle)
_assert_no_error(result)
# If we have a server hostname, we should set that too.
if server_hostname:
if not isinstance(server_hostname, bytes):
server_hostname = server_hostname.encode("utf-8")
result = Security.SSLSetPeerDomainName(
self.context, server_hostname, len(server_hostname)
)
_assert_no_error(result)
# Setup the ciphers.
self._set_ciphers()
# Set the minimum and maximum TLS versions.
result = Security.SSLSetProtocolVersionMin(self.context, min_version)
_assert_no_error(result)
result = Security.SSLSetProtocolVersionMax(self.context, max_version)
_assert_no_error(result)
# If there's a trust DB, we need to use it. We do that by telling
# SecureTransport to break on server auth. We also do that if we don't
# want to validate the certs at all: we just won't actually do any
# authing in that case.
if not verify or trust_bundle is not None:
result = Security.SSLSetSessionOption(
self.context, SecurityConst.kSSLSessionOptionBreakOnServerAuth, True
)
_assert_no_error(result)
# If there's a client cert, we need to use it.
if client_cert:
self._keychain, self._keychain_dir = _temporary_keychain()
self._client_cert_chain = _load_client_cert_chain(
self._keychain, client_cert, client_key
)
result = Security.SSLSetCertificate(self.context, self._client_cert_chain)
_assert_no_error(result)
while True:
with self._raise_on_error():
result = Security.SSLHandshake(self.context)
if result == SecurityConst.errSSLWouldBlock:
raise socket.timeout("handshake timed out")
elif result == SecurityConst.errSSLServerAuthCompleted:
self._custom_validate(verify, trust_bundle)
continue
else:
_assert_no_error(result)
break
def fileno(self):
return self.socket.fileno()
# Copy-pasted from Python 3.5 source code
def _decref_socketios(self):
if self._makefile_refs > 0:
self._makefile_refs -= 1
if self._closed:
self.close()
def recv(self, bufsiz):
buffer = ctypes.create_string_buffer(bufsiz)
bytes_read = self.recv_into(buffer, bufsiz)
data = buffer[:bytes_read]
return data
def recv_into(self, buffer, nbytes=None):
# Read short on EOF.
if self._closed:
return 0
if nbytes is None:
nbytes = len(buffer)
buffer = (ctypes.c_char * nbytes).from_buffer(buffer)
processed_bytes = ctypes.c_size_t(0)
with self._raise_on_error():
result = Security.SSLRead(
self.context, buffer, nbytes, ctypes.byref(processed_bytes)
)
# There are some result codes that we want to treat as "not always
# errors". Specifically, those are errSSLWouldBlock,
# errSSLClosedGraceful, and errSSLClosedNoNotify.
if result == SecurityConst.errSSLWouldBlock:
# If we didn't process any bytes, then this was just a time out.
# However, we can get errSSLWouldBlock in situations when we *did*
# read some data, and in those cases we should just read "short"
# and return.
if processed_bytes.value == 0:
# Timed out, no data read.
raise socket.timeout("recv timed out")
elif result in (
SecurityConst.errSSLClosedGraceful,
SecurityConst.errSSLClosedNoNotify,
):
# The remote peer has closed this connection. We should do so as
# well. Note that we don't actually return here because in
# principle this could actually be fired along with return data.
# It's unlikely though.
self.close()
else:
_assert_no_error(result)
# Ok, we read and probably succeeded. We should return whatever data
# was actually read.
return processed_bytes.value
def settimeout(self, timeout):
self._timeout = timeout
def gettimeout(self):
return self._timeout
def send(self, data):
processed_bytes = ctypes.c_size_t(0)
with self._raise_on_error():
result = Security.SSLWrite(
self.context, data, len(data), ctypes.byref(processed_bytes)
)
if result == SecurityConst.errSSLWouldBlock and processed_bytes.value == 0:
# Timed out
raise socket.timeout("send timed out")
else:
_assert_no_error(result)
# We sent, and probably succeeded. Tell them how much we sent.
return processed_bytes.value
def sendall(self, data):
total_sent = 0
while total_sent < len(data):
sent = self.send(data[total_sent : total_sent + SSL_WRITE_BLOCKSIZE])
total_sent += sent
def shutdown(self):
with self._raise_on_error():
Security.SSLClose(self.context)
def close(self):
# TODO: should I do clean shutdown here? Do I have to?
if self._makefile_refs < 1:
self._closed = True
if self.context:
CoreFoundation.CFRelease(self.context)
self.context = None
if self._client_cert_chain:
CoreFoundation.CFRelease(self._client_cert_chain)
self._client_cert_chain = None
if self._keychain:
Security.SecKeychainDelete(self._keychain)
CoreFoundation.CFRelease(self._keychain)
shutil.rmtree(self._keychain_dir)
self._keychain = self._keychain_dir = None
return self.socket.close()
else:
self._makefile_refs -= 1
def getpeercert(self, binary_form=False):
# Urgh, annoying.
#
# Here's how we do this:
#
# 1. Call SSLCopyPeerTrust to get hold of the trust object for this
# connection.
# 2. Call SecTrustGetCertificateAtIndex for index 0 to get the leaf.
# 3. To get the CN, call SecCertificateCopyCommonName and process that
# string so that it's of the appropriate type.
# 4. To get the SAN, we need to do something a bit more complex:
# a. Call SecCertificateCopyValues to get the data, requesting
# kSecOIDSubjectAltName.
# b. Mess about with this dictionary to try to get the SANs out.
#
# This is gross. Really gross. It's going to be a few hundred LoC extra
# just to repeat something that SecureTransport can *already do*. So my
# operating assumption at this time is that what we want to do is
# instead to just flag to urllib3 that it shouldn't do its own hostname
# validation when using SecureTransport.
if not binary_form:
raise ValueError("SecureTransport only supports dumping binary certs")
trust = Security.SecTrustRef()
certdata = None
der_bytes = None
try:
# Grab the trust store.
result = Security.SSLCopyPeerTrust(self.context, ctypes.byref(trust))
_assert_no_error(result)
if not trust:
# Probably we haven't done the handshake yet. No biggie.
return None
cert_count = Security.SecTrustGetCertificateCount(trust)
if not cert_count:
# Also a case that might happen if we haven't handshaked.
# Handshook? Handshaken?
return None
leaf = Security.SecTrustGetCertificateAtIndex(trust, 0)
assert leaf
# Ok, now we want the DER bytes.
certdata = Security.SecCertificateCopyData(leaf)
assert certdata
data_length = CoreFoundation.CFDataGetLength(certdata)
data_buffer = CoreFoundation.CFDataGetBytePtr(certdata)
der_bytes = ctypes.string_at(data_buffer, data_length)
finally:
if certdata:
CoreFoundation.CFRelease(certdata)
if trust:
CoreFoundation.CFRelease(trust)
return der_bytes
def version(self):
protocol = Security.SSLProtocol()
result = Security.SSLGetNegotiatedProtocolVersion(
self.context, ctypes.byref(protocol)
)
_assert_no_error(result)
if protocol.value == SecurityConst.kTLSProtocol13:
raise ssl.SSLError("SecureTransport does not support TLS 1.3")
elif protocol.value == SecurityConst.kTLSProtocol12:
return "TLSv1.2"
elif protocol.value == SecurityConst.kTLSProtocol11:
return "TLSv1.1"
elif protocol.value == SecurityConst.kTLSProtocol1:
return "TLSv1"
elif protocol.value == SecurityConst.kSSLProtocol3:
return "SSLv3"
elif protocol.value == SecurityConst.kSSLProtocol2:
return "SSLv2"
else:
raise ssl.SSLError("Unknown TLS version: %r" % protocol)
def _reuse(self):
self._makefile_refs += 1
def _drop(self):
if self._makefile_refs < 1:
self.close()
else:
self._makefile_refs -= 1
if _fileobject: # Platform-specific: Python 2
def makefile(self, mode, bufsize=-1):
self._makefile_refs += 1
return _fileobject(self, mode, bufsize, close=True)
else: # Platform-specific: Python 3
def makefile(self, mode="r", buffering=None, *args, **kwargs):
# We disable buffering with SecureTransport because it conflicts with
# the buffering that ST does internally (see issue #1153 for more).
buffering = 0
return backport_makefile(self, mode, buffering, *args, **kwargs)
WrappedSocket.makefile = makefile
class SecureTransportContext(object):
"""
I am a wrapper class for the SecureTransport library, to translate the
interface of the standard library ``SSLContext`` object to calls into
SecureTransport.
"""
def __init__(self, protocol):
self._min_version, self._max_version = _protocol_to_min_max[protocol]
self._options = 0
self._verify = False
self._trust_bundle = None
self._client_cert = None
self._client_key = None
self._client_key_passphrase = None
@property
def check_hostname(self):
"""
SecureTransport cannot have its hostname checking disabled. For more,
see the comment on getpeercert() in this file.
"""
return True
@check_hostname.setter
def check_hostname(self, value):
"""
SecureTransport cannot have its hostname checking disabled. For more,
see the comment on getpeercert() in this file.
"""
pass
@property
def options(self):
# TODO: Well, crap.
#
# So this is the bit of the code that is the most likely to cause us
# trouble. Essentially we need to enumerate all of the SSL options that
# users might want to use and try to see if we can sensibly translate
# them, or whether we should just ignore them.
return self._options
@options.setter
def options(self, value):
# TODO: Update in line with above.
self._options = value
@property
def verify_mode(self):
return ssl.CERT_REQUIRED if self._verify else ssl.CERT_NONE
@verify_mode.setter
def verify_mode(self, value):
self._verify = True if value == ssl.CERT_REQUIRED else False
def set_default_verify_paths(self):
# So, this has to do something a bit weird. Specifically, what it does
# is nothing.
#
# This means that, if we had previously had load_verify_locations
# called, this does not undo that. We need to do that because it turns
# out that the rest of the urllib3 code will attempt to load the
# default verify paths if it hasn't been told about any paths, even if
        # the context itself was configured sometime earlier. We resolve that
        # by just ignoring it.
pass
def load_default_certs(self):
return self.set_default_verify_paths()
def set_ciphers(self, ciphers):
# For now, we just require the default cipher string.
if ciphers != util.ssl_.DEFAULT_CIPHERS:
raise ValueError("SecureTransport doesn't support custom cipher strings")
def load_verify_locations(self, cafile=None, capath=None, cadata=None):
# OK, we only really support cadata and cafile.
if capath is not None:
raise ValueError("SecureTransport does not support cert directories")
self._trust_bundle = cafile or cadata
def load_cert_chain(self, certfile, keyfile=None, password=None):
self._client_cert = certfile
self._client_key = keyfile
self._client_cert_passphrase = password
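        # Note: the password is stored as _client_cert_passphrase, while
        # wrap_socket() below passes self._client_key_passphrase (set to None
        # in __init__) to handshake(), so a password supplied here is never
        # actually used by the handshake.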
def wrap_socket(
self,
sock,
server_side=False,
do_handshake_on_connect=True,
suppress_ragged_eofs=True,
server_hostname=None,
):
# So, what do we do here? Firstly, we assert some properties. This is a
# stripped down shim, so there is some functionality we don't support.
# See PEP 543 for the real deal.
assert not server_side
assert do_handshake_on_connect
assert suppress_ragged_eofs
# Ok, we're good to go. Now we want to create the wrapped socket object
# and store it in the appropriate place.
wrapped_socket = WrappedSocket(sock)
# Now we can handshake
wrapped_socket.handshake(
server_hostname,
self._verify,
self._trust_bundle,
self._min_version,
self._max_version,
self._client_cert,
self._client_key,
self._client_key_passphrase,
)
return wrapped_socket
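# A hedged usage sketch for SecureTransportContext (illustrative only; the
# host, port and CA bundle path are placeholders). This is roughly what
# urllib3 does internally once inject_into_urllib3() has swapped this class in:
#
#     import socket
#
#     ctx = SecureTransportContext(util.PROTOCOL_TLS)
#     ctx.verify_mode = ssl.CERT_REQUIRED
#     ctx.load_verify_locations(cafile="/path/to/ca-bundle.pem")
#     raw = socket.create_connection(("example.com", 443))
#     tls = ctx.wrap_socket(raw, server_hostname="example.com")
#     tls.sendall(b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")
#     tls.close()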
| isc | -2,987,171,502,642,009,000 | 36.572759 | 86 | 0.636654 | false |
daxiaodou/opendcp | octans/octans/worker/pool.py | 5 | 1115 | #!/usr/bin/env python
#
# Copyright (C) 2016 Weibo Inc.
#
# This file is part of Opendcp.
#
# Opendcp is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# Opendcp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Opendcp. if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
#
# -*- coding: utf-8 -*-
# Author: whiteblue
# Created : 16/8/16
from multiprocessing import Process
# multiprocessing.Pool is a factory function, not a class, so it cannot be
# subclassed directly; the real Pool class lives in multiprocessing.pool.
from multiprocessing.pool import Pool
class NoDaemonProcess(Process):
def _get_daemon(self):
return False
def _set_daemon(self, value):
pass
daemon = property(_get_daemon, _set_daemon)
class ProcessPool(Pool):
Process = NoDaemonProcess
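# Illustrative helper (not part of the original module; the name is
# arbitrary): because the pool's workers are NoDaemonProcess instances they
# are allowed to spawn children, so a task can itself create a nested
# ProcessPool.
def _nested_pool_example():
    inner = ProcessPool(2)
    try:
        return inner.map(str, range(4))
    finally:
        inner.close()
        inner.join()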
| gpl-2.0 | 8,095,804,657,252,306,000 | 26.195122 | 78 | 0.697758 | false |
supergis/QGIS | python/plugins/processing/algs/qgis/QGISAlgorithmProvider.py | 2 | 10103 | # -*- coding: utf-8 -*-
"""
***************************************************************************
QGISAlgorithmProvider.py
---------------------
Date : December 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'December 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
try:
import matplotlib.pyplot
hasMatplotlib = True
except:
hasMatplotlib = False
from PyQt4.QtGui import QIcon
from processing.core.AlgorithmProvider import AlgorithmProvider
from processing.script.ScriptUtils import ScriptUtils
from RegularPoints import RegularPoints
from SymmetricalDifference import SymmetricalDifference
from VectorSplit import VectorSplit
from VectorGrid import VectorGrid
from RandomExtract import RandomExtract
from RandomExtractWithinSubsets import RandomExtractWithinSubsets
from ExtractByLocation import ExtractByLocation
from PointsInPolygon import PointsInPolygon
from PointsInPolygonUnique import PointsInPolygonUnique
from PointsInPolygonWeighted import PointsInPolygonWeighted
from SumLines import SumLines
from BasicStatisticsNumbers import BasicStatisticsNumbers
from BasicStatisticsStrings import BasicStatisticsStrings
from NearestNeighbourAnalysis import NearestNeighbourAnalysis
from LinesIntersection import LinesIntersection
from MeanCoords import MeanCoords
from PointDistance import PointDistance
from UniqueValues import UniqueValues
from ReprojectLayer import ReprojectLayer
from ExportGeometryInfo import ExportGeometryInfo
from Centroids import Centroids
from Delaunay import Delaunay
from VoronoiPolygons import VoronoiPolygons
from DensifyGeometries import DensifyGeometries
from MultipartToSingleparts import MultipartToSingleparts
from SimplifyGeometries import SimplifyGeometries
from LinesToPolygons import LinesToPolygons
from PolygonsToLines import PolygonsToLines
from SinglePartsToMultiparts import SinglePartsToMultiparts
from ExtractNodes import ExtractNodes
from ConvexHull import ConvexHull
from FixedDistanceBuffer import FixedDistanceBuffer
from VariableDistanceBuffer import VariableDistanceBuffer
from Clip import Clip
from Difference import Difference
from Dissolve import Dissolve
from Intersection import Intersection
from ExtentFromLayer import ExtentFromLayer
from RandomSelection import RandomSelection
from RandomSelectionWithinSubsets import RandomSelectionWithinSubsets
from SelectByLocation import SelectByLocation
from Union import Union
from DensifyGeometriesInterval import DensifyGeometriesInterval
from Eliminate import Eliminate
from SpatialJoin import SpatialJoin
from DeleteColumn import DeleteColumn
from DeleteHoles import DeleteHoles
from DeleteDuplicateGeometries import DeleteDuplicateGeometries
from TextToFloat import TextToFloat
from ExtractByAttribute import ExtractByAttribute
from SelectByAttribute import SelectByAttribute
from Grid import Grid
from Gridify import Gridify
from HubDistance import HubDistance
from HubLines import HubLines
from Merge import Merge
from GeometryConvert import GeometryConvert
from ConcaveHull import ConcaveHull
from Polygonize import Polygonize
from RasterLayerStatistics import RasterLayerStatistics
from StatisticsByCategories import StatisticsByCategories
from EquivalentNumField import EquivalentNumField
from AddTableField import AddTableField
from FieldsCalculator import FieldsCalculator
from SaveSelectedFeatures import SaveSelectedFeatures
from Explode import Explode
from AutoincrementalField import AutoincrementalField
from FieldPyculator import FieldsPyculator
from JoinAttributes import JoinAttributes
from CreateConstantRaster import CreateConstantRaster
from PointsLayerFromTable import PointsLayerFromTable
from PointsDisplacement import PointsDisplacement
from ZonalStatistics import ZonalStatistics
from PointsFromPolygons import PointsFromPolygons
from PointsFromLines import PointsFromLines
from RandomPointsExtent import RandomPointsExtent
from RandomPointsLayer import RandomPointsLayer
from RandomPointsPolygonsFixed import RandomPointsPolygonsFixed
from RandomPointsPolygonsVariable import RandomPointsPolygonsVariable
from RandomPointsAlongLines import RandomPointsAlongLines
from PointsToPaths import PointsToPaths
from PostGISExecuteSQL import PostGISExecuteSQL
from ImportIntoPostGIS import ImportIntoPostGIS
from SetVectorStyle import SetVectorStyle
from SetRasterStyle import SetRasterStyle
from SelectByExpression import SelectByExpression
from SelectByAttributeSum import SelectByAttributeSum
from HypsometricCurves import HypsometricCurves
from SplitLinesWithLines import SplitLinesWithLines
from FieldsMapper import FieldsMapper
from Datasources2Vrt import Datasources2Vrt
from CheckValidity import CheckValidity
from OrientedMinimumBoundingBox import OrientedMinimumBoundingBox
from Smooth import Smooth
from ReverseLineDirection import ReverseLineDirection
pluginPath = os.path.normpath(os.path.join(
os.path.split(os.path.dirname(__file__))[0], os.pardir))
class QGISAlgorithmProvider(AlgorithmProvider):
_icon = QIcon(os.path.join(pluginPath, 'images', 'qgis.png'))
def __init__(self):
AlgorithmProvider.__init__(self)
self.alglist = [SumLines(), PointsInPolygon(),
PointsInPolygonWeighted(), PointsInPolygonUnique(),
BasicStatisticsStrings(), BasicStatisticsNumbers(),
NearestNeighbourAnalysis(), MeanCoords(),
LinesIntersection(), UniqueValues(), PointDistance(),
ReprojectLayer(), ExportGeometryInfo(), Centroids(),
Delaunay(), VoronoiPolygons(), SimplifyGeometries(),
DensifyGeometries(), DensifyGeometriesInterval(),
MultipartToSingleparts(), SinglePartsToMultiparts(),
PolygonsToLines(), LinesToPolygons(), ExtractNodes(),
Eliminate(), ConvexHull(), FixedDistanceBuffer(),
VariableDistanceBuffer(), Dissolve(), Difference(),
Intersection(), Union(), Clip(), ExtentFromLayer(),
RandomSelection(), RandomSelectionWithinSubsets(),
SelectByLocation(), RandomExtract(), DeleteHoles(),
RandomExtractWithinSubsets(), ExtractByLocation(),
SpatialJoin(), RegularPoints(), SymmetricalDifference(),
VectorSplit(), VectorGrid(), DeleteColumn(),
DeleteDuplicateGeometries(), TextToFloat(),
ExtractByAttribute(), SelectByAttribute(), Grid(),
Gridify(), HubDistance(), HubLines(), Merge(),
GeometryConvert(), AddTableField(), FieldsCalculator(),
SaveSelectedFeatures(), JoinAttributes(),
AutoincrementalField(), Explode(), FieldsPyculator(),
EquivalentNumField(), PointsLayerFromTable(),
StatisticsByCategories(), ConcaveHull(), Polygonize(),
RasterLayerStatistics(), PointsDisplacement(),
ZonalStatistics(), PointsFromPolygons(),
PointsFromLines(), RandomPointsExtent(),
RandomPointsLayer(), RandomPointsPolygonsFixed(),
RandomPointsPolygonsVariable(),
RandomPointsAlongLines(), PointsToPaths(),
PostGISExecuteSQL(), ImportIntoPostGIS(),
SetVectorStyle(), SetRasterStyle(),
SelectByExpression(), HypsometricCurves(),
SplitLinesWithLines(), CreateConstantRaster(),
FieldsMapper(), SelectByAttributeSum(), Datasources2Vrt(),
CheckValidity(), OrientedMinimumBoundingBox(), Smooth(),
ReverseLineDirection()
]
if hasMatplotlib:
from VectorLayerHistogram import VectorLayerHistogram
from RasterLayerHistogram import RasterLayerHistogram
from VectorLayerScatterplot import VectorLayerScatterplot
from MeanAndStdDevPlot import MeanAndStdDevPlot
from BarPlot import BarPlot
from PolarPlot import PolarPlot
self.alglist.extend([
VectorLayerHistogram(), RasterLayerHistogram(),
VectorLayerScatterplot(), MeanAndStdDevPlot(), BarPlot(),
PolarPlot(),
])
folder = os.path.join(os.path.dirname(__file__), 'scripts')
scripts = ScriptUtils.loadFromFolder(folder)
for script in scripts:
script.allowEdit = False
self.alglist.extend(scripts)
for alg in self.alglist:
alg._icon = self._icon
def initializeSettings(self):
AlgorithmProvider.initializeSettings(self)
def unload(self):
AlgorithmProvider.unload(self)
def getName(self):
return 'qgis'
def getDescription(self):
return self.tr('QGIS geoalgorithms')
def getIcon(self):
return self._icon
def _loadAlgorithms(self):
self.algs = self.alglist
def supportsNonFileBasedOutput(self):
return True
| gpl-2.0 | 5,348,616,441,299,567,000 | 43.117904 | 82 | 0.698505 | false |
saschwarz/django-gcse | gcse/urls.py | 1 | 3699 | from django.conf.urls import patterns, url
from django.views.generic import TemplateView
from .views import (AnnotationList, AnnotationSearchList, AnnotationDetail,
CSEAnnotations, CSEAnnotationList,
CustomSearchEngineList, CustomSearchEngineResults,
CustomSearchEngineDetail, CustomSearchEngineDetailXML,
CSELabelList, CSELabelDetail, LabelDetail, LabelList,
)
urlpatterns = patterns('gcse.views',
# urls for Google search related resources
url(r'^(?P<gid>[\w-]+).xml$',
CustomSearchEngineDetailXML.as_view(),
name='gcse_cse'),
url(r'^annotations/(?P<gid>[\w-]+).(?P<page>\w+).xml$',
CSEAnnotations.as_view(),
name='gcse_annotations'),
# all CSEs
url(r'^cses/$',
CustomSearchEngineList.as_view(),
name='gcse_cse_list'),
# a single CSE
url(r'^cses/(?P<gid>[\w-]+)/$',
CustomSearchEngineDetail.as_view(),
name='gcse_cse_detail'),
# display Google Search results
url(r'^cses/(?P<gid>[\w-]+)/results/$',
CustomSearchEngineResults.as_view(),
name='gcse_results'),
# all Annotations
url(r'^annotations/$',
AnnotationList.as_view(),
name='gcse_annotation_list'),
# Search for Annotations containing string
url(r'^annotations/search/$',
AnnotationSearchList.as_view(),
name='gcse_search'),
# One Annotation
url(r'^annotations/(?P<id>.+)/$',
AnnotationDetail.as_view(),
name='gcse_annotation_detail'),
# Annotations for one CSE
url(r'^cses/(?P<gid>[\w-]+)/annotations/$',
CSEAnnotationList.as_view(),
name='gcse_cse_annotation_list'),
# all Labels
url(r'^labels/$',
LabelList.as_view(),
name="gcse_label_list"),
# One Label (all CSEs)
url(r'^labels/(?P<id>.+)/$',
LabelDetail.as_view(),
name='gcse_label_detail'),
# Labels for one CSE
url(r'^cses/(?P<gid>[\w-]+)/labels/$',
CSELabelList.as_view(),
name='gcse_cse_label_list'),
# One CSE's Annotations for one Label
url(r'^cses/(?P<gid>.+)/labels/(?P<id>.+)/$',
CSELabelDetail.as_view(),
name='gcse_cse_label_detail'),
# url(r'^site/edit/(?P<id>\d+)/$', 'edit', name='edit'),
# url(r'^site/view/(?P<id>\d+)/$', 'view', name='view'),
# url(r'^site/add/$', 'edit', {'add': True}, name='add'),
# url(r'^site/add/thanks/$', TemplateView.as_view(template_name='gcse/thanks.html'), name='thanks'),
)
| bsd-3-clause | 6,814,967,945,531,935,000 | 44.666667 | 124 | 0.403893 | false |
ktnyt/chainer | tests/chainer_tests/utils_tests/test_utils.py | 21 | 1830 | import unittest
import numpy
from chainer import testing
from chainer import utils
@testing.parameterize(*testing.product({
'dtype': [None, numpy.float16, numpy.float32, numpy.float64],
}))
class TestForceArray(unittest.TestCase):
def test_scalar(self):
x = utils.force_array(numpy.float32(1), dtype=self.dtype)
self.assertIsInstance(x, numpy.ndarray)
if self.dtype is None:
self.assertEqual(x.dtype, numpy.float32)
else:
self.assertEqual(x.dtype, self.dtype)
def test_0dim_array(self):
x = utils.force_array(numpy.array(1, numpy.float32), dtype=self.dtype)
self.assertIsInstance(x, numpy.ndarray)
if self.dtype is None:
self.assertEqual(x.dtype, numpy.float32)
else:
self.assertEqual(x.dtype, self.dtype)
def test_array(self):
x = utils.force_array(numpy.array([1], numpy.float32),
dtype=self.dtype)
self.assertIsInstance(x, numpy.ndarray)
if self.dtype is None:
self.assertEqual(x.dtype, numpy.float32)
else:
self.assertEqual(x.dtype, self.dtype)
class TestForceType(unittest.TestCase):
def test_force_type_scalar(self):
x = numpy.int32(1)
y = utils.force_type(numpy.dtype(numpy.float32), x)
self.assertEqual(y.dtype, numpy.float32)
def test_force_type_array(self):
x = numpy.array([1], dtype=numpy.int32)
y = utils.force_type(numpy.dtype(numpy.float32), x)
self.assertEqual(y.dtype, numpy.float32)
def test_force_type_array_no_change(self):
x = numpy.array([1], dtype=numpy.float32)
y = utils.force_type(numpy.dtype(numpy.float32), x)
self.assertEqual(y.dtype, numpy.float32)
testing.run_module(__name__, __file__)
| mit | -290,197,506,361,485,700 | 30.551724 | 78 | 0.63388 | false |
morenice/learn-algorithms | algorithms/sorting/merge.py | 2 | 1606 | # -*- coding: utf-8 -*-
import cProfile
import pstats
import sys
def read_data_to_list(input_list, input_count):
for n in range(input_count):
input_list.append(int(input()))
def merge_sort(alist):
if len(alist) <= 1:
return
mid = len(alist)//2
left_half = alist[:mid]
right_half = alist[mid:]
merge_sort(left_half)
merge_sort(right_half)
    left_iter = 0
    right_iter = 0
    alist_iter = 0
    # Merge the two sorted halves back into alist.
    while left_iter < len(left_half) and right_iter < len(right_half):
        if left_half[left_iter] < right_half[right_iter]:
            alist[alist_iter] = left_half[left_iter]
            left_iter += 1
        else:
            alist[alist_iter] = right_half[right_iter]
            right_iter += 1
        alist_iter += 1
    # Copy any elements remaining in the left half.
    while left_iter < len(left_half):
        alist[alist_iter] = left_half[left_iter]
        left_iter += 1
        alist_iter += 1
    # Copy any elements remaining in the right half.
    while right_iter < len(right_half):
        alist[alist_iter] = right_half[right_iter]
        right_iter += 1
        alist_iter += 1
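# Illustrative example (not part of the original script; the helper name is
# arbitrary). It shows that merge_sort sorts its argument in place and
# returns None.
def _merge_sort_example():
    data = [5, 2, 9, 1]
    merge_sort(data)
    assert data == [1, 2, 5, 9]
    return data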
if __name__ == '__main__':
is_profile_mode = False
if len(sys.argv) > 1 and sys.argv[1] == "profile":
is_profile_mode = True
if is_profile_mode:
pr = cProfile.Profile()
pr.enable()
input_list = list()
input_len = int(input())
read_data_to_list(input_list, input_len)
merge_sort(input_list)
for p in input_list:
print(p)
if is_profile_mode:
pr.disable()
ps = pstats.Stats(pr).sort_stats('cumulative')
ps.print_stats()
| mit | 199,561,442,479,758,560 | 22.970149 | 70 | 0.587796 | false |
dagwieers/ansible | test/units/modules/network/cnos/test_cnos_linkagg.py | 32 | 4430 | #
# (c) 2018 Red Hat Inc.
# Copyright (C) 2017 Lenovo.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from units.compat.mock import patch
from ansible.modules.network.cnos import cnos_linkagg
from units.modules.utils import set_module_args
from .cnos_module import TestCnosModule, load_fixture
class TestCnosLinkaggModule(TestCnosModule):
module = cnos_linkagg
def setUp(self):
super(TestCnosLinkaggModule, self).setUp()
self._patch_get_config = patch(
'ansible.modules.network.cnos.cnos_linkagg.get_config'
)
self._patch_load_config = patch(
'ansible.modules.network.cnos.cnos_linkagg.load_config'
)
self._get_config = self._patch_get_config.start()
self._load_config = self._patch_load_config.start()
def tearDown(self):
super(TestCnosLinkaggModule, self).tearDown()
self._patch_get_config.stop()
self._patch_load_config.stop()
def load_fixtures(self, commands=None):
config_file = 'cnos_linkagg_config.cfg'
self._get_config.return_value = load_fixture(config_file)
self._load_config.return_value = None
def test_cnos_linkagg_group_present(self, *args, **kwargs):
set_module_args(dict(
group='10',
state='present'
))
result = self.execute_module(changed=True)
self.assertEqual(
result,
{
'commands': [
'interface port-channel 10',
'exit'
],
'changed': True
}
)
def test_cnos_linkagg_group_members_active(self, *args, **kwargs):
set_module_args(dict(
group='10',
mode='active',
members=[
'Ethernet 1/33',
'Ethernet 1/44'
]
))
result = self.execute_module(changed=True)
self.assertEqual(
result,
{
'commands': [
'interface port-channel 10',
'exit',
'interface Ethernet 1/33',
'channel-group 10 mode active',
'interface Ethernet 1/44',
'channel-group 10 mode active'
],
'changed': True
}
)
def test_cnos_linkagg_group_member_removal(self, *args, **kwargs):
set_module_args(dict(
group='20',
mode='active',
members=[
'Ethernet 1/10',
]
))
result = self.execute_module(changed=True)
self.assertEqual(
result,
{
'commands': [
'interface port-channel 20',
'exit',
'interface Ethernet 1/10',
'channel-group 20 mode active'
],
'changed': True
}
)
def test_cnos_linkagg_group_members_absent(self, *args, **kwargs):
set_module_args(dict(
group='20',
state='absent'
))
result = self.execute_module(changed=True)
self.assertEqual(
result,
{
'commands': [
'no interface port-channel 20'
],
'changed': True
}
)
set_module_args(dict(
group='10',
state='absent'
))
result = self.execute_module(changed=False)
self.assertEqual(
result,
{
'commands': [],
'changed': False
}
)
| gpl-3.0 | 2,301,726,556,821,494,000 | 29.136054 | 70 | 0.52912 | false |
samedder/azure-cli | scripts/scrub_vcr_auth.py | 8 | 2271 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
import os
import tempfile
COMMAND_MODULE_PREFIX = 'azure-cli-'
PATH_TO_COMMAND_MODULES = os.path.abspath(os.path.join(os.path.abspath(__file__),
'..', '..', 'src',#'..', '..', '..', '..',
'command_modules'))
command_modules = []
insecure_cassettes = []
for name in os.listdir(PATH_TO_COMMAND_MODULES):
full_module_path = os.path.join(PATH_TO_COMMAND_MODULES, name)
if name.startswith(COMMAND_MODULE_PREFIX) and os.path.isdir(full_module_path):
command_modules += [(name, full_module_path)]
for name, fullpath in command_modules:
path_to_recordings = os.path.join(fullpath, 'azure', 'cli', 'command_modules',
name.replace(COMMAND_MODULE_PREFIX, ''),
'tests', 'recordings')
if not os.path.isdir(path_to_recordings):
continue
for name in os.listdir(path_to_recordings):
if not str.endswith(name, '.yaml'):
continue
src_path = os.path.join(path_to_recordings, name)
t = tempfile.NamedTemporaryFile('r+')
with open(src_path, 'r') as f:
for line in f:
if 'authorization: [bearer' in line.lower():
insecure_cassettes.append(name)
else:
t.write(line)
t.seek(0)
with open(src_path, 'w') as f:
for line in t:
f.write(line)
t.close()
insecure_cassettes = list(set(insecure_cassettes))
if insecure_cassettes:
print('Bearer tokens removed from the following cassettes:')
for cassette in insecure_cassettes:
print('\t{}'.format(cassette))
else:
print('All cassettes free from Bearer tokens!')
| mit | -4,512,198,097,377,098,000 | 43.529412 | 97 | 0.502422 | false |
Ultimaker/Cura | plugins/Toolbox/src/CloudSync/SyncOrchestrator.py | 1 | 5714 | # Copyright (c) 2021 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
import os
from typing import List, Dict, Any, cast
from UM import i18n_catalog
from UM.Extension import Extension
from UM.Logger import Logger
from UM.Message import Message
from UM.PluginRegistry import PluginRegistry
from cura.CuraApplication import CuraApplication
from .CloudPackageChecker import CloudPackageChecker
from .CloudApiClient import CloudApiClient
from .DiscrepanciesPresenter import DiscrepanciesPresenter
from .DownloadPresenter import DownloadPresenter
from .LicensePresenter import LicensePresenter
from .RestartApplicationPresenter import RestartApplicationPresenter
from .SubscribedPackagesModel import SubscribedPackagesModel
class SyncOrchestrator(Extension):
"""Orchestrates the synchronizing of packages from the user account to the installed packages
Example flow:
- CloudPackageChecker compares a list of packages the user `subscribed` to in their account
If there are `discrepancies` between the account and locally installed packages, they are emitted
- DiscrepanciesPresenter shows a list of packages to be added or removed to the user. It emits the `packageMutations`
the user selected to be performed
- The SyncOrchestrator uses PackageManager to remove local packages the users wants to see removed
- The DownloadPresenter shows a download progress dialog. It emits A tuple of succeeded and failed downloads
- The LicensePresenter extracts licenses from the downloaded packages and presents a license for each package to
be installed. It emits the `licenseAnswers` signal for accept or declines
- The CloudApiClient removes the declined packages from the account
- The SyncOrchestrator uses PackageManager to install the downloaded packages and delete temp files.
- The RestartApplicationPresenter notifies the user that a restart is required for changes to take effect
"""
def __init__(self, app: CuraApplication) -> None:
super().__init__()
# Differentiate This PluginObject from the Toolbox. self.getId() includes _name.
# getPluginId() will return the same value for The toolbox extension and this one
self._name = "SyncOrchestrator"
self._package_manager = app.getPackageManager()
# Keep a reference to the CloudApiClient. it watches for installed packages and subscribes to them
self._cloud_api = CloudApiClient.getInstance(app) # type: CloudApiClient
self._checker = CloudPackageChecker(app) # type: CloudPackageChecker
self._checker.discrepancies.connect(self._onDiscrepancies)
self._discrepancies_presenter = DiscrepanciesPresenter(app) # type: DiscrepanciesPresenter
self._discrepancies_presenter.packageMutations.connect(self._onPackageMutations)
self._download_presenter = DownloadPresenter(app) # type: DownloadPresenter
self._license_presenter = LicensePresenter(app) # type: LicensePresenter
self._license_presenter.licenseAnswers.connect(self._onLicenseAnswers)
self._restart_presenter = RestartApplicationPresenter(app)
def _onDiscrepancies(self, model: SubscribedPackagesModel) -> None:
plugin_path = cast(str, PluginRegistry.getInstance().getPluginPath(self.getPluginId()))
self._discrepancies_presenter.present(plugin_path, model)
def _onPackageMutations(self, mutations: SubscribedPackagesModel) -> None:
self._download_presenter = self._download_presenter.resetCopy()
self._download_presenter.done.connect(self._onDownloadFinished)
self._download_presenter.download(mutations)
def _onDownloadFinished(self, success_items: Dict[str, Dict[str, str]], error_items: List[str]) -> None:
"""Called when a set of packages have finished downloading
:param success_items:: Dict[package_id, Dict[str, str]]
:param error_items:: List[package_id]
"""
if error_items:
message = i18n_catalog.i18nc("@info:generic", "{} plugins failed to download".format(len(error_items)))
self._showErrorMessage(message)
plugin_path = cast(str, PluginRegistry.getInstance().getPluginPath(self.getPluginId()))
self._license_presenter = self._license_presenter.resetCopy()
self._license_presenter.licenseAnswers.connect(self._onLicenseAnswers)
self._license_presenter.present(plugin_path, success_items)
# Called when user has accepted / declined all licenses for the downloaded packages
def _onLicenseAnswers(self, answers: List[Dict[str, Any]]) -> None:
has_changes = False # True when at least one package is installed
for item in answers:
if item["accepted"]:
# install and subscribe packages
if not self._package_manager.installPackage(item["package_path"]):
message = "Could not install {}".format(item["package_id"])
self._showErrorMessage(message)
continue
has_changes = True
else:
self._cloud_api.unsubscribe(item["package_id"])
# delete temp file
try:
os.remove(item["package_path"])
except EnvironmentError as e: # File was already removed, no access rights, etc.
Logger.error("Can't delete temporary package file: {err}".format(err = str(e)))
if has_changes:
self._restart_presenter.present()
def _showErrorMessage(self, text: str):
"""Logs an error and shows it to the user"""
Logger.error(text)
Message(text, lifetime=0).show()
| lgpl-3.0 | -1,155,425,382,791,225,000 | 49.122807 | 121 | 0.712636 | false |
wevoice/wesub | apps/api/tests/test_activity.py | 1 | 16762 | # Amara, universalsubtitles.org
#
# Copyright (C) 2015 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along
# with this program. If not, see http://www.gnu.org/licenses/agpl-3.0.html.
from __future__ import absolute_import
from datetime import datetime
import time
from django.test import TestCase
from nose.tools import *
from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework.test import APIClient, APIRequestFactory
from api.tests.utils import format_datetime_field, user_field_data
from comments.models import Comment
from subtitles import pipeline
from utils.factories import *
from activity.models import ActivityRecord
class ActivityTest(TestCase):
def setUp(self):
self.user = UserFactory(username='test-user')
self.client = APIClient()
self.client.force_authenticate(user=self.user)
def clear_records(self):
ActivityRecord.objects.all().delete()
def check_list(self, url, *records):
response = self.client.get(url)
assert_equal(response.status_code, status.HTTP_200_OK)
assert_equal(len(response.data['objects']), len(records))
for data, record in zip(response.data['objects'], records):
self.check_data(data, record)
def check_data(self, activity_data, record):
assert_equal(activity_data['type'], record.type)
assert_equal(activity_data['date'],
format_datetime_field(record.created))
if record.video:
assert_equal(activity_data['video'], record.video.video_id)
assert_equal(activity_data['video_uri'], reverse(
'api:video-detail', kwargs={
'video_id': record.video.video_id,
}, request=APIRequestFactory().get('/'))
)
else:
assert_equal(activity_data['video'], None)
assert_equal(activity_data['video_uri'], None)
if record.language_code:
assert_equal(activity_data['language'], record.language_code)
assert_equal(activity_data['language_uri'], reverse(
'api:subtitle-language-detail', kwargs={
'video_id': record.video.video_id,
'language_code': record.language_code,
}, request=APIRequestFactory().get('/'))
)
else:
assert_equal(activity_data['language'], None)
assert_equal(activity_data['language_uri'], None)
assert_equal(activity_data['user'], user_field_data(record.user))
def test_video(self):
video = VideoFactory(user=self.user)
other_video = VideoFactory()
v1 = pipeline.add_subtitles(video, 'en', SubtitleSetFactory())
v2 = pipeline.add_subtitles(video, 'fr', SubtitleSetFactory(),
author=self.user)
self.clear_records()
record1 = ActivityRecord.objects.create_for_video_added(video)
record2 = ActivityRecord.objects.create_for_subtitle_version(v1)
record3 = ActivityRecord.objects.create_for_subtitle_version(v2)
# this record should never be listed in the endpoint
ActivityRecord.objects.create_for_video_added(other_video)
url = reverse('api:video-activity', args=(video.video_id,))
self.check_list(url, record3, record2, record1)
self.check_list(url + '?type=video-added', record1)
self.check_list(url + '?user=test-user', record3, record1)
self.check_list(url + '?language=en', record2)
self.check_list(
url + '?before=' + format_datetime_field(record2.created),
record1)
self.check_list(
url + '?after=' + format_datetime_field(record2.created),
record3, record2)
self.check_list(url + '?user=test-user&language=fr', record3)
def test_user(self):
video1 = VideoFactory(user=self.user, video_id='video1',
primary_audio_language_code='fr')
video2 = VideoFactory(user=self.user, video_id='video2',
team=TeamFactory(slug='team'))
other_video = VideoFactory()
v1 = pipeline.add_subtitles(video1, 'en', SubtitleSetFactory(),
author=self.user)
self.clear_records()
record1 = ActivityRecord.objects.create_for_video_added(video1)
record2 = ActivityRecord.objects.create_for_video_added(video2)
record3 = ActivityRecord.objects.create_for_subtitle_version(v1)
# this record should never be listed in the endpoint
ActivityRecord.objects.create_for_video_added(other_video)
url = reverse('api:user-activity', args=(self.user.username,))
self.check_list(url, record3, record2, record1)
self.check_list(url + '?video=video1', record3, record1)
self.check_list(url + '?team=team', record2)
self.check_list(url + '?video_language=fr', record3, record1)
self.check_list(url + '?type=video-added', record2, record1)
self.check_list(url + '?language=en', record3)
self.check_list(
url + '?before=' + format_datetime_field(record2.created),
record1)
self.check_list(
url + '?after=' + format_datetime_field(record2.created),
record3, record2)
def test_team(self):
team = TeamFactory(slug='team')
video1 = VideoFactory(video_id='video1',
primary_audio_language_code='fr', team=team)
video2 = VideoFactory(video_id='video2', team=team)
other_video = VideoFactory()
v1 = pipeline.add_subtitles(video1, 'en', SubtitleSetFactory(),
author=self.user)
self.clear_records()
record1 = ActivityRecord.objects.create_for_video_added(video1)
record2 = ActivityRecord.objects.create_for_video_added(video2)
record3 = ActivityRecord.objects.create_for_subtitle_version(v1)
# this record should never be listed in the endpoint
ActivityRecord.objects.create_for_video_added(other_video)
url = reverse('api:team-activity', args=(team.slug,))
self.check_list(url, record3, record2, record1)
self.check_list(url + '?video=video1', record3, record1)
self.check_list(url + '?user=test-user', record3)
self.check_list(url + '?video_language=fr', record3, record1)
self.check_list(url + '?type=video-added', record2, record1)
self.check_list(url + '?language=en', record3)
self.check_list(
url + '?before=' + format_datetime_field(record2.created),
record1)
self.check_list(
url + '?after=' + format_datetime_field(record2.created),
record3, record2)
def check_extra_field(self, activity_type, **extra_fields):
# We should be able to get just the record we care about by using our
# user stream and filtering by activity type
url = reverse('api:user-activity', args=(self.user.username,))
response = self.client.get(url + '?type={}'.format(activity_type))
assert_equal(response.status_code, status.HTTP_200_OK)
assert_equal(len(response.data['objects']), 1)
data = response.data['objects'][0]
for name, value in extra_fields.items():
assert_equal(data[name], value)
def test_video_url_added(self):
video = VideoFactory()
vurl = VideoURLFactory(video=video, added_by=self.user)
ActivityRecord.objects.create_for_video_url_added(vurl)
self.check_extra_field('video-url-added', url=vurl.url)
def test_video_url_edited(self):
video = VideoFactory()
old_vurl = video.get_primary_videourl_obj()
new_vurl = VideoURLFactory(video=video)
ActivityRecord.objects.create_for_video_url_made_primary(
new_vurl, old_vurl, self.user)
self.check_extra_field('video-url-edited', new_url=new_vurl.url,
old_url=old_vurl.url)
def test_video_url_deleted(self):
video = VideoFactory()
vurl = VideoURLFactory(video=video)
ActivityRecord.objects.create_for_video_url_deleted(vurl, self.user)
self.check_extra_field('video-url-deleted', url=vurl.url)
def test_video_deleted(self):
video = VideoFactory()
ActivityRecord.objects.create_for_video_deleted(video, self.user)
self.check_extra_field('video-deleted', title=video.title_display())
class LegacyActivityTestCase(TestCase):
def setUp(self):
self.user = UserFactory()
self.client = APIClient()
self.client.force_authenticate(user=self.user)
self.list_url = reverse('api:activity-list')
# create a bunch of activity records of various types
self.team = TeamFactory()
self.team_member = TeamMemberFactory(user=self.user, team=self.team)
self.video = VideoFactory(user=self.user)
TeamVideoFactory(video=self.video, team=self.team)
self.user2 = UserFactory()
ActivityRecord.objects.create_for_video_added(self.video)
self.video.title = 'new-title'
self.video.save()
v = pipeline.add_subtitles(self.video, 'en', None, author=self.user)
ActivityRecord.objects.create_for_subtitle_version(v)
ActivityRecord.objects.create_for_version_approved(v, self.user2)
ActivityRecord.objects.create_for_version_rejected(v, self.user2)
ActivityRecord.objects.create_for_new_member(self.team_member)
ActivityRecord.objects.create_for_member_deleted(self.team_member)
self.record_qs = ActivityRecord.objects.all()
def detail_url(self, record):
return reverse('api:activity-detail', (record.id,))
def filtered_list_url(self, filters):
query = '&'.join('{}={}'.format(k, v) for k, v in filters.items())
return '{}?{}'.format(self.list_url, query)
def check_activity_data(self, activity_data, record):
assert_equal(activity_data['id'], record.id)
assert_equal(activity_data['type'], record.type_code)
assert_equal(activity_data['type_name'], record.type)
assert_equal(activity_data['created'],
format_datetime_field(record.created))
if record.type == 'video-url-edited':
assert_equal(activity_data['new_video_title'],
record.get_related_obj().new_title)
else:
assert_equal(activity_data['new_video_title'], None)
if record.type == 'comment-added':
assert_equal(activity_data['comment'],
record.get_related_obj().content)
else:
assert_equal(activity_data['comment'], None)
assert_equal(activity_data['resource_uri'], reverse(
'api:activity-detail', kwargs={'id': record.id},
request=APIRequestFactory().get('/')))
if record.video:
assert_equal(activity_data['video'], record.video.video_id)
assert_equal(activity_data['video_uri'], reverse(
'api:video-detail', kwargs={
'video_id': record.video.video_id,
}, request=APIRequestFactory().get('/'))
)
else:
assert_equal(activity_data['video'], None)
assert_equal(activity_data['video_uri'], None)
if record.language_code:
assert_equal(activity_data['language'], record.language_code)
assert_equal(activity_data['language_url'], reverse(
'api:subtitle-language-detail', kwargs={
'video_id': record.video.video_id,
'language_code': record.language_code,
}, request=APIRequestFactory().get('/'))
)
else:
assert_equal(activity_data['language'], None)
assert_equal(activity_data['language_url'], None)
if record.user:
assert_equal(activity_data['user'], record.user.username)
else:
assert_equal(activity_data['user'], None)
def test_list(self):
activity_map = {a.id: a for a in self.record_qs}
response = self.client.get(self.list_url)
assert_equal(response.status_code, status.HTTP_200_OK)
assert_items_equal([a['id'] for a in response.data['objects']],
activity_map.keys())
for activity_data in response.data['objects']:
self.check_activity_data(activity_data,
activity_map[activity_data['id']])
def test_detail(self):
for record in self.record_qs:
response = self.client.get(self.detail_url(record))
assert_equal(response.status_code, status.HTTP_200_OK)
self.check_activity_data(response.data, record)
def check_filter(self, filters, correct_records):
response = self.client.get(self.filtered_list_url(filters))
assert_equal(response.status_code, status.HTTP_200_OK)
assert_items_equal([a['id'] for a in response.data['objects']],
[a.id for a in correct_records])
def test_team_filter(self):
self.check_filter({
'team': self.team.slug,
}, ActivityRecord.objects.filter(team=self.team, video__isnull=False))
def test_team_activity_flag(self):
self.check_filter({
'team': self.team.slug,
'team-activity': 1,
}, ActivityRecord.objects.filter(team=self.team, video__isnull=True))
def test_video_filter(self):
self.check_filter({
'video': self.video.video_id,
}, ActivityRecord.objects.filter(video=self.video))
def test_type_filter(self):
type_field = ActivityRecord._meta.get_field('type')
for (slug, label) in type_field.choices:
self.check_filter({
'type': type_field.get_prep_value(slug),
}, self.record_qs.filter(type=slug))
def test_language_filter(self):
self.check_filter({
'language': 'en'
}, self.record_qs.filter(language_code='en'))
def _make_timestamp(self, datetime):
return int(time.mktime(datetime.timetuple()))
def test_before_and_after_filters(self):
all_records = list(self.record_qs)
old_records = all_records[:4]
new_records = all_records[4:]
(ActivityRecord.objects
.filter(id__in=[a.id for a in old_records])
.update(created=datetime(2014, 12, 31)))
self.check_filter({
'before': self._make_timestamp(datetime(2015, 1, 1))
}, old_records)
self.check_filter({
'after': self._make_timestamp(datetime(2015, 1, 1))
}, new_records)
def test_comment(self):
# Test the comment activity, which fills in the comment field
Comment(content_object=self.video, user=self.user,
content="Test Comment").save()
record = ActivityRecord.objects.get(type='comment-added',
video=self.video)
response = self.client.get(self.detail_url(record))
assert_equal(response.status_code, status.HTTP_200_OK)
assert_equal(response.data['comment'], 'Test Comment')
self.check_activity_data(response.data, record)
def test_team_filter_permission_check(self):
# users should get a 403 response when trying to get activity for a
# team that they are not a member of
self.team_member.delete()
url = self.filtered_list_url({'team': self.team.slug})
response = self.client.get(url)
assert_equal(response.status_code, status.HTTP_403_FORBIDDEN)
def test_team_video_filter_permission_check(self):
# users should get a 403 response when trying to get activity for a
# team video when they are not a member of the team
self.team_member.delete()
url = self.filtered_list_url({'video': self.video.video_id})
response = self.client.get(url)
assert_equal(response.status_code, status.HTTP_403_FORBIDDEN)
| agpl-3.0 | -1,855,790,577,383,451,600 | 45.049451 | 80 | 0.622658 | false |
krzyzacy/test-infra | testgrid/conformance/upload_e2e.py | 1 | 10899 | #!/usr/bin/env python3
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script parses conformance test output to produce testgrid entries
#
# Assumptions:
# - there is one log file and one JUnit file (true for current conformance tests..)
# - the log file contains ginkgo's output (true for kubetest and sonobuoy..)
# - the ginkgo output will give us start / end time, and overall success
#
# - the start timestamp is suitable as a testgrid ID (unique, monotonic)
#
# - the test ran in the current year unless --year is provided
# - the timestamps are parsed on a machine with the same local time (zone)
# settings as the machine that produced the logs
#
# The log file is the source of truth for metadata, the JUnit will be consumed
# by testgrid / gubernator for individual test case results
#
# Usage: see README.md
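#
# Illustrative invocation only (the bucket and paths below are hypothetical,
# not taken from the README): the script needs a GCS prefix, the ginkgo log,
# and a glob matching the JUnit XML results, e.g.
#
#   ./upload_e2e.py --bucket=gs://example-conformance-results/some-run \
#     --junit='artifacts/junit_*.xml' --log=artifacts/e2e.log --dry-run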
# Required for pylint: 1.9.4 to tokenize the python3 print function.
from __future__ import print_function
import re
import sys
import time
import datetime
import argparse
import json
import subprocess
from os import path
import glob
import atexit
# logs often contain ANSI escape sequences
# https://stackoverflow.com/a/14693789
ANSI_ESCAPE_RE = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
# NOTE e2e logs use go's time.StampMilli ("Jan _2 15:04:05.000")
# Example log line with a timestamp:
# Jan 26 06:38:46.284: INFO: Running AfterSuite actions on all node
# the third ':' separates the date from the rest
E2E_LOG_TIMESTAMP_RE = re.compile(r'(... .\d \d\d:\d\d:\d\d\.\d\d\d):.*')
# Ginkgo gives a line like the following at the end of successful runs:
# SUCCESS! -- 123 Passed | 0 Failed | 0 Pending | 587 Skipped PASS
# we match this to detect overall success
E2E_LOG_SUCCESS_RE = re.compile(r'Test Suite Passed')
E2E_LOG_FAIL_RE = re.compile(r'Test Suite Failed')
def log_line_strip_escape_sequences(line):
return ANSI_ESCAPE_RE.sub('', line)
def parse_e2e_log_line_timestamp(line, year):
"""parses a ginkgo e2e log line for the leading timestamp
Args:
line (str) - the log line
year (str) - 'YYYY'
Returns:
timestamp (datetime.datetime) or None
"""
match = E2E_LOG_TIMESTAMP_RE.match(line)
if match is None:
return None
# note we add year to the timestamp because the actual timestamp doesn't
# contain one and we want a datetime object...
timestamp = year+' '+match.group(1)
return datetime.datetime.strptime(timestamp, '%Y %b %d %H:%M:%S.%f')
def parse_e2e_logfile(file_handle, year):
"""parse e2e logfile at path, assuming the log is from year
Args:
file_handle (file): the log file, iterated for lines
year (str): YYYY year logfile is from
Returns:
started (datetime.datetime), finished (datetime.datetime), passed (boolean)
"""
passed = started = finished = None
for line in file_handle:
line = log_line_strip_escape_sequences(line)
# try to get a timestamp from each line, keep the first one as
# start time, and the last one as finish time
timestamp = parse_e2e_log_line_timestamp(line, year)
if timestamp:
if started:
finished = timestamp
else:
started = timestamp
if passed is False:
# if we already have found a failure, ignore subsequent pass/fails
continue
elif E2E_LOG_SUCCESS_RE.match(line):
passed = True
elif E2E_LOG_FAIL_RE.match(line):
passed = False
return started, finished, passed
def datetime_to_unix(datetime_obj):
"""convert datetime.datetime to unix timestamp"""
return int(time.mktime(datetime_obj.timetuple()))
def testgrid_started_json_contents(start_time):
"""returns the string contents of a testgrid started.json file
Args:
start_time (datetime.datetime)
Returns:
contents (str)
"""
started = datetime_to_unix(start_time)
return json.dumps({
'timestamp': started
})
def testgrid_finished_json_contents(finish_time, passed, metadata):
"""returns the string contents of a testgrid finished.json file
Args:
finish_time (datetime.datetime)
passed (bool)
metadata (str)
Returns:
contents (str)
"""
finished = datetime_to_unix(finish_time)
result = 'SUCCESS' if passed else 'FAILURE'
if metadata:
testdata = json.loads(metadata)
return json.dumps({
'timestamp': finished,
'result': result,
'metadata': testdata
})
return json.dumps({
'timestamp': finished,
'result': result
})
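# For reference, the two blobs produced above look roughly like the following
# (timestamps and metadata values are illustrative only; the "metadata" key is
# present only when --metadata was supplied):
#   started.json:  {"timestamp": 1548484726}
#   finished.json: {"timestamp": 1548498867, "result": "SUCCESS", "metadata": {...}}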
def upload_string(gcs_path, text, dry):
"""Uploads text to gcs_path if dry is False, otherwise just prints"""
cmd = ['gsutil', '-q', '-h', 'Content-Type:text/plain', 'cp', '-', gcs_path]
print('Run:', cmd, 'stdin=%s' % text, file=sys.stderr)
if dry:
return
proc = subprocess.Popen(cmd, stdin=subprocess.PIPE)
    proc.communicate(input=text.encode('utf-8'))
if proc.returncode != 0:
raise RuntimeError(
"Failed to upload with exit code: %d" % proc.returncode)
def upload_file(gcs_path, file_path, dry):
"""Uploads file at file_path to gcs_path if dry is False, otherwise just prints"""
cmd = ['gsutil', '-q', '-h', 'Content-Type:text/plain',
'cp', file_path, gcs_path]
print('Run:', cmd, file=sys.stderr)
if dry:
return
proc = subprocess.Popen(cmd)
proc.communicate()
if proc.returncode != 0:
raise RuntimeError(
'Failed to upload with exit code: %d' % proc.returncode)
def get_current_account(dry_run):
"""gets the currently active gcp account by shelling out to gcloud"""
cmd = ['gcloud', 'auth', 'list',
'--filter=status:ACTIVE', '--format=value(account)']
print('Run:', cmd, file=sys.stderr)
if dry_run:
return ""
return subprocess.check_output(cmd, encoding='utf-8').strip('\n')
def set_current_account(account, dry_run):
"""sets the currently active gcp account by shelling out to gcloud"""
cmd = ['gcloud', 'config', 'set', 'core/account', account]
print('Run:', cmd, file=sys.stderr)
if dry_run:
return None
return subprocess.check_call(cmd)
def activate_service_account(key_file, dry_run):
"""activates a gcp service account by shelling out to gcloud"""
cmd = ['gcloud', 'auth', 'activate-service-account', '--key-file='+key_file]
print('Run:', cmd, file=sys.stderr)
if dry_run:
return
subprocess.check_call(cmd)
def revoke_current_account(dry_run):
"""logs out of the currently active gcp account by shelling out to gcloud"""
cmd = ['gcloud', 'auth', 'revoke']
print('Run:', cmd, file=sys.stderr)
if dry_run:
return None
return subprocess.check_call(cmd)
def parse_args(cli_args=None):
if cli_args is None:
cli_args = sys.argv[1:]
parser = argparse.ArgumentParser()
parser.add_argument(
'--bucket',
help=('GCS bucket to upload the results to,'
' of the form \'gs://foo/bar\''),
required=True,
)
parser.add_argument(
'--year',
help=('the year in which the log is from, defaults to the current year.'
' format: YYYY'),
default=str(datetime.datetime.now().year),
)
parser.add_argument(
'--junit',
help='path or glob expression to the junit xml results file(s)',
required=True,
)
parser.add_argument(
'--log',
help='path to the test log file, should contain the ginkgo output',
required=True,
)
parser.add_argument(
'--dry-run',
help='if set, do not actually upload anything, only print actions',
required=False,
action='store_true',
)
parser.add_argument(
'--metadata',
help='dictionary of additional key-value pairs that can be displayed to the user.',
required=False,
default=str(),
)
parser.add_argument(
'--key-file',
help='path to GCP service account key file, which will be activated before '
'uploading if provided, the account will be revoked and the active account reset '
'on exit',
required=False,
)
return parser.parse_args(args=cli_args)
def main(cli_args):
args = parse_args(cli_args)
# optionally activate a service account with upload credentials
if args.key_file:
# grab the currently active account if any, and if there is one
# register a handler to set it active again on exit
current_account = get_current_account(args.dry_run)
if current_account:
atexit.register(
lambda: set_current_account(current_account, args.dry_run)
)
# login to the service account and register a handler to logout before exit
# NOTE: atexit handlers are called in LIFO order
activate_service_account(args.key_file, args.dry_run)
atexit.register(lambda: revoke_current_account(args.dry_run))
# find the matching junit files, there should be at least one for a useful
# testgrid entry
junits = glob.glob(args.junit)
if not junits:
print('No matching JUnit files found!')
sys.exit(-1)
# parse the e2e.log for start time, finish time, and success
with open(args.log) as file_handle:
started, finished, passed = parse_e2e_logfile(file_handle, args.year)
# convert parsed results to testgrid json metadata blobs
started_json = testgrid_started_json_contents(started)
finished_json = testgrid_finished_json_contents(
finished, passed, args.metadata)
# use timestamp as build ID
gcs_dir = args.bucket + '/' + str(datetime_to_unix(started))
# upload metadata, log, junit to testgrid
print('Uploading entry to: %s' % gcs_dir)
upload_string(gcs_dir+'/started.json', started_json, args.dry_run)
upload_string(gcs_dir+'/finished.json', finished_json, args.dry_run)
upload_file(gcs_dir+'/build-log.txt', args.log, args.dry_run)
for junit_file in junits:
upload_file(gcs_dir+'/artifacts/' +
path.basename(junit_file), junit_file, args.dry_run)
print('Done.')
if __name__ == '__main__':
main(sys.argv[1:])
| apache-2.0 | -2,350,796,836,719,295,500 | 32.228659 | 91 | 0.645747 | false |
santisiri/popego | envs/ALPHA-POPEGO/lib/python2.5/site-packages/twisted/test/test_epoll.py | 1 | 4295 | # Copyright (c) 2001-2006 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for epoll wrapper.
"""
import socket, errno, time
from twisted.trial import unittest
try:
from twisted.python import _epoll
except ImportError:
_epoll = None
class EPoll(unittest.TestCase):
"""
Tests for the low-level epoll bindings.
"""
def setUp(self):
"""
Create a listening server port and a list with which to keep track
of created sockets.
"""
self.serverSocket = socket.socket()
self.serverSocket.bind(('127.0.0.1', 0))
self.serverSocket.listen(1)
self.connections = [self.serverSocket]
def tearDown(self):
"""
Close any sockets which were opened by the test.
"""
for skt in self.connections:
skt.close()
def _connectedPair(self):
"""
Return the two sockets which make up a new TCP connection.
"""
client = socket.socket()
client.setblocking(False)
try:
client.connect(('127.0.0.1', self.serverSocket.getsockname()[1]))
except socket.error, e:
self.assertEquals(e.args[0], errno.EINPROGRESS)
else:
raise unittest.FailTest("Connect should have raised EINPROGRESS")
server, addr = self.serverSocket.accept()
self.connections.extend((client, server))
return client, server
def test_create(self):
"""
Test the creation of an epoll object.
"""
try:
p = _epoll.epoll(16)
except OSError, e:
raise unittest.FailTest(str(e))
else:
p.close()
def test_badCreate(self):
"""
Test that attempting to create an epoll object with some random
objects raises a TypeError.
"""
self.assertRaises(TypeError, _epoll.epoll, 1, 2, 3)
self.assertRaises(TypeError, _epoll.epoll, 'foo')
self.assertRaises(TypeError, _epoll.epoll, None)
self.assertRaises(TypeError, _epoll.epoll, ())
self.assertRaises(TypeError, _epoll.epoll, ['foo'])
self.assertRaises(TypeError, _epoll.epoll, {})
self.assertRaises(TypeError, _epoll.epoll)
def test_add(self):
"""
Test adding a socket to an epoll object.
"""
server, client = self._connectedPair()
p = _epoll.epoll(2)
try:
p._control(_epoll.CTL_ADD, server.fileno(), _epoll.IN | _epoll.OUT)
p._control(_epoll.CTL_ADD, client.fileno(), _epoll.IN | _epoll.OUT)
finally:
p.close()
def test_controlAndWait(self):
"""
Test waiting on an epoll object which has had some sockets added to
it.
"""
client, server = self._connectedPair()
p = _epoll.epoll(16)
p._control(_epoll.CTL_ADD, client.fileno(), _epoll.IN | _epoll.OUT |
_epoll.ET)
p._control(_epoll.CTL_ADD, server.fileno(), _epoll.IN | _epoll.OUT |
_epoll.ET)
now = time.time()
events = p.wait(4, 1000)
then = time.time()
self.failIf(then - now > 0.01)
events.sort()
expected = [(client.fileno(), _epoll.OUT),
(server.fileno(), _epoll.OUT)]
expected.sort()
self.assertEquals(events, expected)
now = time.time()
events = p.wait(4, 200)
then = time.time()
self.failUnless(then - now > 0.1)
self.failIf(events)
client.send("Hello!")
server.send("world!!!")
now = time.time()
events = p.wait(4, 1000)
then = time.time()
self.failIf(then - now > 0.01)
events.sort()
expected = [(client.fileno(), _epoll.IN | _epoll.OUT),
(server.fileno(), _epoll.IN | _epoll.OUT)]
expected.sort()
self.assertEquals(events, expected)
if _epoll is None:
EPoll.skip = "_epoll module unavailable"
else:
try:
e = _epoll.epoll(16)
except IOError, exc:
if exc.errno == errno.ENOSYS:
del exc
EPoll.skip = "epoll support missing from platform"
else:
raise
else:
e.close()
del e
| bsd-3-clause | 782,778,448,928,796,300 | 26.183544 | 79 | 0.554133 | false |
fkolacek/FIT-VUT | bp-revok/python/lib/python2.7/site-packages/tornado-4.1-py2.7-linux-x86_64.egg/tornado/test/httpclient_test.py | 19 | 23320 | #!/usr/bin/env python
from __future__ import absolute_import, division, print_function, with_statement
import base64
import binascii
from contextlib import closing
import functools
import sys
import threading
import datetime
from io import BytesIO
from tornado.escape import utf8
from tornado.httpclient import HTTPRequest, HTTPResponse, _RequestProxy, HTTPError, HTTPClient
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream
from tornado.log import gen_log
from tornado import netutil
from tornado.stack_context import ExceptionStackContext, NullContext
from tornado.testing import AsyncHTTPTestCase, bind_unused_port, gen_test, ExpectLog
from tornado.test.util import unittest, skipOnTravis
from tornado.util import u
from tornado.web import Application, RequestHandler, url
from tornado.httputil import format_timestamp, HTTPHeaders
class HelloWorldHandler(RequestHandler):
def get(self):
name = self.get_argument("name", "world")
self.set_header("Content-Type", "text/plain")
self.finish("Hello %s!" % name)
class PostHandler(RequestHandler):
def post(self):
self.finish("Post arg1: %s, arg2: %s" % (
self.get_argument("arg1"), self.get_argument("arg2")))
class PutHandler(RequestHandler):
def put(self):
self.write("Put body: ")
self.write(self.request.body)
class RedirectHandler(RequestHandler):
def prepare(self):
self.redirect(self.get_argument("url"),
status=int(self.get_argument("status", "302")))
class ChunkHandler(RequestHandler):
def get(self):
self.write("asdf")
self.flush()
self.write("qwer")
class AuthHandler(RequestHandler):
def get(self):
self.finish(self.request.headers["Authorization"])
class CountdownHandler(RequestHandler):
def get(self, count):
count = int(count)
if count > 0:
self.redirect(self.reverse_url("countdown", count - 1))
else:
self.write("Zero")
class EchoPostHandler(RequestHandler):
def post(self):
self.write(self.request.body)
class UserAgentHandler(RequestHandler):
def get(self):
self.write(self.request.headers.get('User-Agent', 'User agent not set'))
class ContentLength304Handler(RequestHandler):
def get(self):
self.set_status(304)
self.set_header('Content-Length', 42)
def _clear_headers_for_304(self):
# Tornado strips content-length from 304 responses, but here we
# want to simulate servers that include the headers anyway.
pass
class PatchHandler(RequestHandler):
def patch(self):
"Return the request payload - so we can check it is being kept"
self.write(self.request.body)
class AllMethodsHandler(RequestHandler):
SUPPORTED_METHODS = RequestHandler.SUPPORTED_METHODS + ('OTHER',)
def method(self):
self.write(self.request.method)
get = post = put = delete = options = patch = other = method
# These tests end up getting run redundantly: once here with the default
# HTTPClient implementation, and then again in each implementation's own
# test suite.
class HTTPClientCommonTestCase(AsyncHTTPTestCase):
def get_app(self):
return Application([
url("/hello", HelloWorldHandler),
url("/post", PostHandler),
url("/put", PutHandler),
url("/redirect", RedirectHandler),
url("/chunk", ChunkHandler),
url("/auth", AuthHandler),
url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
url("/echopost", EchoPostHandler),
url("/user_agent", UserAgentHandler),
url("/304_with_content_length", ContentLength304Handler),
url("/all_methods", AllMethodsHandler),
url('/patch', PatchHandler),
], gzip=True)
def test_patch_receives_payload(self):
body = b"some patch data"
response = self.fetch("/patch", method='PATCH', body=body)
self.assertEqual(response.code, 200)
self.assertEqual(response.body, body)
@skipOnTravis
def test_hello_world(self):
response = self.fetch("/hello")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["Content-Type"], "text/plain")
self.assertEqual(response.body, b"Hello world!")
self.assertEqual(int(response.request_time), 0)
response = self.fetch("/hello?name=Ben")
self.assertEqual(response.body, b"Hello Ben!")
def test_streaming_callback(self):
# streaming_callback is also tested in test_chunked
chunks = []
response = self.fetch("/hello",
streaming_callback=chunks.append)
# with streaming_callback, data goes to the callback and not response.body
self.assertEqual(chunks, [b"Hello world!"])
self.assertFalse(response.body)
def test_post(self):
response = self.fetch("/post", method="POST",
body="arg1=foo&arg2=bar")
self.assertEqual(response.code, 200)
self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")
def test_chunked(self):
response = self.fetch("/chunk")
self.assertEqual(response.body, b"asdfqwer")
chunks = []
response = self.fetch("/chunk",
streaming_callback=chunks.append)
self.assertEqual(chunks, [b"asdf", b"qwer"])
self.assertFalse(response.body)
def test_chunked_close(self):
# test case in which chunks spread read-callback processing
# over several ioloop iterations, but the connection is already closed.
sock, port = bind_unused_port()
with closing(sock):
def write_response(stream, request_data):
stream.write(b"""\
HTTP/1.1 200 OK
Transfer-Encoding: chunked
1
1
1
2
0
""".replace(b"\n", b"\r\n"), callback=stream.close)
def accept_callback(conn, address):
# fake an HTTP server using chunked encoding where the final chunks
# and connection close all happen at once
stream = IOStream(conn, io_loop=self.io_loop)
stream.read_until(b"\r\n\r\n",
functools.partial(write_response, stream))
netutil.add_accept_handler(sock, accept_callback, self.io_loop)
self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
resp = self.wait()
resp.rethrow()
self.assertEqual(resp.body, b"12")
self.io_loop.remove_handler(sock.fileno())
def test_streaming_stack_context(self):
chunks = []
exc_info = []
def error_handler(typ, value, tb):
exc_info.append((typ, value, tb))
return True
def streaming_cb(chunk):
chunks.append(chunk)
if chunk == b'qwer':
1 / 0
with ExceptionStackContext(error_handler):
self.fetch('/chunk', streaming_callback=streaming_cb)
self.assertEqual(chunks, [b'asdf', b'qwer'])
self.assertEqual(1, len(exc_info))
self.assertIs(exc_info[0][0], ZeroDivisionError)
def test_basic_auth(self):
self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame").body,
b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
def test_basic_auth_explicit_mode(self):
self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame",
auth_mode="basic").body,
b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
def test_unsupported_auth_mode(self):
# curl and simple clients handle errors a bit differently; the
# important thing is that they don't fall back to basic auth
# on an unknown mode.
with ExpectLog(gen_log, "uncaught exception", required=False):
with self.assertRaises((ValueError, HTTPError)):
response = self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame",
auth_mode="asdf")
response.rethrow()
def test_follow_redirect(self):
response = self.fetch("/countdown/2", follow_redirects=False)
self.assertEqual(302, response.code)
self.assertTrue(response.headers["Location"].endswith("/countdown/1"))
response = self.fetch("/countdown/2")
self.assertEqual(200, response.code)
self.assertTrue(response.effective_url.endswith("/countdown/0"))
self.assertEqual(b"Zero", response.body)
def test_credentials_in_url(self):
url = self.get_url("/auth").replace("http://", "http://me:secret@")
self.http_client.fetch(url, self.stop)
response = self.wait()
self.assertEqual(b"Basic " + base64.b64encode(b"me:secret"),
response.body)
def test_body_encoding(self):
unicode_body = u("\xe9")
byte_body = binascii.a2b_hex(b"e9")
# unicode string in body gets converted to utf8
response = self.fetch("/echopost", method="POST", body=unicode_body,
headers={"Content-Type": "application/blah"})
self.assertEqual(response.headers["Content-Length"], "2")
self.assertEqual(response.body, utf8(unicode_body))
# byte strings pass through directly
response = self.fetch("/echopost", method="POST",
body=byte_body,
headers={"Content-Type": "application/blah"})
self.assertEqual(response.headers["Content-Length"], "1")
self.assertEqual(response.body, byte_body)
# Mixing unicode in headers and byte string bodies shouldn't
# break anything
response = self.fetch("/echopost", method="POST", body=byte_body,
headers={"Content-Type": "application/blah"},
user_agent=u("foo"))
self.assertEqual(response.headers["Content-Length"], "1")
self.assertEqual(response.body, byte_body)
def test_types(self):
response = self.fetch("/hello")
self.assertEqual(type(response.body), bytes)
self.assertEqual(type(response.headers["Content-Type"]), str)
self.assertEqual(type(response.code), int)
self.assertEqual(type(response.effective_url), str)
def test_header_callback(self):
first_line = []
headers = {}
chunks = []
def header_callback(header_line):
if header_line.startswith('HTTP/'):
first_line.append(header_line)
elif header_line != '\r\n':
k, v = header_line.split(':', 1)
headers[k] = v.strip()
def streaming_callback(chunk):
# All header callbacks are run before any streaming callbacks,
# so the header data is available to process the data as it
# comes in.
self.assertEqual(headers['Content-Type'], 'text/html; charset=UTF-8')
chunks.append(chunk)
self.fetch('/chunk', header_callback=header_callback,
streaming_callback=streaming_callback)
self.assertEqual(len(first_line), 1)
self.assertRegexpMatches(first_line[0], 'HTTP/1.[01] 200 OK\r\n')
self.assertEqual(chunks, [b'asdf', b'qwer'])
def test_header_callback_stack_context(self):
exc_info = []
def error_handler(typ, value, tb):
exc_info.append((typ, value, tb))
return True
def header_callback(header_line):
if header_line.startswith('Content-Type:'):
1 / 0
with ExceptionStackContext(error_handler):
self.fetch('/chunk', header_callback=header_callback)
self.assertEqual(len(exc_info), 1)
self.assertIs(exc_info[0][0], ZeroDivisionError)
def test_configure_defaults(self):
defaults = dict(user_agent='TestDefaultUserAgent', allow_ipv6=False)
# Construct a new instance of the configured client class
client = self.http_client.__class__(self.io_loop, force_instance=True,
defaults=defaults)
try:
client.fetch(self.get_url('/user_agent'), callback=self.stop)
response = self.wait()
self.assertEqual(response.body, b'TestDefaultUserAgent')
finally:
client.close()
def test_header_types(self):
# Header values may be passed as character or utf8 byte strings,
# in a plain dictionary or an HTTPHeaders object.
# Keys must always be the native str type.
# All combinations should have the same results on the wire.
for value in [u("MyUserAgent"), b"MyUserAgent"]:
for container in [dict, HTTPHeaders]:
headers = container()
headers['User-Agent'] = value
resp = self.fetch('/user_agent', headers=headers)
self.assertEqual(
resp.body, b"MyUserAgent",
"response=%r, value=%r, container=%r" %
(resp.body, value, container))
def test_304_with_content_length(self):
# According to the spec 304 responses SHOULD NOT include
# Content-Length or other entity headers, but some servers do it
# anyway.
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
response = self.fetch('/304_with_content_length')
self.assertEqual(response.code, 304)
self.assertEqual(response.headers['Content-Length'], '42')
def test_final_callback_stack_context(self):
# The final callback should be run outside of the httpclient's
# stack_context. We want to ensure that there is not stack_context
# between the user's callback and the IOLoop, so monkey-patch
# IOLoop.handle_callback_exception and disable the test harness's
# context with a NullContext.
# Note that this does not apply to secondary callbacks (header
# and streaming_callback), as errors there must be seen as errors
# by the http client so it can clean up the connection.
exc_info = []
def handle_callback_exception(callback):
exc_info.append(sys.exc_info())
self.stop()
self.io_loop.handle_callback_exception = handle_callback_exception
with NullContext():
self.http_client.fetch(self.get_url('/hello'),
lambda response: 1 / 0)
self.wait()
self.assertEqual(exc_info[0][0], ZeroDivisionError)
@gen_test
def test_future_interface(self):
response = yield self.http_client.fetch(self.get_url('/hello'))
self.assertEqual(response.body, b'Hello world!')
@gen_test
def test_future_http_error(self):
with self.assertRaises(HTTPError) as context:
yield self.http_client.fetch(self.get_url('/notfound'))
self.assertEqual(context.exception.code, 404)
self.assertEqual(context.exception.response.code, 404)
@gen_test
def test_future_http_error_no_raise(self):
response = yield self.http_client.fetch(self.get_url('/notfound'), raise_error=False)
self.assertEqual(response.code, 404)
@gen_test
def test_reuse_request_from_response(self):
# The response.request attribute should be an HTTPRequest, not
# a _RequestProxy.
# This test uses self.http_client.fetch because self.fetch calls
# self.get_url on the input unconditionally.
url = self.get_url('/hello')
response = yield self.http_client.fetch(url)
self.assertEqual(response.request.url, url)
self.assertTrue(isinstance(response.request, HTTPRequest))
response2 = yield self.http_client.fetch(response.request)
self.assertEqual(response2.body, b'Hello world!')
def test_all_methods(self):
for method in ['GET', 'DELETE', 'OPTIONS']:
response = self.fetch('/all_methods', method=method)
self.assertEqual(response.body, utf8(method))
for method in ['POST', 'PUT', 'PATCH']:
response = self.fetch('/all_methods', method=method, body=b'')
self.assertEqual(response.body, utf8(method))
response = self.fetch('/all_methods', method='HEAD')
self.assertEqual(response.body, b'')
response = self.fetch('/all_methods', method='OTHER',
allow_nonstandard_methods=True)
self.assertEqual(response.body, b'OTHER')
@gen_test
def test_body_sanity_checks(self):
hello_url = self.get_url('/hello')
with self.assertRaises(ValueError) as context:
yield self.http_client.fetch(hello_url, body='data')
self.assertTrue('must be None' in str(context.exception))
with self.assertRaises(ValueError) as context:
yield self.http_client.fetch(hello_url, method='POST')
self.assertTrue('must not be None' in str(context.exception))
# This test causes odd failures with the combination of
# curl_httpclient (at least with the version of libcurl available
# on ubuntu 12.04), TwistedIOLoop, and epoll. For POST (but not PUT),
# curl decides the response came back too soon and closes the connection
# to start again. It does this *before* telling the socket callback to
# unregister the FD. Some IOLoop implementations have special kernel
# integration to discover this immediately. Tornado's IOLoops
# ignore errors on remove_handler to accommodate this behavior, but
# Twisted's reactor does not. The removeReader call fails and so
# do all future removeAll calls (which our tests do at cleanup).
#
#def test_post_307(self):
# response = self.fetch("/redirect?status=307&url=/post",
# method="POST", body=b"arg1=foo&arg2=bar")
# self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")
def test_put_307(self):
response = self.fetch("/redirect?status=307&url=/put",
method="PUT", body=b"hello")
response.rethrow()
self.assertEqual(response.body, b"Put body: hello")
class RequestProxyTest(unittest.TestCase):
def test_request_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/',
user_agent='foo'),
dict())
self.assertEqual(proxy.user_agent, 'foo')
def test_default_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'),
dict(network_interface='foo'))
self.assertEqual(proxy.network_interface, 'foo')
def test_both_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/',
proxy_host='foo'),
dict(proxy_host='bar'))
self.assertEqual(proxy.proxy_host, 'foo')
def test_neither_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'),
dict())
self.assertIs(proxy.auth_username, None)
def test_bad_attribute(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'),
dict())
with self.assertRaises(AttributeError):
proxy.foo
def test_defaults_none(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'), None)
self.assertIs(proxy.auth_username, None)
class HTTPResponseTestCase(unittest.TestCase):
def test_str(self):
response = HTTPResponse(HTTPRequest('http://example.com'),
200, headers={}, buffer=BytesIO())
s = str(response)
self.assertTrue(s.startswith('HTTPResponse('))
self.assertIn('code=200', s)
class SyncHTTPClientTest(unittest.TestCase):
def setUp(self):
if IOLoop.configured_class().__name__ in ('TwistedIOLoop',
'AsyncIOMainLoop'):
# TwistedIOLoop only supports the global reactor, so we can't have
# separate IOLoops for client and server threads.
# AsyncIOMainLoop doesn't work with the default policy
# (although it could with some tweaks to this test and a
# policy that created loops for non-main threads).
raise unittest.SkipTest(
'Sync HTTPClient not compatible with TwistedIOLoop or '
'AsyncIOMainLoop')
self.server_ioloop = IOLoop()
sock, self.port = bind_unused_port()
app = Application([('/', HelloWorldHandler)])
self.server = HTTPServer(app, io_loop=self.server_ioloop)
self.server.add_socket(sock)
self.server_thread = threading.Thread(target=self.server_ioloop.start)
self.server_thread.start()
self.http_client = HTTPClient()
def tearDown(self):
def stop_server():
self.server.stop()
self.server_ioloop.stop()
self.server_ioloop.add_callback(stop_server)
self.server_thread.join()
self.http_client.close()
self.server_ioloop.close(all_fds=True)
def get_url(self, path):
return 'http://127.0.0.1:%d%s' % (self.port, path)
def test_sync_client(self):
response = self.http_client.fetch(self.get_url('/'))
self.assertEqual(b'Hello world!', response.body)
def test_sync_client_error(self):
# Synchronous HTTPClient raises errors directly; no need for
# response.rethrow()
with self.assertRaises(HTTPError) as assertion:
self.http_client.fetch(self.get_url('/notfound'))
self.assertEqual(assertion.exception.code, 404)
class HTTPRequestTestCase(unittest.TestCase):
def test_headers(self):
request = HTTPRequest('http://example.com', headers={'foo': 'bar'})
self.assertEqual(request.headers, {'foo': 'bar'})
def test_headers_setter(self):
request = HTTPRequest('http://example.com')
request.headers = {'bar': 'baz'}
self.assertEqual(request.headers, {'bar': 'baz'})
def test_null_headers_setter(self):
request = HTTPRequest('http://example.com')
request.headers = None
self.assertEqual(request.headers, {})
def test_body(self):
request = HTTPRequest('http://example.com', body='foo')
self.assertEqual(request.body, utf8('foo'))
def test_body_setter(self):
request = HTTPRequest('http://example.com')
request.body = 'foo'
self.assertEqual(request.body, utf8('foo'))
def test_if_modified_since(self):
http_date = datetime.datetime.utcnow()
request = HTTPRequest('http://example.com', if_modified_since=http_date)
self.assertEqual(request.headers,
{'If-Modified-Since': format_timestamp(http_date)})
| apache-2.0 | 2,329,953,434,612,613,600 | 38.325464 | 94 | 0.613679 | false |
lifeinoppo/littlefishlet-scode | RES/REF/python_sourcecode/ipython-master/IPython/core/tests/test_iplib.py | 33 | 6393 | """Tests for the key interactiveshell module, where the main ipython class is defined.
"""
#-----------------------------------------------------------------------------
# Module imports
#-----------------------------------------------------------------------------
# third party
import nose.tools as nt
# our own packages
from IPython.testing.globalipapp import get_ipython
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# Get the public instance of IPython
ip = get_ipython()
#-----------------------------------------------------------------------------
# Test functions
#-----------------------------------------------------------------------------
def test_reset():
"""reset must clear most namespaces."""
# Check that reset runs without error
ip.reset()
    # Once we've reset it (to clear it of any junk that might have been there from
    # other tests), we can count how many variables are in the user's namespace
nvars_user_ns = len(ip.user_ns)
nvars_hidden = len(ip.user_ns_hidden)
# Now add a few variables to user_ns, and check that reset clears them
ip.user_ns['x'] = 1
ip.user_ns['y'] = 1
ip.reset()
# Finally, check that all namespaces have only as many variables as we
# expect to find in them:
nt.assert_equal(len(ip.user_ns), nvars_user_ns)
nt.assert_equal(len(ip.user_ns_hidden), nvars_hidden)
# Tests for reporting of exceptions in various modes, handling of SystemExit,
# and %tb functionality. This is really a mix of testing ultraTB and interactiveshell.
def doctest_tb_plain():
"""
In [18]: xmode plain
Exception reporting mode: Plain
In [19]: run simpleerr.py
Traceback (most recent call last):
...line 32, in <module>
bar(mode)
...line 16, in bar
div0()
...line 8, in div0
x/y
ZeroDivisionError: ...
"""
def doctest_tb_context():
"""
In [3]: xmode context
Exception reporting mode: Context
In [4]: run simpleerr.py
---------------------------------------------------------------------------
ZeroDivisionError Traceback (most recent call last)
<BLANKLINE>
... in <module>()
30 mode = 'div'
31
---> 32 bar(mode)
<BLANKLINE>
... in bar(mode)
14 "bar"
15 if mode=='div':
---> 16 div0()
17 elif mode=='exit':
18 try:
<BLANKLINE>
... in div0()
6 x = 1
7 y = 0
----> 8 x/y
9
10 def sysexit(stat, mode):
<BLANKLINE>
ZeroDivisionError: ...
"""
def doctest_tb_verbose():
"""
In [5]: xmode verbose
Exception reporting mode: Verbose
In [6]: run simpleerr.py
---------------------------------------------------------------------------
ZeroDivisionError Traceback (most recent call last)
<BLANKLINE>
... in <module>()
30 mode = 'div'
31
---> 32 bar(mode)
global bar = <function bar at ...>
global mode = 'div'
<BLANKLINE>
... in bar(mode='div')
14 "bar"
15 if mode=='div':
---> 16 div0()
global div0 = <function div0 at ...>
17 elif mode=='exit':
18 try:
<BLANKLINE>
... in div0()
6 x = 1
7 y = 0
----> 8 x/y
x = 1
y = 0
9
10 def sysexit(stat, mode):
<BLANKLINE>
ZeroDivisionError: ...
"""
def doctest_tb_sysexit():
"""
In [17]: %xmode plain
Exception reporting mode: Plain
In [18]: %run simpleerr.py exit
An exception has occurred, use %tb to see the full traceback.
SystemExit: (1, 'Mode = exit')
In [19]: %run simpleerr.py exit 2
An exception has occurred, use %tb to see the full traceback.
SystemExit: (2, 'Mode = exit')
In [20]: %tb
Traceback (most recent call last):
File ... in <module>
bar(mode)
File ... line 22, in bar
sysexit(stat, mode)
File ... line 11, in sysexit
raise SystemExit(stat, 'Mode = %s' % mode)
SystemExit: (2, 'Mode = exit')
In [21]: %xmode context
Exception reporting mode: Context
In [22]: %tb
---------------------------------------------------------------------------
SystemExit Traceback (most recent call last)
<BLANKLINE>
...<module>()
30 mode = 'div'
31
---> 32 bar(mode)
<BLANKLINE>
...bar(mode)
20 except:
21 stat = 1
---> 22 sysexit(stat, mode)
23 else:
24 raise ValueError('Unknown mode')
<BLANKLINE>
...sysexit(stat, mode)
9
10 def sysexit(stat, mode):
---> 11 raise SystemExit(stat, 'Mode = %s' % mode)
12
13 def bar(mode):
<BLANKLINE>
SystemExit: (2, 'Mode = exit')
In [23]: %xmode verbose
Exception reporting mode: Verbose
In [24]: %tb
---------------------------------------------------------------------------
SystemExit Traceback (most recent call last)
<BLANKLINE>
... in <module>()
30 mode = 'div'
31
---> 32 bar(mode)
global bar = <function bar at ...>
global mode = 'exit'
<BLANKLINE>
... in bar(mode='exit')
20 except:
21 stat = 1
---> 22 sysexit(stat, mode)
global sysexit = <function sysexit at ...>
stat = 2
mode = 'exit'
23 else:
24 raise ValueError('Unknown mode')
<BLANKLINE>
... in sysexit(stat=2, mode='exit')
9
10 def sysexit(stat, mode):
---> 11 raise SystemExit(stat, 'Mode = %s' % mode)
global SystemExit = undefined
stat = 2
mode = 'exit'
12
13 def bar(mode):
<BLANKLINE>
SystemExit: (2, 'Mode = exit')
"""
def test_run_cell():
import textwrap
ip.run_cell('a = 10\na+=1')
ip.run_cell('assert a == 11\nassert 1')
nt.assert_equal(ip.user_ns['a'], 11)
complex = textwrap.dedent("""
if 1:
print "hello"
if 1:
print "world"
if 2:
print "foo"
if 3:
print "bar"
if 4:
print "bar"
""")
# Simply verifies that this kind of input is run
ip.run_cell(complex)
def test_db():
"""Test the internal database used for variable persistence."""
ip.db['__unittest_'] = 12
nt.assert_equal(ip.db['__unittest_'], 12)
del ip.db['__unittest_']
assert '__unittest_' not in ip.db
| gpl-2.0 | -3,174,732,701,778,824,000 | 24.169291 | 87 | 0.497419 | false |
wangwei7175878/tutorials | matplotlibTUT/plt17_plot_in_plot.py | 3 | 1094 | # View more python tutorials on my Youtube and Youku channel!!!
# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg
# Youku video tutorial: http://i.youku.com/pythontutorial
# 17 - plot in plot
"""
Please note, this script is for python3+.
If you are using python2+, please modify it accordingly.
Tutorial reference:
http://www.python-course.eu/matplotlib_multiple_figures.php
"""
import matplotlib.pyplot as plt
fig = plt.figure()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 3, 4, 2, 5, 8, 6]
# the four values below are fractions (percentages) of the figure size
left, bottom, width, height = 0.1, 0.1, 0.8, 0.8
ax1 = fig.add_axes([left, bottom, width, height]) # main axes
ax1.plot(x, y, 'r')
ax1.set_xlabel('x')
ax1.set_ylabel('y')
ax1.set_title('title')
ax2 = fig.add_axes([0.2, 0.6, 0.25, 0.25]) # inside axes
ax2.plot(y, x, 'b')
ax2.set_xlabel('x')
ax2.set_ylabel('y')
ax2.set_title('title inside 1')
# different method to add axes
####################################
plt.axes([0.6, 0.2, 0.25, 0.25])
plt.plot(y[::-1], x, 'g')
plt.xlabel('x')
plt.ylabel('y')
plt.title('title inside 2')
plt.show()
| mit | 1,359,926,738,106,568,200 | 24.44186 | 82 | 0.647166 | false |
sparkslabs/kamaelia_ | Sketches/MPS/BugReports/FixTests/Axon/Axon/AdaptiveCommsComponent.py | 6 | 11417 | #
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
=====================================================================
"Adaptive Comms Components" - can add and remove inboxes and outboxes
=====================================================================
An AdaptiveCommsComponent is just like an ordinary component but with the
ability to create and destroy extra inboxes and outboxes whilst it is running.
* An AdaptiveCommsComponent is based on an Axon.Component.component
There are other variants on the basic component:
* Axon.ThreadedComponent.threadedcomponent
* Axon.ThreadedComponent.threadedadaptivecommscomponent
If your component needs to block - eg. wait on a system call; then make it a
'threaded' component. If it needs to change what inboxes or outboxes it has at
runtime, then make it an 'adaptive' component. Otherwise, simply make it an
ordinary component!
Adding and removing inboxes and outboxes
----------------------------------------
To add a new inbox or outbox call self.addInbox() or self.addOutbox() specifying
a base name for the inbox/outbox. The created inbox or outbox is immediately
ready to be used.::
actualInboxName = self.addInbox("inputData")
actualOutboxName = self.addOutbox("outputData")
You specify a name you would ideally like the inbox or outbox to be given. If
that name is already taken then a variant of it will be generated. Calls to
addInbox() and addOutbox() therefore return the actual name the inbox or outbox
was given. You should always use this returned name. It is unwise to assume your
ideal choice of name has been allocated!
To remove a box, call self.deleteInbox() or self.deleteOutbox() specifying the
name of the box to be deleted::
self.deleteInbox(actualInboxName)
self.deleteOutbox(actualOutboxName)
When deleting an inbox or outbox, try to make sure that any linkages involving
that inbox/outbox have been destroyed. This includes not only linkages created
by your component, but any created by other components too.
Tracking resources
------------------
adaptivecommscomponent also includes an ability to track associations between
resources and inboxes, outboxes and other information.
For example, you might want to associate another component (that your component
is interacting with) with the set of inboxes, outboxes and any other info that
are being used to communicate with it.
You can also associate particular inboxes or outboxes with those resources. This
therefore allows you to map both ways: "which resource relates to this inbox?"
and "which inboxes relate to this resource?"
For example, suppose a request leads to your component creating an inbox and
outbox to deal with another component. You might store these as a tracked
resource, along with other information, such as the 'other' component and any
state or linkages that were created; and associate this resource with the inbox
from which data might arrive::
def wireUpToOtherComponent(self, theComponent):
newIn = self.addInbox("commsIn")
newOut = self.addOutbox("commsOut")
newState = "WAITING"
inLinkage = self.link((theComponent,itsOutbox),(self,newIn))
outLinkage = self.link((theComponent,itsInbox), (self,newOut))
resource = theComponent
inboxes = [newIn]
outboxes = [newOut]
info = (newState, inLinkage, outLinkage)
self.trackResourceInformation(resource, inboxes, outboxes, info)
self.trackResource(resource, newIn)
If a message then arrives at that inbox, we can easily look up all the
information we might need know where it came from and how to handle it::
def handleMessageArrived(self, inboxName):
msg = self.recv(inboxName)
resource = self.retrieveResource(inboxName)
inboxes, outboxes, info = self.retrieveResourceInformation(resource)
theComponent=resource
...
When you are finished with a resource and its associated information you can
clean it up with the ceaseTrackingResource() method which removes the
association between the resource and information. For example when you get rid
of a set of linkages and inboxes or outboxes associated with another component
you might want to clean up the resource you were using to track this too::
def doneWithComponent(self, theComponent):
resource=theComponent
inboxes, outboxes, info = self.retrieveResourceInformation(resource)
for name in inboxes:
self.deleteInbox(name)
for name in outboxes:
self.deleteOutbox(name)
state,linkages = info[0], info[1:]
for linkage in linkages:
self.unlink(thelinkage=linkage)
self.ceaseTrackingResource(resource)
Implementation
--------------
AdaptiveCommsComponent's functionality above and beyond the ordinary
Axon.Component.component is implemented in a separate mixin class
_AdaptiveCommsable. This enables it to be reused for other variants on the
basic component that need to inherit this functionality - such as the
threadedadaptivecommscomponent.
When adding new inboxes or outboxes, name clashes are resolved by permuting the
box name with a suffixed unique ID number until there is no longer any clash.
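For example (illustrative only - the exact suffix comes from Axon's id generator,
so only the name actually returned should be relied upon)::
    first = self.addInbox("control")    # -> "control"
    second = self.addInbox("control")   # -> a variant such as "control1"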
"""
import sys
from Axon.Component import component
import Axon.idGen as idGen
from Axon.Box import makeInbox, makeOutbox
from Axon.util import next
class _AdaptiveCommsable(object):
"""\
Mixin for making a component 'adaptable' so that it can create and destroy
extra inboxes and outboxes at runtime.
"""
#
# Public Methods
#
def __init__(self, *args, **argd):
super(_AdaptiveCommsable, self).__init__(*args, **argd)
self._resourceStore = {}
self._resourceLookup = {}
def trackResource(self, resource, inbox):
"""\
Associate the specified resource with the named inbox.
"""
self.inboxes[inbox] # Force failure if the inbox does not exist
self._resourceLookup[inbox] = resource
def retrieveTrackedResource(self, inbox):
"""\
Retrieve the resource that has been associated with the named inbox.
"""
return self._resourceLookup[inbox]
def trackResourceInformation(self, resource, inboxes, outboxes, information):
"""\
Store a list of inboxes, outboxes and other information as the specified
resource.
The inboxes and outboxes specified must exist.
"""
"Provides a lookup service associating inboxes/outboxes & user information with a resource. Uses GIGO principle."
#sys.stderr.write("OHHHH We're in HERE???!!\n"); sys.stderr.flush()
# print "TRACKING", inboxes, outboxes, information
# print "USING", repr(resource)
# print "TRACKING FOR RESOURCE", resource
[ self.inboxes[x] for x in inboxes] # Force an assertion if any inbox does not exist
[ self.outboxes[x] for x in outboxes] # Force an assertion if any inbox does not exist
# if self._resourceStore.get(resource, False):
# print "Changing resources tracked for", resource
# print "Was tracking", self._resourceStore[resource]
# print "Now Tracking", (inboxes, outboxes, information)
self._resourceStore[resource] = (inboxes, outboxes, information)
def ceaseTrackingResource(self, resource):
"""Stop tracking a resource and release references to it"""
# print "CEASING TO TRACK RESOURCE", repr(resource)
del self._resourceStore[resource]
def retrieveTrackedResourceInformation(self, resource):
"""\
Retrieve a tuple (inboxes, outboxes, otherdata) that has been stored as
the specified resource.
"""
# print self._resourceStore
return self._resourceStore[resource]
def addInbox(self,*args):
"""
Allocates a new inbox with name *based on* the name provided. If a box
with the suggested name already exists then a variant is used instead.
Returns the name of the inbox added.
"""
name = self._newInboxName(*args)
self.inboxes[name]=makeInbox(self.unpause)
return name
def deleteInbox(self,name):
"""\
Deletes the named inbox. Any messages in it are lost.
        Try to ensure any linkages involving this inbox have been destroyed -
not just ones created by this component, but by others too! Behaviour is
undefined if this is not the case, and should be avoided.
"""
del self.inboxes[name]
def addOutbox(self,*args):
"""\
Allocates a new outbox with name *based on* the name provided. If a box
with the suggested name already exists then a variant is used instead.
Returns the name of the outbox added.
"""
name = self._newOutboxName(*args)
self.outboxes[name]=makeOutbox(self.unpause)
return name
def deleteOutbox(self,name):
"""\
Deletes the named outbox.
        Try to ensure any linkages involving this outbox have been destroyed -
not just ones created by this component, but by others too! Behaviour is
undefined if this is not the case, and should be avoided.
"""
del self.outboxes[name]
#
# Private Methods
#
def _newInboxName(self, name="inbox"):
"""\
Allocates a new inbox with name *based on* the name provided.
If this name is available it will be returned unchanged.
Otherwise the name will be returned with a number appended
"""
while name in self.inboxes:
name =name+str(next(idGen.idGen()))
return name
#
def _newOutboxName(self, name="outbox"):
"""\
Allocates a new outbox name *based on* the name provided.
If this name is available it will be returned unchanged.
Otherwise the name will be returned with a number appended
"""
while name in self.outboxes:
name =name+str(next(idGen.idGen()))
return name
class AdaptiveCommsComponent(component, _AdaptiveCommsable):
"""\
Base class for a component that works just like an ordinary component but can
also 'adapt' its comms by adding or removing inboxes and outboxes whilst it
is running.
Subclass to make your own.
See Axon.AdaptiveCommsComponent._AdaptiveCommsable for the extra methods that
this subclass of component has.
"""
def __init__(self,*args, **argd):
component.__init__(self,*args, **argd)
_AdaptiveCommsable.__init__(self)
if __name__=="__main__":
print("Tests are separated into test/test_AdaptiveCommsableComponent.py")
| apache-2.0 | 6,633,554,421,765,679,000 | 35.592949 | 119 | 0.695454 | false |
ukanga/SickRage | tests/pp_tests.py | 11 | 6594 | # coding=UTF-8
# Author: Dennis Lutter <[email protected]>
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
"""
Test post processing
"""
from __future__ import print_function, unicode_literals
import os.path
import shutil
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '../lib')))
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import sickbeard
from sickbeard.helpers import make_dirs
from sickbeard.name_cache import addNameToCache
from sickbeard.postProcessor import PostProcessor
from sickbeard.tv import TVEpisode, TVShow
import tests.test_lib as test
class PPInitTests(unittest.TestCase):
"""
Init tests
"""
def setUp(self):
"""
Set up tests
"""
self.post_processor = PostProcessor(test.FILE_PATH)
def test_init_file_name(self):
"""
Test file name
"""
self.assertEqual(self.post_processor.file_name, test.FILENAME)
def test_init_folder_name(self):
"""
Test folder name
"""
self.assertEqual(self.post_processor.folder_name, test.SHOW_NAME)
class PPBasicTests(test.SickbeardTestDBCase):
"""
Basic tests
"""
def test_process(self):
"""
Test process
"""
show = TVShow(1, 3)
show.name = test.SHOW_NAME
show.location = test.SHOW_DIR
show.saveToDB()
sickbeard.showList = [show]
episode = TVEpisode(show, test.SEASON, test.EPISODE)
episode.name = "some episode name"
episode.saveToDB()
addNameToCache('show name', 3)
sickbeard.PROCESS_METHOD = 'move'
post_processor = PostProcessor(test.FILE_PATH)
self.assertTrue(post_processor.process())
class ListAssociatedFiles(unittest.TestCase):
def __init__(self, test_case):
super(ListAssociatedFiles, self).__init__(test_case)
self.test_tree = os.path.join('Show Name', 'associated_files', 'random', 'recursive', 'subdir')
file_names = [
'Show Name [SickRage].avi',
'Show Name [SickRage].srt',
'Show Name [SickRage].nfo',
'Show Name [SickRage].en.srt',
'Non-Associated Show [SickRage].srt',
'Non-Associated Show [SickRage].en.srt',
'Show [SickRage] Non-Associated.en.srt',
'Show [SickRage] Non-Associated.srt',
]
self.file_list = [os.path.join('Show Name', f) for f in file_names] + [os.path.join(self.test_tree, f) for f in file_names]
self.post_processor = PostProcessor('Show Name')
self.maxDiff = None
sickbeard.MOVE_ASSOCIATED_FILES = True
sickbeard.ALLOWED_EXTENSIONS = u''
def setUp(self):
make_dirs(self.test_tree)
for test_file in self.file_list:
open(test_file, 'a').close()
def tearDown(self):
shutil.rmtree('Show Name')
def test_subfolders(self):
# Test edge cases first:
self.assertEqual([], # empty file_path
self.post_processor.list_associated_files('', subfolders=True))
self.assertEqual([], # no file name
self.post_processor.list_associated_files('\\Show Name\\.nomedia', subfolders=True))
associated_files = self.post_processor.list_associated_files(self.file_list[0], subfolders=True)
associated_files = sorted(file_name.lstrip('./') for file_name in associated_files)
out_list = sorted(file_name for file_name in self.file_list[1:] if 'Non-Associated' not in file_name)
self.assertEqual(out_list, associated_files)
# Test no associated files:
        associated_files = self.post_processor.list_associated_files('Fools Quest.avi', subfolders=True)
        self.assertEqual([], associated_files)
def test_no_subfolders(self):
associated_files = self.post_processor.list_associated_files(self.file_list[0], subfolders=False)
associated_files = sorted(file_name.lstrip('./') for file_name in associated_files)
out_list = sorted(file_name for file_name in self.file_list[1:] if 'associated_files' not in file_name and 'Non-Associated' not in file_name)
self.assertEqual(out_list, associated_files)
def test_subtitles_only(self):
associated_files = self.post_processor.list_associated_files(self.file_list[0], subtitles_only=True, subfolders=True)
associated_files = sorted(file_name.lstrip('./') for file_name in associated_files)
out_list = sorted(file_name for file_name in self.file_list if file_name.endswith('.srt') and 'Non-Associated' not in file_name)
self.assertEqual(out_list, associated_files)
def test_subtitles_only_no_subfolders(self):
associated_files = self.post_processor.list_associated_files(self.file_list[0], subtitles_only=True, subfolders=False)
associated_files = sorted(file_name.lstrip('./') for file_name in associated_files)
out_list = sorted(file_name for file_name in self.file_list if file_name.endswith('.srt') and 'associated_files' not in file_name and 'Non-Associated' not in file_name)
self.assertEqual(out_list, associated_files)
if __name__ == '__main__':
print("==================")
print("STARTING - PostProcessor TESTS")
print("==================")
print("######################################################################")
SUITE = unittest.TestLoader().loadTestsFromTestCase(PPInitTests)
unittest.TextTestRunner(verbosity=2).run(SUITE)
print("######################################################################")
SUITE = unittest.TestLoader().loadTestsFromTestCase(PPBasicTests)
unittest.TextTestRunner(verbosity=2).run(SUITE)
print("######################################################################")
SUITE = unittest.TestLoader().loadTestsFromTestCase(ListAssociatedFiles)
unittest.TextTestRunner(verbosity=2).run(SUITE)
| gpl-3.0 | 172,151,293,531,098,000 | 36.044944 | 176 | 0.639066 | false |
rmhyman/DataScience | Lesson2/pandasql_and_weather_data1.py | 1 | 1526 | import pandas
import pandasql
def num_rainy_days(filename):
'''
This function should run a SQL query on a dataframe of
weather data. The SQL query should return one column and
one row - a count of the number of days in the dataframe where
the rain column is equal to 1 (i.e., the number of days it
rained). The dataframe will be titled 'weather_data'. You'll
need to provide the SQL query. You might find SQL's count function
useful for this exercise. You can read more about it here:
https://dev.mysql.com/doc/refman/5.1/en/counting-rows.html
You might also find that interpreting numbers as integers or floats may not
work initially. In order to get around this issue, it may be useful to cast
these numbers as integers. This can be done by writing cast(column as integer).
So for example, if we wanted to cast the maxtempi column as an integer, we would actually
write something like where cast(maxtempi as integer) = 76, as opposed to simply
where maxtempi = 76.
You can see the weather data that we are passing in below:
https://www.dropbox.com/s/7sf0yqc9ykpq3w8/weather_underground.csv
'''
weather_data = pandas.read_csv(filename)
q = """
SELECT
count(rain)
FROM
weather_data
WHERE
cast(rain as integer) = 1
"""
#Execute your SQL command against the pandas frame
rainy_days = pandasql.sqldf(q.lower(), locals())
return rainy_days
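# Illustrative usage only (assumes a local copy of the weather CSV linked in the
# docstring above; the filename below is a placeholder):
if __name__ == '__main__':
    print(num_rainy_days('weather_underground.csv'))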
| mit | -1,318,662,145,173,624,000 | 36.15 | 93 | 0.675623 | false |
percyfal/luigi | luigi/contrib/scalding.py | 22 | 10381 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import re
import subprocess
from luigi import six
import luigi.configuration
import luigi.contrib.hadoop
import luigi.contrib.hadoop_jar
import luigi.contrib.hdfs
from luigi import LocalTarget
from luigi.task import flatten
logger = logging.getLogger('luigi-interface')
"""
Scalding support for Luigi.
Example configuration section in luigi.cfg::
[scalding]
# scala home directory, which should include a lib subdir with scala jars.
scala-home: /usr/share/scala
# scalding home directory, which should include a lib subdir with
# scalding-*-assembly-* jars as built from the official Twitter build script.
scalding-home: /usr/share/scalding
# provided dependencies, e.g. jars required for compiling but not executing
    # scalding jobs. Currently required jars:
# org.apache.hadoop/hadoop-core/0.20.2
# org.slf4j/slf4j-log4j12/1.6.6
# log4j/log4j/1.2.15
# commons-httpclient/commons-httpclient/3.1
# commons-cli/commons-cli/1.2
# org.apache.zookeeper/zookeeper/3.3.4
scalding-provided: /usr/share/scalding/provided
# additional jars required.
scalding-libjars: /usr/share/scalding/libjars
"""
class ScaldingJobRunner(luigi.contrib.hadoop.JobRunner):
"""
JobRunner for `pyscald` commands. Used to run a ScaldingJobTask.
"""
def __init__(self):
conf = luigi.configuration.get_config()
default = os.environ.get('SCALA_HOME', '/usr/share/scala')
self.scala_home = conf.get('scalding', 'scala-home', default)
default = os.environ.get('SCALDING_HOME', '/usr/share/scalding')
self.scalding_home = conf.get('scalding', 'scalding-home', default)
self.provided_dir = conf.get(
'scalding', 'scalding-provided', os.path.join(default, 'provided'))
self.libjars_dir = conf.get(
'scalding', 'scalding-libjars', os.path.join(default, 'libjars'))
self.tmp_dir = LocalTarget(is_tmp=True)
def _get_jars(self, path):
return [os.path.join(path, j) for j in os.listdir(path)
if j.endswith('.jar')]
def get_scala_jars(self, include_compiler=False):
lib_dir = os.path.join(self.scala_home, 'lib')
jars = [os.path.join(lib_dir, 'scala-library.jar')]
# additional jar for scala 2.10 only
reflect = os.path.join(lib_dir, 'scala-reflect.jar')
if os.path.exists(reflect):
jars.append(reflect)
if include_compiler:
jars.append(os.path.join(lib_dir, 'scala-compiler.jar'))
return jars
def get_scalding_jars(self):
lib_dir = os.path.join(self.scalding_home, 'lib')
return self._get_jars(lib_dir)
def get_scalding_core(self):
lib_dir = os.path.join(self.scalding_home, 'lib')
for j in os.listdir(lib_dir):
if j.startswith('scalding-core-'):
p = os.path.join(lib_dir, j)
logger.debug('Found scalding-core: %s', p)
return p
raise luigi.contrib.hadoop.HadoopJobError('Could not find scalding-core.')
def get_provided_jars(self):
return self._get_jars(self.provided_dir)
def get_libjars(self):
return self._get_jars(self.libjars_dir)
def get_tmp_job_jar(self, source):
job_name = os.path.basename(os.path.splitext(source)[0])
return os.path.join(self.tmp_dir.path, job_name + '.jar')
def get_build_dir(self, source):
build_dir = os.path.join(self.tmp_dir.path, 'build')
return build_dir
def get_job_class(self, source):
# find name of the job class
# usually the one that matches file name or last class that extends Job
job_name = os.path.splitext(os.path.basename(source))[0]
package = None
job_class = None
for l in open(source).readlines():
p = re.search(r'package\s+([^\s\(]+)', l)
if p:
package = p.groups()[0]
p = re.search(r'class\s+([^\s\(]+).*extends\s+.*Job', l)
if p:
job_class = p.groups()[0]
if job_class == job_name:
break
if job_class:
if package:
job_class = package + '.' + job_class
logger.debug('Found scalding job class: %s', job_class)
return job_class
else:
            raise luigi.contrib.hadoop.HadoopJobError('Could not find scalding job class.')
def build_job_jar(self, job):
job_jar = job.jar()
if job_jar:
if not os.path.exists(job_jar):
logger.error("Can't find jar: %s, full path %s", job_jar, os.path.abspath(job_jar))
raise Exception("job jar does not exist")
if not job.job_class():
logger.error("Undefined job_class()")
raise Exception("Undefined job_class()")
return job_jar
job_src = job.source()
if not job_src:
logger.error("Both source() and jar() undefined")
raise Exception("Both source() and jar() undefined")
if not os.path.exists(job_src):
logger.error("Can't find source: %s, full path %s", job_src, os.path.abspath(job_src))
raise Exception("job source does not exist")
job_src = job.source()
job_jar = self.get_tmp_job_jar(job_src)
build_dir = self.get_build_dir(job_src)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
classpath = ':'.join(filter(None,
self.get_scalding_jars() +
self.get_provided_jars() +
self.get_libjars() +
job.extra_jars()))
scala_cp = ':'.join(self.get_scala_jars(include_compiler=True))
# compile scala source
arglist = ['java', '-cp', scala_cp, 'scala.tools.nsc.Main',
'-classpath', classpath,
'-d', build_dir, job_src]
logger.info('Compiling scala source: %s', ' '.join(arglist))
subprocess.check_call(arglist)
# build job jar file
arglist = ['jar', 'cf', job_jar, '-C', build_dir, '.']
logger.info('Building job jar: %s', ' '.join(arglist))
subprocess.check_call(arglist)
return job_jar
def run_job(self, job):
job_jar = self.build_job_jar(job)
jars = [job_jar] + self.get_libjars() + job.extra_jars()
scalding_core = self.get_scalding_core()
libjars = ','.join(filter(None, jars))
arglist = luigi.contrib.hdfs.load_hadoop_cmd() + ['jar', scalding_core, '-libjars', libjars]
arglist += ['-D%s' % c for c in job.jobconfs()]
job_class = job.job_class() or self.get_job_class(job.source())
arglist += [job_class, '--hdfs']
        # scalding does not parse arguments containing '=' properly
arglist += ['--name', job.task_id.replace('=', ':')]
(tmp_files, job_args) = luigi.contrib.hadoop_jar.fix_paths(job)
arglist += job_args
env = os.environ.copy()
jars.append(scalding_core)
hadoop_cp = ':'.join(filter(None, jars))
env['HADOOP_CLASSPATH'] = hadoop_cp
logger.info("Submitting Hadoop job: HADOOP_CLASSPATH=%s %s",
hadoop_cp, ' '.join(arglist))
luigi.contrib.hadoop.run_and_track_hadoop_job(arglist, env=env)
for a, b in tmp_files:
a.move(b)
class ScaldingJobTask(luigi.contrib.hadoop.BaseHadoopJobTask):
"""
A job task for Scalding that define a scala source and (optional) main method.
requires() should return a dictionary where the keys are Scalding argument
names and values are sub tasks or lists of subtasks.
For example:
.. code-block:: python
{'input1': A, 'input2': C} => --input1 <Aoutput> --input2 <Coutput>
{'input1': [A, B], 'input2': [C]} => --input1 <Aoutput> <Boutput> --input2 <Coutput>
"""
def relpath(self, current_file, rel_path):
"""
Compute path given current file and relative path.
"""
script_dir = os.path.dirname(os.path.abspath(current_file))
rel_path = os.path.abspath(os.path.join(script_dir, rel_path))
return rel_path
def source(self):
"""
Path to the scala source for this Scalding Job
Either one of source() or jar() must be specified.
"""
return None
def jar(self):
"""
Path to the jar file for this Scalding Job
Either one of source() or jar() must be specified.
"""
return None
def extra_jars(self):
"""
Extra jars for building and running this Scalding Job.
"""
return []
def job_class(self):
"""
optional main job class for this Scalding Job.
"""
return None
def job_runner(self):
return ScaldingJobRunner()
def atomic_output(self):
"""
If True, then rewrite output arguments to be temp locations and
atomically move them into place after the job finishes.
"""
return True
def requires(self):
return {}
def job_args(self):
"""
Extra arguments to pass to the Scalding job.
"""
return []
def args(self):
"""
Returns an array of args to pass to the job.
"""
arglist = []
for k, v in six.iteritems(self.requires_hadoop()):
arglist.append('--' + k)
arglist.extend([t.output().path for t in flatten(v)])
arglist.extend(['--output', self.output()])
arglist.extend(self.job_args())
return arglist
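# Illustrative sketch only, not part of the original module: a hypothetical pair of tasks
# showing how source(), requires() and output() are typically filled in. All class names
# and paths below are made up.
class _ExampleInput(luigi.ExternalTask):
    """Hypothetical externally produced input data set."""

    def output(self):
        return luigi.contrib.hdfs.HdfsTarget('/tmp/wordcount-input')  # hypothetical HDFS path


class _ExampleWordCount(ScaldingJobTask):
    """Hypothetical job compiled from scala/WordCount.scala against one input."""

    def source(self):
        return self.relpath(__file__, 'scala/WordCount.scala')  # hypothetical scala source

    def requires(self):
        # the key becomes "--input <hdfs path of _ExampleInput's output>" on the scalding command line
        return {'input': _ExampleInput()}

    def output(self):
        return luigi.contrib.hdfs.HdfsTarget('/tmp/wordcount-output')  # hypothetical HDFS path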
| apache-2.0 | -291,991,431,541,930,430 | 33.036066 | 100 | 0.590984 | false |
borkit/scriptdump | AWS/more-aws-scripts/s3-mongodump.py | 1 | 3965 | #!/usr/bin/env python
import boto3
import sys
import argparse
import subprocess
import shutil
import os
from datetime import datetime
import operator
def dump(host, database, username, password, out):
if username and password:
auth_str= "--username %s --password %s" % (username, password)
else:
auth_str=""
if database:
db_str="--db %s" % (database)
else:
db_str=""
mongodump_cmd="mongodump --host %s -o %s %s %s" % (host,out,auth_str,db_str)
print mongodump_cmd
mongodump_output = subprocess.check_output(mongodump_cmd, shell=True)
print mongodump_output
def main():
parser = argparse.ArgumentParser(description='A tool to make mongodb backups on Amazon s3')
parser.add_argument('-u', '--user',
help="Mongodb user (optional)")
parser.add_argument('-p', '--password',
help="Mongodb password (optional)")
parser.add_argument('-H', '--host', default="localhost:27017",
help="Mongodb host: <hostname>:<port>." )
parser.add_argument('-d', '--database',
help="The database to backup (all if not provided)")
parser.add_argument('-o', '--out', default='dump',
help="The output directory for dumped files")
parser.add_argument('-n', '--number', type=int, default=7,
help="Number of copies to retain in the S3 bucket")
parser.add_argument('-b', '--bucket', required=True,
help="Amazon s3 bucket." )
parser.add_argument('-P', '--prefix',
help="For grouped objects aka s3 folders, provide the prefix key")
arg = parser.parse_args()
if arg.user and not arg.password:
parser.error("You provided a user but not a password")
if arg.password and not arg.user:
parser.error("You provided a password but not a user")
    if arg.prefix is not None and arg.prefix[-1:] == "/":
arg.prefix="%s" % arg.prefix[:-1]
# mongodump
dump(arg.host, arg.database, arg.user, arg.password, arg.out)
# List and get the number of files in the bucket
num_files=0
s3 = boto3.resource('s3')
if arg.prefix:
objects=s3.Bucket(name=arg.bucket).objects.filter(Prefix=arg.prefix)
num_files=-1
else:
objects=s3.Bucket(name=arg.bucket).objects.filter()
num_files=0
print "Filelist on the S3 bucket:"
filedict={}
for object in objects:
print (object.key)
filedict.update({object.key: object.last_modified})
num_files=num_files + 1
# create new tarball
num_files=num_files+1
print "Creating the tarball:"
tarball_name="%s-%s.tar.gz" % (arg.out, datetime.strftime(datetime.now(),'%Y-%m-%d-%H%M%S'))
tarball_cmd="tar -czvf %s %s" % (tarball_name, arg.out)
tarball_output = subprocess.check_output(tarball_cmd, shell=True)
print tarball_output
# remove dumped files
print "Removing temporary dump files..."
shutil.rmtree(arg.out)
# upload the new tarball to s3
remote_file="%s/%s" % (arg.prefix,os.path.basename(tarball_name))
print "Uploading %s to Amazon S3..." % tarball_name
s3_client = boto3.client('s3')
s3.meta.client.upload_file(tarball_name, arg.bucket, remote_file)
# remove temporary tarball
print "Removing temporary local tarball..."
os.remove(tarball_name)
    # keep the last N dumps on s3: remove the oldest ones
    # drop the prefix "folder" key itself from the listing if a prefix (dirname) was used,
    # so that it is never considered for deletion
    if arg.prefix:
        del filedict[arg.prefix + "/"]
sorted_filedict=sorted(filedict.items(), key=operator.itemgetter(1))
for item in sorted_filedict[0:len(sorted_filedict)-arg.number]:
print "Deleting file from S3: %s" % item[0]
object = s3.Object(arg.bucket, item[0]).delete()
if __name__ == '__main__':
sys.exit(main())
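# Example invocation (bucket, prefix and database names below are placeholders):
#   python s3-mongodump.py -H localhost:27017 -d mydb -b my-backup-bucket -P mongodb/daily -n 7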
| mit | 922,675,742,225,475,800 | 34.720721 | 97 | 0.618411 | false |
polojacky/ehfpi | ehf/analysis/views.py | 1 | 128002 | import csv
from collections import defaultdict
import json
from django.shortcuts import render_to_response
from django.http import HttpResponse
from django.db.models import Q
from django.template import RequestContext
from django.shortcuts import HttpResponseRedirect
from django.utils.encoding import smart_str, smart_unicode
from browse.browse_view_models import allEHFPI
from browse.models import publication, gene
from analysis.forms import networkForm
from ehf.settings import URL_PREFIX, PKL_DIR
from ehf.commonVar import fieldDic, field, fieldDes
from analysis.models import *
# serialization
import os
import cPickle as pickle
import hashlib
#chartit
from chartit import DataPool, Chart
# Create your views here.
def index(request):
return render_to_response('analysis/index.html')
#create with article and taxonomy info, used in gea and network analysis
def generateTree():
# construct tree
resultB = taxonomy.objects.filter(kingdom="bacteria").values()
resultV = taxonomy.objects.filter(kingdom="virus").values()
resultF = taxonomy.objects.filter(kingdom="fungi").values()
# generate tree view list
treeB = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
treeV = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
treeF = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
tree_taxonomy = {}
# bacteria
for item in resultB:
kingdom = item['kingdom']
# virus has family, bacteria has genus
genus = item['genus']
species = item['species']
pubmedId = item['pubmedId']
#display author,journal and pubmed_id info
pub = publication.objects.filter(pubmedId=pubmedId)[0]
firstAuthor = pub.firstAuthor
year = pub.year
title = pub.title
tree_taxonomy[kingdom] = item['kingdomTaxonomy']
tree_taxonomy[genus] = item['genusTaxonomy']
tree_taxonomy[species] = item['speciesTaxonomy']
tree_taxonomy[firstAuthor + '-' + str(year) + '-' + title] = pubmedId
if firstAuthor + '-' + str(year) + '-' + title not in treeB[kingdom][genus][species]:
treeB[kingdom][genus][species].append(firstAuthor + '-' + str(year) + '-' + title)
# virus
for item in resultV:
kingdom = item['kingdom']
# virus has family, bacteria has genus
family = item['family']
species = item['species']
pubmedId = item['pubmedId']
#display author,journal and pubmed_id info
pub = publication.objects.filter(pubmedId=pubmedId)[0]
firstAuthor = pub.firstAuthor
year = pub.year
title = pub.title
tree_taxonomy[kingdom] = item['kingdomTaxonomy']
tree_taxonomy[family] = item['familyTaxonomy']
tree_taxonomy[species] = item['speciesTaxonomy']
tree_taxonomy[firstAuthor + '-' + str(year) + '-' + title] = pubmedId
if firstAuthor + '-' + str(year) + '-' + title not in treeV[kingdom][family][species]:
treeV[kingdom][family][species].append(firstAuthor + '-' + str(year) + '-' + title)
# fungi
for item in resultF:
kingdom = item['kingdom']
# virus has family, bacteria has genus, fungi we only use genus
genus = item['genus']
species = item['species']
pubmedId = item['pubmedId']
#display author,journal and pubmed_id info
pub = publication.objects.filter(pubmedId=pubmedId)[0]
firstAuthor = pub.firstAuthor
year = pub.year
title = pub.title
tree_taxonomy[kingdom] = item['kingdomTaxonomy']
tree_taxonomy[genus] = item['genusTaxonomy']
tree_taxonomy[species] = item['speciesTaxonomy']
tree_taxonomy[firstAuthor + '-' + str(year) + '-' + title] = pubmedId
if firstAuthor + '-' + str(year) + '-' + title not in treeF[kingdom][genus][species]:
treeF[kingdom][genus][species].append(firstAuthor + '-' + str(year) + '-' + title)
# a three level tree
for obj in treeB:
treeB[obj].default_factory = None
for item in treeB[obj]:
treeB[obj][item].default_factory = None
for obj in treeV:
treeV[obj].default_factory = None
for item in treeV[obj]:
treeV[obj][item].default_factory = None
for obj in treeF:
treeF[obj].default_factory = None
for item in treeF[obj]:
treeF[obj][item].default_factory = None
treeB = dict(treeB)
treeV = dict(treeV)
treeF = dict(treeF)
tree = []
tree.append(treeB)
tree.append(treeV)
tree.append(treeF)
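    # Illustrative shape of the resulting list (taxon names and article strings are made up):
    # tree = [
    #     {'bacteria': {'<genus>': {'<species>': ['<firstAuthor>-<year>-<title>', ...]}}},   # treeB
    #     {'virus':    {'<family>': {'<species>': ['<firstAuthor>-<year>-<title>', ...]}}},  # treeV
    #     {'fungi':    {'<genus>': {'<species>': ['<firstAuthor>-<year>-<title>', ...]}}},   # treeF
    # ]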
# calculate the badge number start
# notice the number is based on human gene numbers
    # the number displayed in the badge; we use four dicts to avoid counting duplicate items across the four fields
result = allEHFPI.objects.all()
badge_taxonomy = defaultdict(int)
kingdomGeneList = defaultdict(list)
familyList = defaultdict(list)
genusList = defaultdict(list)
speciesList = defaultdict(list)
speciesArticleList = defaultdict(list)
allList = []
for item in result:
if item.humanHomolog != '':
if item.humanHomolog not in allList:
badge_taxonomy['all'] += 1
allList.append(item.humanHomolog)
if item.humanHomolog not in kingdomGeneList[item.kingdom]:
badge_taxonomy[item.kingdom] += 1
kingdomGeneList[item.kingdom].append(item.humanHomolog)
if item.family != '':
if item.humanHomolog not in familyList[item.family]:
badge_taxonomy[item.family] += 1
familyList[item.family].append(item.humanHomolog)
if item.genus != '':
if item.humanHomolog not in genusList[item.genus]:
badge_taxonomy[item.genus] += 1
genusList[item.genus].append(item.humanHomolog)
if item.humanHomolog not in speciesList[item.species]:
badge_taxonomy[item.species] += 1
speciesList[item.species].append(item.humanHomolog)
if item.humanHomolog not in speciesArticleList[item.species + '_' + item.firstAuthor + '-' + str(
                    item.year) + '-' + item.title]:
badge_taxonomy[item.species + '_' + item.firstAuthor + '-' + str(item.year) + '-' + item.title] += 1
speciesArticleList[
item.species + '_' + item.firstAuthor + '-' + str(item.year) + '-' + item.title].append(
item.humanHomolog)
badge_taxonomy = dict(badge_taxonomy)
#calculate the badge number end
return tree, tree_taxonomy, badge_taxonomy
def gea(request):
if os.path.isfile(PKL_DIR + '/gea.pkl'): #have pickle
file_out = file(PKL_DIR + '/gea.pkl', 'rb')
tree = pickle.load(file_out)
tree_taxonomy = pickle.load(file_out)
badge_taxonomy = pickle.load(file_out)
file_out.close()
else:
tree, tree_taxonomy, badge_taxonomy = generateTree()
#generate pickle
file_gea = file(PKL_DIR + '/gea.pkl', 'wb')
pickle.dump(tree, file_gea, True)
pickle.dump(tree_taxonomy, file_gea, True)
pickle.dump(badge_taxonomy, file_gea, True)
file_gea.close()
return render_to_response('analysis/gea.html',
{'tree': tree, 'tree_taxonomy': tree_taxonomy, 'badge_taxonomy': badge_taxonomy},
context_instance=RequestContext(request))
# given a species list, return the related gene list
def getGeneList(request):
if request.method == 'GET':
pathogen = request.GET.getlist('pathogen[]')
speciesList = []
articleList = []
for item in pathogen:
if item.startswith('species'):
speciesList.append(item[item.find('_') + 1:])
if item.startswith('article'):
articleList.append(item[item.find('_') + 1:])
        # An article may cover several species and a species may appear in several articles. So if a species is selected,
        # all articles under it must be selected too; if an article is selected, we must use AND between it and its species!!!
qTotal = Q()
for item in articleList:
speciesItem = item[0:item.find('_')]
pubmedIdItem = item[item.find('_') + 1:]
qTotal = qTotal | (Q(speciesTaxonomy=speciesItem) & Q(pubmedId=pubmedIdItem))
qTotal = qTotal | Q(speciesTaxonomy__in=speciesList)
result = allEHFPI.objects.filter(qTotal)
geneList = []
for item in result:
geneList.append(item.humanHomolog)
geneList = list(set(geneList))
if '' in geneList:
geneList.remove('')
return render_to_response('analysis/getGeneList.html', {'geneList': ','.join(geneList)})
#added: 20140925 return david result
def davidResult(request):
if request.method == 'GET':
pathogen = request.GET['pathogen'].split(',')
speciesList = []
articleList = []
for item in pathogen:
if item.startswith('species'):
speciesList.append(item[item.find('_') + 1:])
if item.startswith('article'):
articleList.append(item[item.find('_') + 1:])
        # An article may cover several species and a species may appear in several articles. So if a species is selected,
        # all articles under it must be selected too; if an article is selected, we must use AND between it and its species!!!
qTotal = Q()
for item in articleList:
speciesItem = item[0:item.find('_')]
pubmedIdItem = item[item.find('_') + 1:]
qTotal = qTotal | (Q(speciesTaxonomy=speciesItem) & Q(pubmedId=pubmedIdItem))
qTotal = qTotal | Q(speciesTaxonomy__in=speciesList)
result = allEHFPI.objects.filter(qTotal)
geneList = []
for item in result:
geneList.append(item.humanHomolog)
geneList = list(set(geneList))
if '' in geneList:
geneList.remove('')
#detail page for each item, we use the same function but different param to simplify the implementation
if 'type' in request.GET:
type = request.GET['type']
geneResultList = []
if type == 'bp':
AnnoResult = geneSymbolToGOBP.objects.filter(geneSymbol__in=geneList)
recordNum = len(AnnoResult)
#same geneSymbol aggregate
result = defaultdict(list)
for item in AnnoResult:
result[item.geneSymbol.upper()].append({item.gobp: item.gobpAnnotation})
result = dict(result)
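                # result maps gene symbol -> list of {GO id: annotation} dicts, e.g. (illustrative values only):
                # {'TP53': [{'GO:0006915': 'apoptotic process'}, ...]}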
elif type == 'cc':
AnnoResult = geneSymbolToGOCC.objects.filter(geneSymbol__in=geneList)
recordNum = len(AnnoResult)
#same geneSymbol aggregate
result = defaultdict(list)
for item in AnnoResult:
result[item.geneSymbol.upper()].append({item.gocc: item.goccAnnotation})
result = dict(result)
elif type == 'mf':
AnnoResult = geneSymbolToGOMF.objects.filter(geneSymbol__in=geneList)
recordNum = len(AnnoResult)
#same geneSymbol aggregate
result = defaultdict(list)
for item in AnnoResult:
result[item.geneSymbol.upper()].append({item.gomf: item.gomfAnnotation})
result = dict(result)
elif type == 'BBID':
AnnoResult = geneSymbolToPathwayBBID.objects.filter(geneSymbol__in=geneList)
recordNum = len(AnnoResult)
#same geneSymbol aggregate
result = defaultdict(list)
for item in AnnoResult:
result[item.geneSymbol.upper()].append(item.BBID)
result = dict(result)
elif type == 'KEGG':
AnnoResult = geneSymbolToPathwayKEGG.objects.filter(geneSymbol__in=geneList)
recordNum = len(AnnoResult)
#same geneSymbol aggregate
result = defaultdict(list)
for item in AnnoResult:
result[item.geneSymbol.upper()].append({item.KEGG: item.KEGGAnnotation})
result = dict(result)
elif type == 'PANTHER':
AnnoResult = geneSymbolToPathwayPANTHER.objects.filter(geneSymbol__in=geneList)
recordNum = len(AnnoResult)
#same geneSymbol aggregate
result = defaultdict(list)
for item in AnnoResult:
result[item.geneSymbol.upper()].append({item.PANTHER: item.PANTHERAnnotation})
result = dict(result)
elif type == 'REACTOME':
AnnoResult = geneSymbolToPathwayREACTOME.objects.filter(geneSymbol__in=geneList)
recordNum = len(AnnoResult)
#same geneSymbol aggregate
result = defaultdict(list)
for item in AnnoResult:
result[item.geneSymbol.upper()].append({item.REACTOME: item.REACTOMEAnnotation})
result = dict(result)
else:
return HttpResponseRedirect(
                URL_PREFIX + '/analysis/gea/')  # redirect back to the query page rather than the result page
#here return the detail page
# maybe we need to query the gene model to get gene name first since the gene name provided by DAVID is not accurate
for item in AnnoResult.values_list('geneSymbol').distinct():
geneResultList.append(item[0])
print len(geneResultList)
geneNameDict = {}
tmpRes = gene.objects.filter(humanHomolog__in=geneResultList).values('humanHomolog',
'geneDescription').distinct()
for item in tmpRes:
geneNameDict[item['humanHomolog']] = item['geneDescription']
return render_to_response('analysis/davidDetail.html',
{'geneNameDict': geneNameDict, 'result': result, 'type': type,
'recordNum': recordNum})
#statistics default
bpNum = len(geneSymbolToGOBP.objects.filter(geneSymbol__in=geneList).values_list('geneSymbol').distinct())
ccNum = len(geneSymbolToGOCC.objects.filter(geneSymbol__in=geneList).values_list('geneSymbol').distinct())
mfNum = len(geneSymbolToGOMF.objects.filter(geneSymbol__in=geneList).values_list('geneSymbol').distinct())
BBID = len(geneSymbolToPathwayBBID.objects.filter(geneSymbol__in=geneList).values_list('geneSymbol').distinct())
KEGG = len(geneSymbolToPathwayKEGG.objects.filter(geneSymbol__in=geneList).values_list('geneSymbol').distinct())
PANTHER = len(
geneSymbolToPathwayPANTHER.objects.filter(geneSymbol__in=geneList).values_list('geneSymbol').distinct())
REACTOME = len(
geneSymbolToPathwayREACTOME.objects.filter(geneSymbol__in=geneList).values_list('geneSymbol').distinct())
print len(geneList)
print bpNum, ccNum, mfNum, BBID, KEGG, PANTHER, REACTOME
#used to visualize the bar width
maxGO = max(bpNum, ccNum, mfNum)
maxPathway = max(BBID, KEGG, PANTHER, REACTOME)
return render_to_response('analysis/davidResult.html',
{'geneList': ','.join(geneList), 'bpNum': bpNum, 'ccNum': ccNum, 'mfNum': mfNum,
'BBID': BBID,
"KEGG": KEGG, "PANTHER": PANTHER, 'REACTOME': REACTOME, 'maxGO': maxGO,
'maxPathway': maxPathway})
def downAnnotationReport(request):
if 'type' in request.GET and 'pathogen' in request.GET: #
#get gene list
pathogen = request.GET['pathogen'].split(',')
speciesList = []
articleList = []
for item in pathogen:
if item.startswith('species'):
speciesList.append(item[item.find('_') + 1:])
if item.startswith('article'):
articleList.append(item[item.find('_') + 1:])
        # An article may cover several species and a species may appear in several articles. So if a species is selected,
        # all articles under it must be selected too; if an article is selected, we must use AND between it and its species!!!
qTotal = Q()
for item in articleList:
speciesItem = item[0:item.find('_')]
pubmedIdItem = item[item.find('_') + 1:]
qTotal = qTotal | (Q(speciesTaxonomy=speciesItem) & Q(pubmedId=pubmedIdItem))
qTotal = qTotal | Q(speciesTaxonomy__in=speciesList)
result = allEHFPI.objects.filter(qTotal)
geneList = []
for item in result:
geneList.append(item.humanHomolog)
geneList = list(set(geneList))
if '' in geneList:
geneList.remove('')
#get annotations
type = request.GET['type']
if type == 'bp':
AnnoResult = geneSymbolToGOBP.objects.filter(geneSymbol__in=geneList).values()
selectcolumn = ['geneSymbol', 'gobp', 'gobpAnnotation']
# code from download app
fieldDesStatistics = {'geneSymbol': 'Gene Symbol',
'gobp': 'GO id',
'gobpAnnotation': 'Annotation'
}
elif type == 'cc':
AnnoResult = geneSymbolToGOCC.objects.filter(geneSymbol__in=geneList).values()
selectcolumn = ['geneSymbol', 'gocc', 'goccAnnotation']
# code from download app
fieldDesStatistics = {'geneSymbol': 'Gene Symbol',
'gocc': 'GO id',
'goccAnnotation': 'Annotation'
}
elif type == 'mf':
AnnoResult = geneSymbolToGOMF.objects.filter(geneSymbol__in=geneList).values()
selectcolumn = ['geneSymbol', 'gomf', 'gomfAnnotation']
# code from download app
fieldDesStatistics = {'geneSymbol': 'Gene Symbol',
'gomf': 'GO id',
'gomfAnnotation': 'Annotation'
}
elif type == 'BBID':
AnnoResult = geneSymbolToPathwayBBID.objects.filter(geneSymbol__in=geneList).values()
selectcolumn = ['geneSymbol', 'BBID']
# code from download app
fieldDesStatistics = {'geneSymbol': 'Gene Symbol',
'BBID': 'BBID',
}
elif type == 'KEGG':
AnnoResult = geneSymbolToPathwayKEGG.objects.filter(geneSymbol__in=geneList).values()
selectcolumn = ['geneSymbol', 'KEGG','KEGGAnnotation']
# code from download app
fieldDesStatistics = {'geneSymbol': 'Gene Symbol',
'KEGG': 'KEGG ID',
'KEGGAnnotation':'KEGG Annotation'
}
elif type == 'PANTHER':
AnnoResult = geneSymbolToPathwayPANTHER.objects.filter(geneSymbol__in=geneList).values()
selectcolumn = ['geneSymbol', 'PANTHER','PANTHERAnnotation']
# code from download app
fieldDesStatistics = {'geneSymbol': 'Gene Symbol',
'PANTHER': 'PANTHER ID',
'PANTHERAnnotation':'PANTHER Annotation'
}
elif type == 'REACTOME':
AnnoResult = geneSymbolToPathwayREACTOME.objects.filter(geneSymbol__in=geneList).values()
selectcolumn = ['geneSymbol', 'REACTOME','REACTOMEAnnotation']
# code from download app
fieldDesStatistics = {'geneSymbol': 'Gene Symbol',
'REACTOME': 'REACTOME ID',
'REACTOMEAnnotation':'REACTOME Annotation'
}
else:
pass
response = HttpResponse(content_type="text/csv")
response.write('\xEF\xBB\xBF')
response['Content-Disposition'] = 'attachment; filename=david.csv'
writer = csv.writer(response)
# store row title description
rowTitle = []
for item in selectcolumn:
rowTitle.append(fieldDesStatistics[item])
writer.writerow(rowTitle)
#get data from database
for item in AnnoResult:
res = []
for i in selectcolumn:
res.append(smart_str(item[i]))
writer.writerow(res)
return response
return HttpResponseRedirect(
        URL_PREFIX + '/analysis/gea/')  # redirect back to the query page rather than the result page
def overlapIndex(request):
return render_to_response('analysis/overlap.html')
#this function process the file and return the gene symbol list
def handle_uploaded_file(uploadFile):
geneList = []
if 'file' in uploadFile:
oriText = ''
for chunk in uploadFile['file'].chunks():
oriText += chunk
oriText = oriText.strip()
geneListTemp = []
if oriText.find(',') > 0:
geneListTemp = oriText.split(',')
elif oriText.find('\r') > 0:
geneListTemp = oriText.split('\r')
elif oriText.find('\n') > 0:
geneListTemp = oriText.split('\n')
else:
geneListTemp = oriText.split('\r\n')
geneList = []
for item in geneListTemp:
geneList.append(item.strip())
geneList = list(set(geneList))
if 'None' in geneList:
geneList.remove('None')
if '' in geneList:
geneList.remove('')
return geneList
def overlapNetwork(request):
#jsTreeList store pathogen tree
#geneList store input gene List
#request.FILES['file'] store uploaded file
if request.method == 'POST':
form = networkForm(request.POST, request.FILES)
if form.is_valid():
form.save()
#process jsTreeList and get gene list
geneList1 = []
#we want to put species submitted above all others, so we must store it in a separate list
aboveSpeciesList = []
pathogen = request.POST['jsTreeList'].strip()
name = '-'.join(pathogen)
name = 'network-' + hashlib.md5(name).hexdigest()
if len(pathogen):
if os.path.isfile(PKL_DIR + '/' + name + '.pkl'): #have pickle
file_out = file(PKL_DIR + '/' + name + '.pkl', 'rb')
geneList1 = pickle.load(file_out)
file_out.close()
else:
pathogen = pathogen.split(',')
speciesList = []
articleList = []
for item in pathogen:
if item.startswith('species'):
speciesList.append(item[item.find('_') + 1:])
if item.startswith('article'):
articleList.append(item[item.find('_') + 1:])
idMap = idNameMap.objects.filter(acc__in=speciesList, type='species')
speciesNameList = []
for item in idMap:
speciesNameList.append(item.name)
if item.name not in aboveSpeciesList:
aboveSpeciesList.append(item.name)
idMap = idNameMap.objects.filter(type='species')
speciesDict = {}
for item in idMap:
speciesDict[item.acc] = item.name
                    # An article may cover several species and a species may appear in several articles. So if a species is selected,
                    # all articles under it must be selected too; if an article is selected, we must use AND between it and its species!!!
taxo = taxonomy.objects.all()
articleDict = defaultdict(list)
for item in taxo: #store pubmed id for each species
articleDict[item.species].append(item.pubmedId)
for key, value in articleDict.items():
articleDict[key] = list(set(value))
qTotal = Q()
for item in articleList:
speciesItem = item[0:item.find('_')]
if speciesDict[speciesItem] not in aboveSpeciesList:
aboveSpeciesList.append(speciesDict[speciesItem])
pubmedIdItem = item[item.find('_') + 1:]
qTotal = qTotal | (Q(species=speciesDict[speciesItem]) & Q(pubmedId=pubmedIdItem))
qTotal = qTotal | Q(species__in=speciesNameList)
result = allEHFPI.objects.filter(qTotal)
for item in result:
geneList1.append(item.humanHomolog) #not gene symbol, since it contains dro.
geneList1 = list(set(geneList1))
#generate pickle
file_network = file(PKL_DIR + '/' + name + '.pkl', 'wb')
pickle.dump(geneList1, file_network, True)
file_network.close()
geneList1 = list(set(geneList1))
if '' in geneList1:
geneList1.remove('')
#process geneList
geneList2Temp = request.POST['geneList'].strip()
geneList2 = []
if len(geneList2Temp):
if geneList2Temp.find(',') > 0:
geneList2Temp = geneList2Temp.split(',')
elif geneList2Temp.find('\r') > 0:
geneList2Temp = geneList2Temp.split('\r')
elif geneList2Temp.find('\n') > 0:
geneList2Temp = geneList2Temp.split('\n')
else:
geneList2Temp = geneList2Temp.split('\r\n')
for item in geneList2Temp:
geneList2.append(item.strip())
geneList2 = list(set(geneList2))
if '' in geneList2:
geneList2.remove('')
#parse uploaded file
geneList3 = handle_uploaded_file(request.FILES)
geneList = geneList1 + geneList2 + geneList3
geneList = list(set(geneList))
return render_to_response('analysis/overlapNetworkOthers.html',
{'geneList': ','.join(geneList), 'aboveSpeciesList': ';'.join(aboveSpeciesList)},
context_instance=RequestContext(request))
else:
form = networkForm()
if os.path.isfile(PKL_DIR + '/network.pkl'): #have pickle
file_out = file(PKL_DIR + '/network.pkl', 'rb')
tree = pickle.load(file_out)
tree_taxonomy = pickle.load(file_out)
badge_taxonomy = pickle.load(file_out)
file_out.close()
else:
tree, tree_taxonomy, badge_taxonomy = generateTree()
#generate pickle
file_network = file(PKL_DIR + '/network.pkl', 'wb')
pickle.dump(tree, file_network, True)
pickle.dump(tree_taxonomy, file_network, True)
pickle.dump(badge_taxonomy, file_network, True)
file_network.close()
return render_to_response('analysis/overlapNetwork.html',
{'tree': tree, 'tree_taxonomy': tree_taxonomy, 'form': form,
'badge_taxonomy': badge_taxonomy}, context_instance=RequestContext(request))
def displayNetwork(request):
if request.method == 'POST' and 'text' in request.POST:
aboveSpeciesList = []
if 'aboveSpeciesList' in request.POST:
tmp = request.POST['aboveSpeciesList'].strip()
if len(tmp):
if ';' in tmp:
aboveSpeciesList = tmp.split(';')
else:
aboveSpeciesList.append(tmp)
genes = request.POST['text'].split(',')
geneList = []
for i in genes:
if len(i.strip()):
geneList.append(i.strip())
geneList = list(set(geneList)) # query gene list
if '' in geneList:
geneList.remove('')
if len(geneList):
result = allEHFPI.objects.filter(humanHomolog__in=geneList).values() #relation list we get
gene_result_tuble = allEHFPI.objects.filter(humanHomolog__in=geneList).values_list(
'humanHomolog').distinct()
pathogen_result_tuble = allEHFPI.objects.filter(humanHomolog__in=geneList).values_list('species').distinct()
gene_result = [] # gene list we get
pathogen_result = [] # pathogen list we get
for i in gene_result_tuble:
gene_result.append(i[0])
for i in pathogen_result_tuble:
pathogen_result.append(i[0])
# generate interaction map start
jsonRes = [] # a list
lineIndex = 0
# generate json file
for item in geneList: # generate gene node
boolIn = 0 # upper case how????
itemStandard = ""
for k in gene_result:
if k.upper() == item.upper():
boolIn = 1
itemStandard = k
if boolIn: # gene in database
node = {}
node['name'] = itemStandard # name attr
#node['name'] = ''
node['id'] = itemStandard #id attr
data = {} #data attr
data['$type'] = 'circle'
data['nodeType'] = 'gene'
for j in result:
if j['humanHomolog'].upper() == item.upper():
data['des'] = j['geneDescription']
break
# set adjacencies attr
adjacencies = []
adjacenciesNumber = 0
for j in result:
if j['humanHomolog'].upper() == item.upper():
relation = {}
relation['nodeTo'] = j['species']
relation['nodeFrom'] = itemStandard
nodeData = {} # can overwrite
lineIndex += 1
#nodeData["$labelid"] = "lineIndex"+str(lineIndex)
#nodeData["$labeltext"] = j['phenotype']
                            # phenotype has three types: inhibit, enhance, other
if j['phenotype'] == 'Inhibited infection':
nodeData["$color"] = "#8b0000"
elif j['phenotype'] == 'Increased infection':
nodeData["$color"] = "#339900"
else: #other type,neither inhibit nor enhance
nodeData["$color"] = "#23A4FF"
relation['data'] = nodeData
adjacencies.append(relation)
adjacenciesNumber = adjacenciesNumber + 1 #calculate common and specific gene
node['adjacencies'] = adjacencies
if adjacenciesNumber > 1:
data['$color'] = '#416D9C'
else:
data['$color'] = '#800080'
node['data'] = data
jsonRes.append(node)
else: # solidate node
node = {}
node['name'] = item # name attr
node['id'] = item #id attr
data = {} #data attr
data['$color'] = 'red'
data['$type'] = 'triangle'
data['des'] = 'this gene is not in EHFPI'
data['nodeType'] = 'gene'
node['data'] = data
adjacencies = []
node['adjacencies'] = adjacencies
jsonRes.append(node)
for item in pathogen_result: # generate pathogen node
node = {}
node['name'] = item # name attr
#node['name'] = ''
node['id'] = item #id attr
data = {} #data attr
data['$color'] = '#EBB056'
data['$type'] = 'star'
data['$dim'] = 8
data['nodeType'] = 'species'
strain_list = []
for j in result:
if j['species'].upper() == item.upper():
strain_list.append(j['strain'])
strain_list = list(set(strain_list))
data['des'] = '_'.join(strain_list)
node['data'] = data
# set adjacencies attr
adjacencies = []
for j in result:
if j['species'].upper() == item.upper():
relation = {}
relation['nodeTo'] = j['humanHomolog']
relation['nodeFrom'] = item
nodeData = {} # can overwrite
relation['data'] = nodeData
adjacencies.append(relation)
node['adjacencies'] = adjacencies
jsonRes.append(node)
toJson = json.dumps(jsonRes)
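            # Example of the serialised structure (all values below are illustrative only):
            # [{"name": "TP53", "id": "TP53",
            #   "data": {"$type": "circle", "nodeType": "gene", "des": "...", "$color": "#416D9C"},
            #   "adjacencies": [{"nodeTo": "Influenza A virus", "nodeFrom": "TP53",
            #                    "data": {"$color": "#8b0000"}}]}, ...]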
# generate interaction map end
# calculate gene number of each species
speciesNumber = defaultdict(list)
for item in result:
speciesNumber[item['species']].append(item['humanHomolog'])
speciesNumber = dict(speciesNumber)
for key, value in speciesNumber.items():
speciesNumber[key] = len(list(set(value)))
#store the species submitted above
speciesNumberAbove = {}
for item in aboveSpeciesList:
if item in speciesNumber.keys():
speciesNumberAbove[item] = speciesNumber[item]
speciesNumber.pop(item)
# calculate gene number of each species end
return render_to_response('analysis/displayNetwork.js',
{'toJson': toJson, 'speciesNumber': sorted(speciesNumber.iteritems()),
'speciesNumberAbove': sorted(speciesNumberAbove.iteritems())},
context_instance=RequestContext(request))
else: # empty
return HttpResponseRedirect(URL_PREFIX + '/analysis/overlap/overlapNetwork',
context_instance=RequestContext(request))
else:
return HttpResponseRedirect(URL_PREFIX + '/analysis/overlap/overlapNetwork',
context_instance=RequestContext(request))
#added : 20140812
#function: download the EHF-pathogen graph as a csv file
def downloadCSV(request):
if request.method == 'POST' and 'text' in request.POST:
genes = request.POST['text'].split(',')
geneList = []
for i in genes:
if len(i.strip()):
geneList.append(i.strip())
geneList = list(set(geneList)) # query gene list
if '' in geneList:
geneList.remove('')
if len(geneList):
result_specific = overlapStatistics.objects.filter(geneSymbol__in=geneList, speciesNumber=1).order_by(
'geneSymbol').values()
result_common = overlapStatistics.objects.filter(geneSymbol__in=geneList, speciesNumber__gt=1).order_by(
'geneSymbol').values()
#now generate csv file
selectcolumnCSV = ['geneSymbol', 'speciesNumber', 'speciesList']
# code from download app
fieldDesCSV = {'geneSymbol': 'Gene Symbol',
'speciesNumber': 'Pathogen Number',
'speciesList': 'Pathogen List'
}
response = HttpResponse(content_type="text/csv")
response.write('\xEF\xBB\xBF')
response['Content-Disposition'] = 'attachment; filename=network.csv'
writer = csv.writer(response)
# store row title description
#common gene summary
if len(result_common):
writer.writerow(['Common Gene Summary'])
rowTitle = []
for item in selectcolumnCSV:
rowTitle.append(fieldDesCSV[item])
writer.writerow(rowTitle)
#get data from database
for item in result_common:
res = []
for i in selectcolumnCSV:
res.append(smart_str(item[i]))
writer.writerow(res)
writer.writerow([])
# store row title description
# specific gene summary
if len(result_specific):
writer.writerow(['Specific Gene Summary'])
rowTitle = []
for item in selectcolumnCSV:
rowTitle.append(fieldDesCSV[item])
writer.writerow(rowTitle)
#get data from database
for item in result_specific:
res = []
for i in selectcolumnCSV:
res.append(smart_str(item[i]))
writer.writerow(res)
return response
return HttpResponseRedirect(URL_PREFIX + '/analysis/overlap/overlapNetwork',
context_instance=RequestContext(request))
def overlapHeatMap(request):
if os.path.isfile(PKL_DIR + '/overlap.pkl'): #have pickle
file_out = file(PKL_DIR + '/overlap.pkl', 'rb')
tree = pickle.load(file_out)
tree_taxonomy = pickle.load(file_out)
file_out.close()
else:
# construct tree tree
resultB = taxonomy.objects.filter(kingdom="bacteria").values()
resultV = taxonomy.objects.filter(kingdom="virus").values()
resultF = taxonomy.objects.filter(kingdom="fungi").values()
# generate tree view list
treeB = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
treeV = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
treeF = defaultdict(lambda: defaultdict(list))
#used to map group id
groupRes = idNameMap.objects.filter(type='group')
groupDic = {}
for item in groupRes:
groupDic[item.name] = item.acc
tree_taxonomy = {}
# bacteria
for item in resultB:
kingdom = item['kingdom']
group = item['group']
# virus has family, bacteria has genus
genus = item['genus']
species = item['species']
tree_taxonomy[kingdom] = item['kingdomTaxonomy']
tree_taxonomy[group] = groupDic[item['group']]
tree_taxonomy[genus] = item['genusTaxonomy']
tree_taxonomy[species] = item['speciesTaxonomy']
if species not in treeB[kingdom][group][genus]:
treeB[kingdom][group][genus].append(species)
# virus
for item in resultV:
kingdom = item['kingdom']
group = item['group']
# virus has family, bacteria has genus
family = item['family']
species = item['species']
tree_taxonomy[kingdom] = item['kingdomTaxonomy']
tree_taxonomy[group] = groupDic[item['group']]
tree_taxonomy[family] = item['familyTaxonomy']
tree_taxonomy[species] = item['speciesTaxonomy']
if species not in treeV[kingdom][group][family]:
treeV[kingdom][group][family].append(species)
# fungi
for item in resultF:
kingdom = item['kingdom']
# virus has family, bacteria has genus, fungi we only use genus, and it has no group info
genus = item['genus']
species = item['species']
tree_taxonomy[kingdom] = item['kingdomTaxonomy']
tree_taxonomy[genus] = item['genusTaxonomy']
tree_taxonomy[species] = item['speciesTaxonomy']
if species not in treeF[kingdom][genus]:
treeF[kingdom][genus].append(species)
# a three level tree for fungi, four level for bacteria and fungi
for obj in treeB:
treeB[obj].default_factory = None
for subItem in treeB[obj]:
treeB[obj][subItem].default_factory = None
for obj in treeV:
treeV[obj].default_factory = None
for subItem in treeV[obj]:
treeV[obj][subItem].default_factory = None
for obj in treeF:
treeF[obj].default_factory = None
treeB = dict(treeB)
treeV = dict(treeV)
treeF = dict(treeF)
tree = []
tree.append(treeB)
tree.append(treeV)
tree.append(treeF)
# print tree
# print tree_taxonomy
# for aa in tree:
# for kingdom,value in aa.items():
# print kingdom
# for species,strainList in value.items():
# print "++"+species
# for strain in strainList:
# print "++++"+strain
#generate pickle
file_overlap = file(PKL_DIR + '/overlap.pkl', 'wb')
pickle.dump(tree, file_overlap, True)
pickle.dump(tree_taxonomy, file_overlap, True)
file_overlap.close()
return render_to_response('analysis/overlapHeatMap.html', {'tree': tree, 'tree_taxonomy': tree_taxonomy},
context_instance=RequestContext(request))
def overlapHeatMapArticle(request):
result = taxonomy.objects.all()
article = {}
for item in result:
species = item.species
speciesTaxonomy = item.speciesTaxonomy #species name may have blanks, we use taxonomyId
pubmedId = item.pubmedId
pub = publication.objects.filter(pubmedId=pubmedId)[0]
firstAuthor = pub.firstAuthor
year = pub.year
title = pub.title
article[str(speciesTaxonomy) + '_' + str(pubmedId)] = species + ' [' + firstAuthor + '-' + str(
year) + '-' + title + ']'
return render_to_response('analysis/overlapHeatMapArticle.html', {'article': article},
context_instance=RequestContext(request))
def displayHeatMap(request):
if request.method == 'POST':
level = request.POST['level']
#print level
pathogenList = request.POST.getlist('pathogen[]')
#print pathogenList
name = '-'.join(pathogenList) #too long
name = level + '-' + hashlib.md5(name).hexdigest()
if len(pathogenList):
if os.path.isfile(PKL_DIR + '/' + name + '.pkl'): #have pickle
file_out = file(PKL_DIR + '/' + name + '.pkl', 'rb')
data = pickle.load(file_out)
file_out.close()
else:
# get the dict
result = idNameMap.objects.all()
taxonomyDict = {}
for item in result:
taxonomyDict[item.acc] = item.name
data = {}
if level == 'kingdom': # kingdom level
kingdomList = []
for item in pathogenList:
if item.startswith('kingdom'):
kingdomList.append(item[item.find('_') + 1:])
# construct the json struct
nodes = []
for i, item in enumerate(kingdomList):
node = {}
node['name'] = taxonomyDict[item]
node['acc'] = item # this is used to generate nice url in the heatmap
node['group'] = i + 1 # one kingdom, one group
nodes.append(node)
links = []
# print kingdomList
for i in range(0, len(kingdomList)):
for j in range(i, len(kingdomList)):
res = heatmapModel.objects.filter(
(Q(a=kingdomList[i]) & Q(b=kingdomList[j])) | (
Q(a=kingdomList[j]) & Q(b=kingdomList[i])))
link = {}
for item in res:
link['source'] = i
link['target'] = j
link['value'] = item.commonGeneNumber
link['commonGene'] = item.commonGeneList
links.append(link)
data['nodes'] = nodes
data['links'] = links
data['type'] = 'kingdom'
elif level == 'group': # group level, only one group is allowed, so we detect it in js
groupList = []
for item in pathogenList:
if item.startswith('group'):
groupList.append(item[item.find('_') + 1:])
# construct the json struct
nodes = []
for i, item in enumerate(groupList):
node = {}
node['name'] = taxonomyDict[item]
node['acc'] = item # this is used to generate nice url in the heatmap
node['group'] = i + 1 # one kingdom, one group
nodes.append(node)
links = []
# print kingdomList
for i in range(0, len(groupList)):
for j in range(i, len(groupList)):
res = heatmapModel.objects.filter(
(Q(a=groupList[i]) & Q(b=groupList[j])) | (Q(a=groupList[j]) & Q(b=groupList[i])))
link = {}
for item in res:
link['source'] = i
link['target'] = j
link['value'] = item.commonGeneNumber
link['commonGene'] = item.commonGeneList
links.append(link)
data['nodes'] = nodes
data['links'] = links
data['type'] = 'group'
elif level == 'family': # family level, only virus will be considered
familyList = []
for item in pathogenList:
if item.startswith('family'):
familyList.append(item[item.find('_') + 1:])
# construct the json struct
nodes = []
for i, item in enumerate(familyList):
node = {}
node['name'] = taxonomyDict[item]
node['acc'] = item # this is used to generate nice url in the heatmap
node['group'] = i + 1 # one family, one group, because only virus
nodes.append(node)
links = []
# print familyList
for i in range(0, len(familyList)):
for j in range(i, len(familyList)):
res = heatmapModel.objects.filter(
(Q(a=familyList[i]) & Q(b=familyList[j])) | (Q(a=familyList[j]) & Q(b=familyList[i])))
link = {}
for item in res:
link['source'] = i
link['target'] = j
link['value'] = item.commonGeneNumber
link['commonGene'] = item.commonGeneList
links.append(link)
data['nodes'] = nodes
data['links'] = links
data['type'] = 'family'
elif level == 'genus': # genus level
genusList = []
for item in pathogenList:
if item.startswith('genus'):
genusList.append(item[item.find('_') + 1:])
# construct the json struct
nodes = []
for i, item in enumerate(genusList):
node = {}
node['name'] = taxonomyDict[item]
node['acc'] = item # this is used to generate nice url in the heatmap
node['group'] = i + 1 # grouped based on family or genus
nodes.append(node)
links = []
# print genusList
for i in range(0, len(genusList)):
for j in range(i, len(genusList)):
res = heatmapModel.objects.filter(
(Q(a=genusList[i]) & Q(b=genusList[j])) | (Q(a=genusList[j]) & Q(b=genusList[i])))
link = {}
for item in res:
link['source'] = i
link['target'] = j
link['value'] = item.commonGeneNumber
link['commonGene'] = item.commonGeneList
links.append(link)
data['nodes'] = nodes
data['links'] = links
data['type'] = 'genus'
elif level == 'species': # species level
speciesList = []
for item in pathogenList:
if item.startswith('species'):
speciesList.append(item[item.find('_') + 1:])
# construct the json struct
nodes = []
group = {} #group id dict, acc:groupid
i = 0
for item in speciesList:
node = {}
node['name'] = taxonomyDict[item]
node['acc'] = item # this is used to generate nice url in the heatmap
parent = getParent(item)
# print parent
if group.has_key(parent):
node['group'] = group[parent] # one genus one group, otherwise only fungi and bacteria
else:
node['group'] = i
group[parent] = i
i += 1
nodes.append(node)
links = []
# print speciesList
for i in range(0, len(speciesList)):
for j in range(i, len(speciesList)):
res = heatmapModel.objects.filter(
(Q(a=speciesList[i]) & Q(b=speciesList[j])) | (
Q(a=speciesList[j]) & Q(b=speciesList[i])))
link = {}
for item in res:
link['source'] = i
link['target'] = j
link['value'] = item.commonGeneNumber
link['commonGene'] = item.commonGeneList
links.append(link)
data['nodes'] = nodes
data['links'] = links
data['type'] = 'species'
else:
return render_to_response('analysis/overlapHeatMap.html', context_instance=RequestContext(request))
#generate pickle
file_heatmap = file(PKL_DIR + '/' + name + '.pkl', 'wb')
pickle.dump(data, file_heatmap, True)
file_heatmap.close()
return render_to_response('analysis/displayHeatMap.js', {'data': json.dumps(data)},
context_instance=RequestContext(request))
return render_to_response('analysis/overlapHeatMap.html', context_instance=RequestContext(request))
# article tree
def displayHeatMapArticle(request):
if request.method == 'POST':
articleList = request.POST.getlist('article[]')
        name = '-'.join(articleList) # the joined name can be too long to use directly, so hash it below
name = "article-" + hashlib.md5(name).hexdigest()
# print articleList
if len(articleList):
if os.path.isfile(PKL_DIR + '/' + name + '.pkl'): #have pickle
file_out = file(PKL_DIR + '/' + name + '.pkl', 'rb')
data = pickle.load(file_out)
file_out.close()
else:
data = {}
article = []
for item in articleList:
if item.startswith('article'): #article: speciesTaxonomy_pubmedId
article.append(item[item.find('_') + 1:])
#display pathogen name
result = idNameMap.objects.all()
taxonomyDict = {}
for item in result:
taxonomyDict[item.acc] = item.name
# construct the json struct
nodes = []
for i, item in enumerate(article):
node = {}
spe = item[0:item.find('_')]
art = item[item.find('_') + 1:]
res = publication.objects.filter(pubmedId=art)[0]
#print res.firstAuthor, res.year
node['name'] = taxonomyDict[spe] + '[' + res.firstAuthor + ',' + str(res.year) + ']'
node['acc'] = item # this is used to generate nice url in the heatmap
                    node['group'] = i + 1 # one article, one group
nodes.append(node)
links = []
# print article
for i in range(0, len(article)):
for j in range(i, len(article)):
res = heatmapModel.objects.filter(
(Q(a=article[i]) & Q(b=article[j])) | (Q(a=article[j]) & Q(b=article[i])))
link = {}
for item in res:
link['source'] = i
link['target'] = j
link['value'] = item.commonGeneNumber
link['commonGene'] = item.commonGeneList
links.append(link)
data['nodes'] = nodes
data['links'] = links
data['type'] = 'article'
# print data
#generate pickle
file_heatmap = file(PKL_DIR + '/' + name + '.pkl', 'wb')
pickle.dump(data, file_heatmap, True)
file_heatmap.close()
return render_to_response('analysis/displayHeatMap.js', {'data': json.dumps(data)},
context_instance=RequestContext(request))
else:
return render_to_response('analysis/overlapHeatMapArticle.html', context_instance=RequestContext(request))
#given a species taxonomy id, get the genus or family parent taxonomy id
def getParent(child):
    familyTaxonomy = ''
    genusTaxonomy = ''
    result = taxonomy.objects.filter(speciesTaxonomy=child)
    for res in result:
        familyTaxonomy = res.familyTaxonomy
        genusTaxonomy = res.genusTaxonomy
    return familyTaxonomy + genusTaxonomy
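# Usage sketch (taxonomy id is hypothetical): getParent('694009') returns the concatenated
# familyTaxonomy + genusTaxonomy string of that species; the value is only used as an opaque
# grouping key when assigning species-level heatmap nodes to groups above.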
# return a list of result in the heatmap click
def heatMapResult(request):
if request.method == 'GET':
#change columns?
if 'columns' in request.GET:
selectedColumns_tmp = request.GET['columns']
selectedColumns = selectedColumns_tmp.split(',')
            request.session['has_changed'] = True # mark in the session that the user has customised the columns
request.session['selectedColumns'] = selectedColumns #store the columns
if 'has_changed' not in request.session:
defaultColumns = ['ehfpiAcc', 'geneSymbol', 'targetOrganism', 'strain', 'title']
request.session['selectedColumns'] = defaultColumns #store the columns
a = request.GET['a']
b = request.GET['b']
type = request.GET['type']
searchList = []
searchList.append(a)
searchList.append(b)
searchList = list(set(searchList))
searchName = []
if type == 'article': #for article, species_taxonomy, pubmed id
speciesList = []
articleList = []
for item in searchList:
speciesTemp = item[0:item.find('_')]
articleTemp = item[item.find('_') + 1:]
if speciesTemp not in speciesList:
speciesList.append(speciesTemp)
if articleTemp not in articleList:
articleList.append(articleTemp)
elif type == 'group':
idMap = idNameMap.objects.filter(acc__in=searchList)
for item in idMap:
if item.acc in searchList:
searchName.append(item.name)
else:
searchName = searchList
res = heatmapModel.objects.filter((Q(a=a) & Q(b=b)) | (Q(a=b) & Q(b=a)))
geneList_temp = ''
for item in res:
geneList_temp = smart_unicode(item.commonGeneList)
geneList = geneList_temp.split(';')
result = ''
if type == 'kingdom':
result = allEHFPI.objects.filter(kingdomTaxonomy__in=searchName, humanHomolog__in=geneList)
# print result
elif type == 'group':
result = allEHFPI.objects.filter(group__in=searchName, humanHomolog__in=geneList)
elif type == 'family':
result = allEHFPI.objects.filter(familyTaxonomy__in=searchName, humanHomolog__in=geneList)
elif type == 'genus':
result = allEHFPI.objects.filter(genusTaxonomy__in=searchName, humanHomolog__in=geneList)
elif type == 'species':
result = allEHFPI.objects.filter(speciesTaxonomy__in=searchName, humanHomolog__in=geneList)
elif type == 'article':
result = allEHFPI.objects.filter(speciesTaxonomy__in=speciesList, pubmedId__in=articleList,
humanHomolog__in=geneList)
#print result
else:
result = ''
#custom display columns start!
columns = []
for i in range(0, len(field), 1):
columns.append([field[i], fieldDes[i]])
#custom display columns end
#columns to display start!
displayColumns = request.session['selectedColumns']
displayColumnsDic = []
for item in displayColumns:
displayColumnsDic.append([item, fieldDic[item]])
#columns to display end
# use the method of advanced search
idsList = []
for item in result:
idsList.append(str(item.ehfpiAcc))
ids = ','.join(idsList)
if (len(result)):
#sort the column
order = request.GET.get('order_by', 'ehfpiAcc')
result = result.order_by(order)
#get publication number and EHF gene number
publicationNum = len(set(result.values_list('title')))
geneList1 = []
for item in result.values_list('humanHomolog'):
geneList1.append(item[0].strip().upper())
geneList1 = list(set(geneList1))
if '' in geneList1:
geneList1.remove('')
ehfNum = len(geneList1)
#end
return render_to_response('search/heatMapSearchResult.html',
{'result': result, 'publicationNum': publicationNum, 'ehfNum': ehfNum,
'columns': columns, 'displayColumnsDic': displayColumnsDic, 'ids': ids},
context_instance=RequestContext(request))
else:
return render_to_response('analysis/overlapHeatMap.html', context_instance=RequestContext(request))
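# Illustrative query string for heatMapResult (accession values are hypothetical):
#   /heatMapResult?a=11320&b=211044&type=species&order_by=ehfpiAcc
# 'a' and 'b' are the two clicked heatmap cells, 'type' matches the tree level
# ('kingdom', 'group', 'family', 'genus', 'species' or 'article'), and the optional
# 'columns' parameter carries the comma-separated display columns stored in the session.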
#added: 20140710
#return the table which lists the pathogens associated with a specific gene; this can facilitate broad-spectrum drug design
def statistics(request):
    '''# We could recalculate every time, but storing the data in the database is a better choice, so we can fetch it directly
result = allEHFPI.objects.all().values('humanHomolog','strain','species')
numberDict = defaultdict(int)
speciesDict = defaultdict(list)
for item in result:
if item['species'] not in speciesDict[item['humanHomolog']]:
numberDict[item['humanHomolog']] += 1
speciesDict[item['humanHomolog']].append(item['species'])
if '' in numberDict.keys():
numberDict.pop('')
if '' in speciesDict.keys():
speciesDict.pop('')
numberDict = dict(numberDict)
speciesDict = dict(speciesDict)
for key,value in numberDict.items():
p1 = overlapStatistics(geneSymbol=key,speciesNumber=value,speciesList=','.join(speciesDict[key]))
p1.save()
'''
result = overlapStatistics.objects.all().order_by('-speciesNumber')
if (len(result)):
order = request.GET.get('order_by', '-speciesNumber')
result = result.order_by(order)
if 'geneSymbol' in request.GET:
geneSymbol = request.GET['geneSymbol']
geneSymbolListTmp = []
if ',' in geneSymbol:
geneSymbolListTmp = geneSymbol.split(',')
elif ';' in geneSymbol:
geneSymbolListTmp = geneSymbol.split(';')
else:
geneSymbolListTmp.append(geneSymbol)
geneSymbolList = []
for item in geneSymbolListTmp:
if item.strip() != '':
geneSymbolList.append(item.strip())
if len(geneSymbolList):
result = result.filter(geneSymbol__in=geneSymbolList)
idsList = []
for item in result:
idsList.append(item.geneSymbol)
ids = ','.join(idsList)
interactions = len(result)
return render_to_response('analysis/overlapStatistics.html',
{'result': result, 'ids': ids, 'interactions': interactions},
context_instance=RequestContext(request))
#download the gene and related pathogen list
def downloadStatistics(request):
if request.method == 'POST':
print 'hello'
if 'selected[]' in request.POST:
selected = request.POST.getlist('selected[]')
print selected
data = overlapStatistics.objects.filter(geneSymbol__in=selected).values()
selectcolumn = ['geneSymbol', 'speciesNumber', 'speciesList']
# code from download app
fieldDesStatistics = {'geneSymbol': 'Gene Symbol',
'speciesNumber': 'pathogen Number',
'speciesList': 'pathogen List'
}
response = HttpResponse(content_type="text/csv")
response.write('\xEF\xBB\xBF')
response['Content-Disposition'] = 'attachment; filename=statistics.csv'
writer = csv.writer(response)
# store row title description
rowTitle = []
for item in selectcolumn:
rowTitle.append(fieldDesStatistics[item])
writer.writerow(rowTitle)
#get data from database
#data = allEHFPI.objects.values()
for item in data:
res = []
for i in selectcolumn:
res.append(smart_str(item[i]))
writer.writerow(res)
return response
return HttpResponseRedirect(URL_PREFIX)
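# Illustrative first lines of the generated statistics.csv (the data row is hypothetical;
# the header comes from fieldDesStatistics above):
#   Gene Symbol,pathogen Number,pathogen List
#   ACTB,7,"Influenza A virus,..."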
#this function gives the graph representation of primary as well as confirmed hits distribution
def distribution(request):
'''
result = allEHFPI.objects.exclude(humanHomolog='').values('humanHomolog','species','geneNote')
# store intermediate variable
allNumberDict = defaultdict(int)
allSpeciesDict = defaultdict(list)
confirmedNumberDict = defaultdict(int)
confirmedSpeciesDict = defaultdict(list)
primaryNumberDict = defaultdict(int)
primarySpeciesDict = defaultdict(list)
for item in result:
if item['species'] not in allSpeciesDict[item['humanHomolog']]:
allNumberDict[item['humanHomolog']] += 1
allSpeciesDict[item['humanHomolog']].append(item['species'])
if item['geneNote'] == 'confirmed hits': #confirmed hits
if item['species'] not in confirmedSpeciesDict[item['humanHomolog']]:
confirmedNumberDict[item['humanHomolog']] += 1
confirmedSpeciesDict[item['humanHomolog']].append(item['species'])
else:
if item['species'] not in primarySpeciesDict[item['humanHomolog']]:
primaryNumberDict[item['humanHomolog']] += 1
primarySpeciesDict[item['humanHomolog']].append(item['species'])
print confirmedNumberDict
print primaryNumberDict
#next we convert the intermediate into final result
allNumberDict = dict(allNumberDict)
confirmedNumberDict = dict(confirmedNumberDict)
primaryNumberDict = dict(primaryNumberDict)
#for all
allFinalNumber = defaultdict(int)
allFinalList = defaultdict(list)
for key,value in allNumberDict.items():
allFinalNumber[value] += 1
allFinalList[value].append(key)
allFinalNumber = dict(allFinalNumber)
allFinalList = dict(allFinalList)
for key,value in allFinalNumber.items():
p1 = overlapDistribution(pathogenNumber=key,geneNumber=value,geneList=','.join(allFinalList[key]),type='all')
p1.save()
#for confirmed
confirmedFinalNumber = defaultdict(int)
confirmedFinalList = defaultdict(list)
for key,value in confirmedNumberDict.items():
confirmedFinalNumber[value] += 1
confirmedFinalList[value].append(key)
confirmedFinalNumber = dict(confirmedFinalNumber)
confirmedFinalList = dict(confirmedFinalList)
for key,value in confirmedFinalNumber.items():
p1 = overlapDistribution(pathogenNumber=key,geneNumber=value,geneList=','.join(confirmedFinalList[key]),type='confirmed hits')
p1.save()
#for primary
primaryFinalNumber = defaultdict(int)
primaryFinalList = defaultdict(list)
for key,value in primaryNumberDict.items():
primaryFinalNumber[value] += 1
primaryFinalList[value].append(key)
primaryFinalNumber = dict(primaryFinalNumber)
primaryFinalList = dict(primaryFinalList)
for key,value in primaryFinalNumber.items():
p1 = overlapDistribution(pathogenNumber=key,geneNumber=value,geneList=','.join(primaryFinalList[key]),type='primary hits')
p1.save()
'''
dsAll = DataPool(
series=
[{'options': {
'source': overlapDistribution.objects.filter(type='all')},
'terms': [
'pathogenNumber',
'geneNumber'
]}
])
allColumn = Chart(
datasource=dsAll,
series_options=
[{'options': {
'type': 'column'},
'terms': {
'pathogenNumber': [
'geneNumber']
}}],
chart_options=
{
'chart': {
'backgroundColor': '#F3F3FF',
'borderWidth': 1,
'borderRadius': 5,
'plotBackgroundColor': '#ffffff',
'plotShadow': 'false',
'plotBorderWidth': 0,
'plotBorderColor': 'black',
'spacingRight': 30
},
'title': {
'text': 'Distribution of EHFs',
'style': {
'fontWeight': 'bold'
}
},
'subtitle': {
'text': 'all EHF genes (confirmed and primary hits)'
},
'xAxis': {
'title': {
'text': 'Pathogen Number'},
'gridLineWidth': 1,
'labels': {
'style': {
'color': 'black'
}
},
'lineColor': 'black',
'lineWidth': 1
},
'yAxis': {
'title': {
'text': 'EHF Gene Number'},
'gridLineWidth': 1,
'minorTickInterval': 'auto',
'type': 'logarithmic', #'linear', 'logarithmic' and 'datetime'
'labels': {
'style': {
'color': 'black'
}
},
'lineColor': 'black',
'lineWidth': 1
},
'tooltip': {
'backgroundColor': '#ffffff',
'borderColor': '#4572A7',
'borderRadius': 2,
'borderWidth': 1
},
'legend': {
'align': 'left',
'verticalAlign': 'top',
'floating': 'true'
},
'plotOptions': {
'column': {
'pointPadding': 0.2,
'borderWidth': 1,
'color': '#4572A7',
},
'series': {
'shadow': 'true',
'dataLabels': {
'enabled': 'true',
'color': '#4572A7'
}
}
},
})
dsPrimary = DataPool(
series=
[{'options': {
'source': overlapDistribution.objects.filter(type='primary hits')},
'terms': [
'pathogenNumber',
'geneNumber'
]}
])
primaryColumn = Chart(
datasource=dsPrimary,
series_options=
[{'options': {
'type': 'column'
},
'terms': {
'pathogenNumber': ['geneNumber']
}
}],
chart_options=
{
'chart': {
'backgroundColor': '#F3F3FF',
'borderWidth': 1,
'borderRadius': 5,
'plotBackgroundColor': '#ffffff',
'plotShadow': 'false',
'plotBorderWidth': 0,
'plotBorderColor': 'black',
'spacingRight': 30
},
'title': {
'text': 'Distribution of EHFs',
'style': {
'fontWeight': 'bold'
}
},
'subtitle': {
'text': 'primary hits'
},
'xAxis': {
'title': {
'text': 'Pathogen Number'},
'gridLineWidth': 1,
'labels': {
'style': {
'color': 'black'
}
},
'lineColor': 'black',
'lineWidth': 1
},
'yAxis': {
'title': {
'text': 'EHF Gene Number'},
'gridLineWidth': 1,
'minorTickInterval': 'auto',
'type': 'linear', #'linear', 'logarithmic' and 'datetime'
'labels': {
'style': {
'color': 'black'
}
},
'lineColor': 'black',
'lineWidth': 1
},
'tooltip': {
'backgroundColor': '#ffffff',
'borderColor': '#4572A7',
'borderRadius': 2,
'borderWidth': 1
},
'legend': {
'align': 'left',
'verticalAlign': 'top',
'floating': 'true'
},
'plotOptions': {
'column': {
'pointPadding': 0.2,
'borderWidth': 1,
'color': '#4572A7',
},
'series': {
'shadow': 'true',
'dataLabels': {
'enabled': 'true',
'color': '#4572A7'
}
}
},
})
resultAll = overlapDistribution.objects.filter(type='all').filter(pathogenNumber__gte=4).order_by('-pathogenNumber')
resultPrimary = overlapDistribution.objects.filter(type='primary hits').filter(pathogenNumber__gte=3).order_by(
'-pathogenNumber')
return render_to_response('analysis/distribution.html',
{'charts': [allColumn, primaryColumn], 'resultAll': resultAll,
'resultPrimary': resultPrimary},
context_instance=RequestContext(request))
def pip(request):
    if 'geneList' in request.GET: # handled via GET so pagination keeps the query string; otherwise we would need our own pagination or a redirect to another view
geneList1 = []
#parse geneList
        if ',' in request.GET['geneList']:
            genes = request.GET['geneList'].split(',')
        elif '\r\n' in request.GET['geneList']:
            genes = request.GET['geneList'].split('\r\n')
        elif '\n' in request.GET['geneList']:
            genes = request.GET['geneList'].split('\n')
        else:
            genes = request.GET['geneList'].split('\r')
for i in genes:
if len(i.strip()):
geneList1.append(i.strip())
geneList1 = list(set(geneList1)) # query gene list
#parse pathogen
geneList2 = []
pathogen = request.GET.getlist('pathogen[]')
speciesList = []
for item in pathogen:
if item.startswith('species'):
speciesList.append(item[item.find('_') + 1:])
result = allEHFPI.objects.filter(speciesTaxonomy__in=speciesList)
for item in result:
geneList2.append(item.humanHomolog)
geneList2 = list(set(geneList2))
#print geneList
geneList = geneList1 + geneList2
geneList = list(set(geneList))
if '' in geneList:
geneList.remove('')
inEHFPI = allEHFPI.objects.filter(humanHomolog__in=geneList)
ehfpiList = []
GeneNumberIn = 0 #number of genes in EHFPI
for item in inEHFPI:
if item.humanHomolog != '' and item.humanHomolog not in ehfpiList:
ehfpiList.append(item.humanHomolog)
GeneNumberIn += 1
#get the geneSymbol-VTP model
result = vtpModel.objects.filter(geneSymbol__in=geneList)
GeneNumberSubmit = len(geneList) # number of gene submitted
        interactions = len(result) # number of gene-VTP interactions
GeneNumberVTP = 0 # number of genes that are also VTP
vtpList = []
for item in result:
if item.geneSymbol not in vtpList:
vtpList.append(item.geneSymbol)
GeneNumberVTP += 1
if (len(result)):
order = request.GET.get('order_by', 'geneSymbol')
result = result.order_by(order)
return render_to_response('analysis/getVTPList.html',
{'GeneNumberSubmit': GeneNumberSubmit, 'GeneNumberIn': GeneNumberIn,
'interactions': interactions,
'GeneNumberVTP': GeneNumberVTP, 'result': result, 'ids': ','.join(vtpList)},
context_instance=RequestContext(request))
else: # for pagination consideration
if os.path.isfile(PKL_DIR + '/pip.pkl'): #have pickle
file_out = file(PKL_DIR + '/pip.pkl', 'rb')
tree = pickle.load(file_out)
tree_taxonomy = pickle.load(file_out)
file_out.close()
else:
# construct tree tree
resultB = taxonomy.objects.filter(kingdom="bacteria").values()
resultV = taxonomy.objects.filter(kingdom="virus").values()
resultF = taxonomy.objects.filter(kingdom="fungi").values()
# generate tree view list
treeB = defaultdict(lambda: defaultdict(list))
treeV = defaultdict(lambda: defaultdict(list))
treeF = defaultdict(lambda: defaultdict(list))
tree_taxonomy = {}
# bacteria
for item in resultB:
kingdom = item['kingdom']
# virus has family, bacteria has genus
genus = item['genus']
species = item['species']
tree_taxonomy[kingdom] = item['kingdomTaxonomy']
tree_taxonomy[genus] = item['genusTaxonomy']
tree_taxonomy[species] = item['speciesTaxonomy']
if species not in treeB[kingdom][genus]:
treeB[kingdom][genus].append(species)
# virus
for item in resultV:
kingdom = item['kingdom']
# virus has family, bacteria has genus
family = item['family']
species = item['species']
tree_taxonomy[kingdom] = item['kingdomTaxonomy']
tree_taxonomy[family] = item['familyTaxonomy']
tree_taxonomy[species] = item['speciesTaxonomy']
if species not in treeV[kingdom][family]:
treeV[kingdom][family].append(species)
# fungi
for item in resultF:
kingdom = item['kingdom']
# virus has family, bacteria has genus, fungi we only use genus
genus = item['genus']
species = item['species']
tree_taxonomy[kingdom] = item['kingdomTaxonomy']
tree_taxonomy[genus] = item['genusTaxonomy']
tree_taxonomy[species] = item['speciesTaxonomy']
if species not in treeF[kingdom][genus]:
treeF[kingdom][genus].append(species)
# a three level tree
for obj in treeB:
treeB[obj].default_factory = None
for obj in treeV:
treeV[obj].default_factory = None
for obj in treeF:
treeF[obj].default_factory = None
treeB = dict(treeB)
treeV = dict(treeV)
treeF = dict(treeF)
tree = []
tree.append(treeB)
tree.append(treeV)
tree.append(treeF)
#generate pickle
file_pip = file(PKL_DIR + '/pip.pkl', 'wb')
pickle.dump(tree, file_pip, True)
pickle.dump(tree_taxonomy, file_pip, True)
file_pip.close()
return render_to_response('analysis/pip.html', {'tree': tree, 'tree_taxonomy': tree_taxonomy})
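# Shape of the cached objects built above (names are illustrative): 'tree' is a list of three
# dicts [treeB, treeV, treeF], each mapping kingdom -> genus (family for viruses) -> [species],
# e.g. {'virus': {'Orthomyxoviridae': ['Influenza A virus', ...]}}, while 'tree_taxonomy'
# maps every displayed name to its taxonomy accession for building links in the template.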
# given a gene list or species list, return the related vtp gene list
def getVTPList(request):
if request.method == 'GET' and 'source' in request.GET:
source = request.GET['source']
geneList = []
if source == 'geneList':
genes = request.GET['geneList'].split(',')
for i in genes:
if len(i.strip()):
geneList.append(i.strip())
geneList = list(set(geneList)) # query gene list
elif source == 'pathogen':
pathogen = request.GET.getlist('pathogen[]')
speciesList = []
for item in pathogen:
if item.startswith('species'):
speciesList.append(item[item.find('_') + 1:])
idMap = idNameMap.objects.filter(acc__in=speciesList, type='species')
speciesNameList = []
for item in idMap:
speciesNameList.append(item.name)
result = allEHFPI.objects.filter(species__in=speciesNameList)
for item in result:
geneList.append(item.humanHomolog)
geneList = list(set(geneList))
else:
print 'impossible'
#print geneList
if '' in geneList:
geneList.remove('')
inEHFPI = allEHFPI.objects.filter(humanHomolog__in=geneList)
ehfpiList = []
GeneNumberIn = 0 #number of genes in EHFPI
for item in inEHFPI:
if item.humanHomolog != '' and item.humanHomolog not in ehfpiList:
ehfpiList.append(item.humanHomolog)
GeneNumberIn += 1
#get the geneSymbol-VTP model
result = vtpModel.objects.filter(geneSymbol__in=geneList)
GeneNumberSubmit = len(geneList) # number of gene submitted
interactions = len(result) #interactions number
GeneNumberVTP = 0 # number of genes that are also VTP
vtpList = []
for item in result:
if item.geneSymbol not in vtpList:
vtpList.append(item.geneSymbol)
GeneNumberVTP += 1
return render_to_response('analysis/getVTPList.html',
{'GeneNumberSubmit': GeneNumberSubmit, 'GeneNumberIn': GeneNumberIn,
'interactions': interactions,
'GeneNumberVTP': GeneNumberVTP, 'ids': ','.join(vtpList), 'result': result},
context_instance=RequestContext(request))
def download(request):
if request.method == 'POST':
if 'selected[]' in request.POST:
selected = request.POST.getlist('selected[]')
data = vtpModel.objects.filter(geneSymbol__in=selected).values()
selectcolumn = ['geneSymbol', 'proteinName', 'uniprotId', 'virusTaxid', 'virusName', 'resources', 'note']
# code from download app
fieldDesVTP = {'geneSymbol': 'Gene Symbol',
'proteinName': 'Protein Name',
'uniprotId': 'UniProt ID',
'virusTaxid': 'Virus Taxid',
'virusName': 'Virus Name',
'resources': 'Resources',
'note': 'note'
}
response = HttpResponse(content_type="text/csv")
response.write('\xEF\xBB\xBF')
response['Content-Disposition'] = 'attachment; filename=pip.csv'
writer = csv.writer(response)
# store row title description
rowTitle = []
for item in selectcolumn:
rowTitle.append(fieldDesVTP[item])
writer.writerow(rowTitle)
#get data from database
#data = allEHFPI.objects.values()
for item in data:
res = []
for i in selectcolumn:
res.append(smart_str(item[i]))
writer.writerow(res)
return response
return HttpResponseRedirect(URL_PREFIX)
def network(request):
if request.method == 'POST':
if 'selected' in request.POST:
selected = request.POST['selected']
selected = selected.split(',')
return render_to_response('analysis/overlapNetworkOthers.html', {'geneList': ','.join(selected)},
context_instance=RequestContext(request))
return HttpResponseRedirect(URL_PREFIX)
#ppi network for pip analysis
def ppiOthers(request):
if request.method == 'POST':
if 'selected' in request.POST:
selected = request.POST['selected']
return render_to_response('analysis/ppiOthers.html', {'geneList': selected},context_instance=RequestContext(request))
return HttpResponseRedirect(URL_PREFIX)
#generate the ppi network json data
def displayPPI(request):
if request.method == 'POST' and 'geneList' in request.POST:
genes = request.POST['geneList'].split(',')
geneList = []
for i in genes:
if len(i.strip()):
geneList.append(i.strip())
geneList = list(set(geneList)) # query gene list
if '' in geneList:
geneList.remove('')
qTotal = Q(geneSymbol1__in=geneList) | Q(geneSymbol2__in=geneList)
result = ppi.objects.filter(qTotal)
#calculate the degree of each node first
degree = defaultdict(int)
for item in result:
degree[item.geneSymbol1] += 1
degree[item.geneSymbol2] += 1
degree = dict(degree)
#print sorted(degree.items(), key=lambda d: d[1]) //list the degree of each node
toJson = {}
#nodes
nodes = []
#edges
edges = []
for item in result:
#first node
node1 = {}
dataAttr = {}
dataAttr['id'] = item.geneSymbol1
dataAttr['name'] = item.geneSymbol1
dataAttr['refseqId'] = item.refseqId1
dataAttr['weight'] = degree[item.geneSymbol1]
dataAttr['height'] = degree[item.geneSymbol1]
#dataAttr['des'] = 'gene name'
dataAttr['hprd'] = item.hprdId1
node1['data'] = dataAttr
if item.geneSymbol1 in geneList: #submitted
node1['classes'] = 'submitted'
else:
node1['classes'] = 'other'
nodes.append(node1) #add node1
#second node
node2 = {}
dataAttr = {}
dataAttr['id'] = item.geneSymbol2
dataAttr['name'] = item.geneSymbol2
dataAttr['refseqId'] = item.refseqId2
dataAttr['weight'] = degree[item.geneSymbol2]
dataAttr['height'] = degree[item.geneSymbol2]
#dataAttr['des'] = 'gene name'
dataAttr['hprd'] = item.hprdId2
node2['data'] = dataAttr
if item.geneSymbol2 in geneList: #submitted
node2['classes'] = 'submitted'
else:
node2['classes'] = 'other'
nodes.append(node2) #add node2
#edge
edge = {}
dataAttr = {}
dataAttr['source'] = item.geneSymbol1
dataAttr['target'] = item.geneSymbol2
dataAttr['expType'] = item.expType
dataAttr['pubmedId'] = item.pubmedId
edge['data'] = dataAttr
edges.append(edge)
toJson['nodes'] = nodes
toJson['edges'] = edges
toJson = json.dumps(toJson)
#using ppi interaction data from HPRD
return render_to_response('analysis/displayPPI.js', {'toJson': toJson},
context_instance=RequestContext(request))
return HttpResponseRedirect(URL_PREFIX + '/analysis/pip/')
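# Sketch of the JSON handed to 'analysis/displayPPI.js' (cytoscape.js-style; genes and ids
# below are purely illustrative):
# {'nodes': [{'data': {'id': 'TP53', 'name': 'TP53', 'refseqId': 'NP_000537', 'weight': 3,
#                      'height': 3, 'hprd': '01859'}, 'classes': 'submitted'}, ...],
#  'edges': [{'data': {'source': 'TP53', 'target': 'MDM2', 'expType': 'in vivo',
#                      'pubmedId': '9153395'}}, ...]}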
#download the ppi network into a csv file
def downloadPPI(request):
if request.method == 'POST' and 'geneList' in request.POST:
genes = request.POST['geneList'].split(',')
geneList = []
for i in genes:
if len(i.strip()):
geneList.append(i.strip())
geneList = list(set(geneList)) # query gene list
if '' in geneList:
geneList.remove('')
qTotal = Q(geneSymbol1__in=geneList) | Q(geneSymbol2__in=geneList)
result = ppi.objects.filter(qTotal).values()
selectcolumn = ['geneSymbol1', 'hprdId1','refseqId1','geneSymbol2','hprdId2','refseqId2','expType','pubmedId']
fieldDesStatistics = {'geneSymbol1': 'Gene Symbol of protein 1',
'hprdId1': 'HPRD ID of protein 1',
'refseqId1':'refseq ID of protein 1',
'geneSymbol2': 'Gene Symbol of protein 2',
'hprdId2': 'HPRD ID of protein 2',
'refseqId2':'refseq ID of protein 2',
'expType':'experiment type',
'pubmedId':'PUBMED ID'}
response = HttpResponse(content_type="text/csv")
response.write('\xEF\xBB\xBF')
response['Content-Disposition'] = 'attachment; filename=ppi.csv'
writer = csv.writer(response)
# store row title description
rowTitle = []
for item in selectcolumn:
rowTitle.append(fieldDesStatistics[item])
writer.writerow(rowTitle)
#get data from database
for item in result:
res = []
for i in selectcolumn:
res.append(smart_str(item[i]))
writer.writerow(res)
return response
return HttpResponseRedirect(
URL_PREFIX + '/analysis/pip/') #we do not direct to the result page otherwise the query page
#gwas analysis, same as gea analysis
def gwasIndex(request):
if os.path.isfile(PKL_DIR + '/gwas.pkl'): #have pickle
file_out = file(PKL_DIR + '/gwas.pkl', 'rb')
tree = pickle.load(file_out)
tree_taxonomy = pickle.load(file_out)
badge_taxonomy = pickle.load(file_out)
file_out.close()
else:
tree, tree_taxonomy, badge_taxonomy = generateTree()
#generate pickle
file_gwas = file(PKL_DIR + '/gwas.pkl', 'wb')
pickle.dump(tree, file_gwas, True)
pickle.dump(tree_taxonomy, file_gwas, True)
pickle.dump(badge_taxonomy, file_gwas, True)
file_gwas.close()
return render_to_response('analysis/gwas.html',
{'tree': tree, 'tree_taxonomy': tree_taxonomy, 'badge_taxonomy': badge_taxonomy},
context_instance=RequestContext(request))
#process submitted data
def gwasResults(request):
if request.method == 'GET':
if 'jsTreeList' in request.GET:
jsTreeList = request.GET['jsTreeList']
result, aboveSpeciesList = geneListForGwasAndDrug(jsTreeList)
geneList = []
for item in result:
geneList.append(item.humanHomolog)
geneList = set(geneList)
if '' in geneList:
geneList.remove('')
#result = []
GeneNumberGWAS = []
#resultTmp = gwas.objects.all()
# for item in resultTmp:
# if item.mappedGene.strip() != '-':
# genes = []
# if ' - ' in item.mappedGene:
# genes = item.mappedGene.strip().split(' - ')
# elif ';' in item.mappedGene:
# genes = item.mappedGene.strip().split(';')
# else:
# genes.append(item.mappedGene.strip())
# if len(geneList & set(genes)) > 0:
# GeneNumberGWAS = GeneNumberGWAS + list(geneList & set(genes))
# result.append(item)
# for item in resultTmp:
# genes = []
# if ', ' in item.reportedGene:
# genes = item.reportedGene.strip().split(', ')
# else:
# genes.append(item.reportedGene.strip())
# if len(geneList & set(genes)) > 0:
# GeneNumberGWAS = GeneNumberGWAS + list(geneList & set(genes))
# result.append(item)
#
result = gwas.objects.filter(reportedGene__in=geneList)
speciesAll = [] #all species
for item in result:
GeneNumberGWAS.append(item.reportedGene)
speciesAll.append(item.species)
#other species, not submitted
speciesAll = list(set(speciesAll))
otherSpecies = list(set(speciesAll))
#dict
speciesDict = {}
speciesDictReverse = {} #used in template to store taxonomy info
aboveSpecies = []
idMap = idNameMap.objects.filter(name__in=speciesAll, type='species')
for item in idMap:
speciesDict[item.acc] = item.name
speciesDictReverse[item.name] = item.acc
for item in aboveSpeciesList:
if item in speciesDict.keys():
otherSpecies.remove(speciesDict[item])
aboveSpecies.append(speciesDict[item])
            #note some species may not have any GWAS information
aboveSpecies.sort()
otherSpecies.sort()
GeneNumberSubmit = len(geneList)
GeneNumberGWAS = len(set(GeneNumberGWAS))
#if column in the url, we filter the result
if 'columns' in request.GET:
columns = request.GET['columns']
if len(columns.strip()) > 0:
columns = columns.split(',')
speciesSelected = []
for item in columns:
speciesSelected.append(speciesDict[item])
result = result.filter(species__in=speciesSelected)
interactions = len(result)
#sort the table
if (len(result)):
order = request.GET.get('order_by', 'reportedGene')
result = result.order_by(order)
#sort the column
#
# #stupid method, too slow, but do we have any answer to this problem
# if order.startswith('-'):
# result.sort(lambda x, y: -cmp(getattr(x, order[1:]), getattr(y, order[1:])))
# else:
# result.sort(lambda x, y: cmp(getattr(x, order), getattr(y, order)))
# gwasDict = defaultdict(set)
# for aa in result:
# gwasDict[aa.reportedGene].add(aa['species'])
#
# gwasDict = dict(gwasDict)
# myDic = {}
# for key,value in gwasDict.items():
# myDic[key] = len(value)
# print sorted(myDic.items(), key=lambda d: d[1])
return render_to_response('analysis/gwasResults.html',
{'result': result, 'GeneNumberSubmit': GeneNumberSubmit,
'interactions': interactions, 'GeneNumberGWAS': GeneNumberGWAS,
'aboveSpecies': aboveSpecies, 'otherSpecies': otherSpecies,
'speciesDictReverse': speciesDictReverse},
context_instance=RequestContext(request))
return HttpResponseRedirect(URL_PREFIX + '/analysis/gwas/')
#gwas download
def gwasDownload(request):
if request.method == 'GET':
if 'type' in request.GET:
type = request.GET['type']
if 'selected' in request.GET:
selected = request.GET['selected']
if type == 'all':
                    result, aboveSpeciesList = geneListForGwasAndDrug(selected[11:]) # skip the first 11 characters (presumably a 'jsTreeList=' prefix)
geneList = []
for item in result:
geneList.append(item.humanHomolog)
geneList = set(geneList)
if '' in geneList:
geneList.remove('')
#data = []
#resultTmp = gwas.objects.all().values()
# for item in resultTmp:
# if item['mappedGene'].strip() != '-':
# genes = []
# if ' - ' in item['mappedGene']:
# genes = item['mappedGene'].strip().split(' - ')
# elif ';' in item['mappedGene']:
# genes = item['mappedGene'].strip().split(';')
# else:
# genes.append(item['mappedGene'].strip())
# if len(geneList & set(genes)) > 0:
# data.append(item)
# for item in resultTmp:
# genes = []
# if ', ' in item['reportedGene']:
# genes = item['reportedGene'].strip().split(', ')
# else:
# genes.append(item['reportedGene'].strip())
# if len(geneList & set(genes)) > 0:
# data.append(item)
data = gwas.objects.filter(reportedGene__in=geneList)
#if column in the url, we filter the result
if 'columns' in request.GET:
columns = request.GET['columns']
if len(columns.strip()) > 0:
columns = columns.split(',')
speciesSelected = []
speciesDict = {}
idMap = idNameMap.objects.filter(type='species')
for item in idMap:
speciesDict[item.acc] = item.name
for item in columns:
speciesSelected.append(speciesDict[item])
data = data.filter(species__in=speciesSelected)
data = data.values()
else:
selected = request.GET['selected'].split(',')
data = gwas.objects.filter(acc__in=selected).values()
selectcolumn = ['pubmedId', 'firstAuthor', 'journal', 'link', 'study', 'disease',
'initialSampleSize', 'replicationSampleSize', 'cytogeneticLoc', 'chrId', 'charPos',
'reportedGene', 'mappedGene', 'upstreamGeneId', 'downstreamGeneId', 'snpGeneId',
'upstreamGeneDistance', 'downstreamGeneDistance', 'strongSnpAllele', 'snps', 'merged',
'snpIdCurrent', 'context', 'interGenetic', 'riskAlleleFreq', 'pvalue', 'pvalueMLog',
'pvalueText',
'orOrBeta', 'ci', 'platform', 'cnv']
# code from download app
fieldDesDrug = {'pubmedId': 'PUBMEDID',
'firstAuthor': 'First Author',
'journal': 'Journal',
'link': 'Link',
'study': 'Study',
'disease': 'Disease/Trait',
'initialSampleSize': 'Initial Sample Size',
'replicationSampleSize': 'Replication Sample Size',
'cytogeneticLoc': 'Region',
'chrId': 'Chr_id',
'charPos': 'Chr_pos',
'reportedGene': 'Reported Gene(s)',
'mappedGene': 'Mapped_gene',
'upstreamGeneId': 'Upstream_gene_id',
'downstreamGeneId': 'Downstream_gene_id',
'snpGeneId': 'Snp_gene_ids',
'upstreamGeneDistance': 'Upstream_gene_distance',
'downstreamGeneDistance': 'Downstream_gene_distance',
'strongSnpAllele': 'Strongest SNP-Risk Allele',
'snps': 'SNPs',
'merged': 'Merged',
'snpIdCurrent': 'Snp_id_current',
'context': 'Context',
'interGenetic': 'Intergenic',
'riskAlleleFreq': 'Risk Allele Frequency',
'pvalue': 'p-Value',
'pvalueMLog': 'Pvalue_mlog',
'pvalueText': 'p-Value (text)',
'orOrBeta': 'OR or beta',
'ci': '95% CI (text)',
'platform': 'Platform [SNPs passing QC]',
'cnv': 'CNV'
}
response = HttpResponse(content_type="text/csv")
response.write('\xEF\xBB\xBF')
response['Content-Disposition'] = 'attachment; filename=gwas.csv'
writer = csv.writer(response)
# store row title description
rowTitle = []
for item in selectcolumn:
rowTitle.append(fieldDesDrug[item])
writer.writerow(rowTitle)
#get data from database
for item in data:
res = []
for i in selectcolumn:
res.append(smart_str(item[i]))
writer.writerow(res)
return response
return HttpResponseRedirect(URL_PREFIX + '/analysis/gwas/')
#drug target analysis
def drug(request):
if os.path.isfile(PKL_DIR + '/drug.pkl'): #have pickle
file_out = file(PKL_DIR + '/drug.pkl', 'rb')
tree = pickle.load(file_out)
tree_taxonomy = pickle.load(file_out)
badge_taxonomy = pickle.load(file_out)
file_out.close()
else:
tree, tree_taxonomy, badge_taxonomy = generateTree()
#generate pickle
file_drug = file(PKL_DIR + '/drug.pkl', 'wb')
pickle.dump(tree, file_drug, True)
pickle.dump(tree_taxonomy, file_drug, True)
pickle.dump(badge_taxonomy, file_drug, True)
file_drug.close()
return render_to_response('analysis/drug.html',
{'tree': tree, 'tree_taxonomy': tree_taxonomy, 'badge_taxonomy': badge_taxonomy},
context_instance=RequestContext(request))
#process submitted data
def drugResults(request):
    #use GET so that pagination keeps the query parameters
if request.method == 'GET':
if 'jsTreeList' in request.GET and 'geneList' in request.GET:
#get gene list start
jsTreeList = request.GET['jsTreeList']
geneList1 = []
aboveSpeciesList = []
if len(jsTreeList.strip()): #select jsTree
result, aboveSpeciesList = geneListForGwasAndDrug(jsTreeList)
for item in result:
geneList1.append(item.humanHomolog)
geneList2Tmp = request.GET['geneList']
geneList22 = []
geneList2 = []
            if ',' in geneList2Tmp:
                geneList22 = geneList2Tmp.split(',')
            elif ';' in geneList2Tmp:
                geneList22 = geneList2Tmp.split(';')
            elif '\r\n' in geneList2Tmp:
                geneList22 = geneList2Tmp.split('\r\n')
            elif '\n' in geneList2Tmp:
                geneList22 = geneList2Tmp.split('\n')
            elif '\r' in geneList2Tmp:
                geneList22 = geneList2Tmp.split('\r')
            else:
                geneList22.append(geneList2Tmp.strip())
for item in geneList22:
geneList2.append(item.strip())
geneList = list(set(geneList1) | set(geneList2))
if '' in geneList:
geneList.remove('')
#get gene list end
GeneDrug = [] #record gene number
            DrugList = [] #record drug ids (for the drug count)
speciesAll = [] #all species
result = drugModelWithInt.objects.filter(geneSymbol__in=geneList)
for item in result:
GeneDrug.append(item.geneSymbol)
DrugList.append(item.drugbankId)
speciesAll.append(item.species)
#other species, not submitted
speciesAll = list(set(speciesAll))
otherSpecies = list(set(speciesAll))
#dict
speciesDict = {}
speciesDictReverse = {} #used in template to store taxonomy info
aboveSpecies = []
idMap = idNameMap.objects.filter(name__in=speciesAll, type='species')
for item in idMap:
speciesDict[item.acc] = item.name
speciesDictReverse[item.name] = item.acc
for item in aboveSpeciesList:
if item in speciesDict.keys():
otherSpecies.remove(speciesDict[item])
aboveSpecies.append(speciesDict[item])
#note some species may not have any drug information
aboveSpecies.sort()
otherSpecies.sort()
GeneNumberSubmit = len(geneList)
GeneNumberDrug = len(set(GeneDrug))
DrugNumber = len(set(DrugList))
#if column in the url, we filter the result
if 'columns' in request.GET:
columns = request.GET['columns']
if len(columns.strip()) > 0:
columns = columns.split(',')
speciesSelected = []
for item in columns:
speciesSelected.append(speciesDict[item])
result = result.filter(species__in=speciesSelected)
interactions = len(result)
#sort the table
if len(result):
#sort the column
order = request.GET.get('order_by', 'species')
result = result.order_by(order)
return render_to_response('analysis/drugResults.html',
{'result': result, 'GeneNumberSubmit': GeneNumberSubmit,
'interactions': interactions, 'GeneNumberDrug': GeneNumberDrug,
'DrugNumber': DrugNumber,
'aboveSpecies': aboveSpecies, 'otherSpecies': otherSpecies,
'speciesDictReverse': speciesDictReverse},
context_instance=RequestContext(request))
return HttpResponseRedirect(URL_PREFIX + '/analysis/drug/')
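# Illustrative GET parameters for drugResults (ids and genes are hypothetical):
#   jsTreeList=species_11320,article_694009_19590038   (jsTree selection)
#   geneList=TP53;MDM2                                  (free-text gene symbols)
#   columns=11320,694009                                (optional species filter)
#   order_by=species                                    (optional sort field)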
#download drug data
def drugDownload(request):
if request.method == 'GET':
if 'type' in request.GET:
type = request.GET['type']
if 'selected' in request.GET and 'geneList' in request.GET:
selected = request.GET['selected']
if type == 'all':
result, aboveSpeciesList = geneListForGwasAndDrug(selected)
geneList1 = []
for item in result:
geneList1.append(item.humanHomolog)
geneList2Tmp = request.GET['geneList']
geneList22 = []
geneList2 = []
                    if ',' in geneList2Tmp:
                        geneList22 = geneList2Tmp.split(',')
                    elif ';' in geneList2Tmp:
                        geneList22 = geneList2Tmp.split(';')
                    elif '\r\n' in geneList2Tmp:
                        geneList22 = geneList2Tmp.split('\r\n')
                    elif '\n' in geneList2Tmp:
                        geneList22 = geneList2Tmp.split('\n')
                    elif '\r' in geneList2Tmp:
                        geneList22 = geneList2Tmp.split('\r')
                    else:
                        geneList22.append(geneList2Tmp.strip())
for item in geneList22:
geneList2.append(item.strip())
geneList = list(set(geneList1) | set(geneList2))
if '' in geneList:
geneList.remove('')
data = drugModelWithInt.objects.filter(geneSymbol__in=geneList)
#if column in the url, we filter the result
if 'columns' in request.GET:
columns = request.GET['columns']
if len(columns.strip()) > 0:
columns = columns.split(',')
speciesSelected = []
speciesDict = {}
idMap = idNameMap.objects.filter(type='species')
for item in idMap:
speciesDict[item.acc] = item.name
for item in columns:
speciesSelected.append(speciesDict[item])
data = data.filter(species__in=speciesSelected)
data = data.values()
else:
selected = request.GET['selected'].split(',')
data = drugModelWithInt.objects.filter(acc__in=selected).values()
selectcolumn = ['species', 'speciesTaxonomy', 'geneSymbol', 'hgncId', 'uniprotId', 'proteinName',
'drugbankId', 'drugName',
'drugType', 'drugGroup']
# code from download app
fieldDesDrug = {'species': 'Pathogen(species)',
'speciesTaxonomy': 'Species Taxonomy',
'geneSymbol': 'Gene Symbol',
'hgncId': 'HGNC ID',
'uniprotId': 'UniProt ID',
'proteinName': 'Protein Name',
'drugbankId': 'DrugBank ID',
'drugName': 'Drug Name',
'drugType': 'Drug Type',
'drugGroup': 'Drug Group'
}
response = HttpResponse(content_type="text/csv")
response.write('\xEF\xBB\xBF')
response['Content-Disposition'] = 'attachment; filename=drug.csv'
writer = csv.writer(response)
# store row title description
rowTitle = []
for item in selectcolumn:
rowTitle.append(fieldDesDrug[item])
writer.writerow(rowTitle)
#get data from database
for item in data:
res = []
for i in selectcolumn:
res.append(smart_str(item[i]))
writer.writerow(res)
return response
return HttpResponseRedirect(URL_PREFIX + '/analysis/drug/')
def drugNetwork(request):
if request.method == 'POST':
if 'type' in request.POST:
type = request.POST['type']
if 'selected' in request.POST and 'geneList' in request.POST:
selected = request.POST['selected']
#print selected
if type == 'all':
#replace %2C with ,
if '%2C' in selected:
selected = selected.replace('%2C', ',')
                    #we get aboveSpeciesList here to display in the network; note we show all related relations, not only the selected ones
                    #storing the taxonomy id is enough, the name is not needed
result, aboveSpeciesList = geneListForGwasAndDrug(selected)
geneList1 = []
for item in result:
geneList1.append(item.humanHomolog)
geneList2Tmp = request.POST['geneList']
#replace %2C with ,
if '%2C' in geneList2Tmp:
geneList2Tmp = geneList2Tmp.replace('%2C', ',')
geneList22 = []
geneList2 = []
                    if ',' in geneList2Tmp:
                        geneList22 = geneList2Tmp.split(',')
                    elif ';' in geneList2Tmp:
                        geneList22 = geneList2Tmp.split(';')
                    elif '\r\n' in geneList2Tmp:
                        geneList22 = geneList2Tmp.split('\r\n')
                    elif '\n' in geneList2Tmp:
                        geneList22 = geneList2Tmp.split('\n')
                    elif '\r' in geneList2Tmp:
                        geneList22 = geneList2Tmp.split('\r')
                    else:
                        geneList22.append(geneList2Tmp.strip())
for item in geneList22:
geneList2.append(item.strip())
geneList = list(set(geneList1) | set(geneList2))
if '' in geneList:
geneList.remove('')
data = drugModelWithInt.objects.filter(geneSymbol__in=geneList)
#if column in the url, we filter the result
if 'columns' in request.POST:
columns = request.POST['columns']
if len(columns.strip()) > 0:
columns = columns.split(',')
speciesSelected = []
speciesDict = {}
idMap = idNameMap.objects.filter(type='species')
for item in idMap:
speciesDict[item.acc] = item.name
for item in columns:
speciesSelected.append(speciesDict[item])
data = data.filter(species__in=speciesSelected)
data = data.values_list('acc')
else:
aboveSpeciesList = []
selected = request.POST['selected'].split(',')
data = drugModelWithInt.objects.filter(acc__in=selected).values_list('acc')
            #collect the accession list to pass on to the drug network template
accList = []
for item in data:
if item[0] not in accList:
accList.append(str(item[0]))
return render_to_response('analysis/drugNetworkOthers.html',
{'accList': ','.join(accList),
'aboveSpeciesList': ';'.join(aboveSpeciesList)})
return HttpResponseRedirect(URL_PREFIX + '/analysis/drug/')
#for gwas and drug, given a tree, generate result
def geneListForGwasAndDrug(jsTreeList):
pathogen = []
aboveSpeciesList = []
if ',' in jsTreeList:
pathogen = jsTreeList.split(',')
else:
pathogen.append(jsTreeList.strip())
speciesList = []
articleList = []
for item in pathogen:
if item.startswith('species'):
speciesList.append(item[item.find('_') + 1:])
if item.startswith('article'):
articleList.append(item[item.find('_') + 1:])
aboveSpeciesList = speciesList
    #an article may contain several species and a species may contain several articles. So if a species is selected, all articles
    # under it must be selected too; if only an article is selected, we must AND it with its species
qTotal = Q()
for item in articleList:
speciesItem = item[0:item.find('_')]
aboveSpeciesList.append(speciesItem)
pubmedIdItem = item[item.find('_') + 1:]
qTotal = qTotal | (Q(speciesTaxonomy=speciesItem) & Q(pubmedId=pubmedIdItem))
qTotal = qTotal | Q(speciesTaxonomy__in=speciesList)
result = allEHFPI.objects.filter(qTotal)
aboveSpeciesList = list(set(aboveSpeciesList))
return result, aboveSpeciesList
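# Input sketch for geneListForGwasAndDrug (ids are hypothetical): jsTreeList is a comma-separated
# jsTree selection such as 'species_11320,article_694009_19590038', where article entries encode
# speciesTaxonomy_pubmedId; the function returns the matching allEHFPI rows together with the
# species taxonomy accessions they belong to.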
#render the drug network; this was for drugModel, since that model carries no pathogen info.
# def drugDisplayNetworkOld(request):
# if request.method == 'POST' and 'text' in request.POST:
# aboveSpeciesList = []
# if 'aboveSpeciesList' in request.POST:
# tmp = request.POST['aboveSpeciesList'].strip()
# if len(tmp):
# if ';' in tmp:
# aboveSpeciesList = tmp.split(';')
# else:
# aboveSpeciesList.append(tmp)
#
# accList = request.POST['text'].split(',')
#
# #using accList in drug Model to get gene list
# geneList = []
# drugResult = drugModel.objects.filter(acc__in=accList).values('geneSymbol', 'drugbankId', 'drugName')
# for item in drugResult:
# geneList.append(item['geneSymbol'])
#
# geneList = list(set(geneList)) # query gene list
# if '' in geneList:
# geneList.remove('')
#
# if len(geneList):
# allResult = allEHFPI.objects.filter(humanHomolog__in=geneList).values('humanHomolog', 'species',
# 'strain') #relation list we get
#
# #get drug list and pathogen list
# drugList = []
# pathogenList = []
# for item in drugResult:
# drugList.append(item['drugbankId'])
# for item in allResult:
# pathogenList.append(item['species'])
# drugList = list(set(drugList))
# pathogenList = list(set(pathogenList))
#
# # calculate drug number of each species
# speciesNumber = defaultdict(list)
#
# # generate interaction network start
# jsonRes = [] # a list
#
# # generate json file
# for item in drugList:
# node = {}
# node['name'] = item # name attr
# node['id'] = item #id attr
#
# data = {} #data attr
# data['$type'] = 'circle'
# data['nodeType'] = 'drug'
#
# # set adjacencies attr
# adjacencies = []
# adjacenciesNumber = 0
#
# for drugItem in drugResult: # generate drug node
# if item == drugItem['drugbankId']: #
# data['des'] = drugItem['drugName'] #drug name
#
# for allItem in allResult:
# if allItem['humanHomolog'].upper() == drugItem['geneSymbol'].upper(): #gene is the same
# relation = {}
# relation['nodeTo'] = allItem['species']
# relation['nodeFrom'] = drugItem['drugbankId']
# nodeData = {} # can overwrite, edge attribute(display linked gene)
# #nodeData["$color"] = "#8b0000"
# #nodeData["$color"] = "#339900"
# nodeData["$color"] = "#23A4FF"
# nodeData['gene'] = drugItem['geneSymbol']
# relation['data'] = nodeData
# adjacencies.append(relation)
# adjacenciesNumber = adjacenciesNumber + 1 #calculate common and specific gene
#
# node['adjacencies'] = adjacencies
# if adjacenciesNumber > 1:
# data['$color'] = '#416D9C'
# else:
# data['$color'] = '#800080'
#
# node['data'] = data
#
# jsonRes.append(node)
#
# # generate json file
# for item in pathogenList:
# node = {}
# node['name'] = item # name attr
# node['id'] = item #id attr
#
# data = {} #data attr
# data['$color'] = '#EBB056'
# data['$type'] = 'triangle'
# data['nodeType'] = 'species'
#
# # set adjacencies attr
# adjacencies = []
#
# strain_list = []
# for allItem in allResult: # generate pathogen node
# if allItem['species'] == item:
# strain_list.append(allItem['strain'])
#
# for drugItem in drugResult:
# if drugItem['geneSymbol'] == allItem['humanHomolog']:
# speciesNumber[item].append(drugItem['drugbankId'])
# relation = {}
# relation['nodeTo'] = drugItem['drugbankId']
# relation['nodeFrom'] = allItem['species']
# nodeData = {} # can overwrite
# nodeData["$color"] = "#23A4FF"
# nodeData['gene'] = drugItem['geneSymbol']
# relation['data'] = nodeData
# adjacencies.append(relation)
#
# strain_list = list(set(strain_list))
# data['des'] = '_'.join(strain_list)
# node['data'] = data
# node['adjacencies'] = adjacencies
# jsonRes.append(node)
#
# toJson = json.dumps(jsonRes)
# # generate interaction map end
#
# speciesNumber = dict(speciesNumber)
# for key, value in speciesNumber.items():
# speciesNumber[key] = len(list(set(value)))
#
# #store the species submitted above
# idMap = idNameMap.objects.filter(type='species', acc__in=aboveSpeciesList).values('acc', 'name')
# idToName = {}
# for item in idMap:
# idToName[item['acc']] = item['name']
#
# speciesNumberAbove = {}
# for item in aboveSpeciesList:
# if idToName[item] in speciesNumber.keys():
# speciesNumberAbove[idToName[item]] = speciesNumber[idToName[item]]
# speciesNumber.pop(idToName[item])
#
# # calculate gene number of each species end
#
# return render_to_response('analysis/displayDrugNetwork.js',
# {'toJson': toJson, 'speciesNumber': sorted(speciesNumber.iteritems()),
# 'speciesNumberAbove': sorted(speciesNumberAbove.iteritems())})
# else: # empty
# return HttpResponseRedirect(URL_PREFIX + '/analysis/overlap/overlapNetwork')
# else:
# return HttpResponseRedirect(URL_PREFIX + '/analysis/overlap/overlapNetwork')
#render the drug network, this is for drugModelWithInt, since pathogen info is in the model
def drugDisplayNetwork(request):
if request.method == 'POST' and 'text' in request.POST:
aboveSpeciesList = []
if 'aboveSpeciesList' in request.POST:
tmp = request.POST['aboveSpeciesList'].strip()
if len(tmp):
if ';' in tmp:
aboveSpeciesList = tmp.split(';')
else:
aboveSpeciesList.append(tmp)
accList = request.POST['text'].split(',')
drugResult = drugModelWithInt.objects.filter(acc__in=accList).values('species', 'speciesTaxonomy', 'strain',
'geneSymbol', 'drugbankId', 'drugName',
'drugGroup')
# drugDict = defaultdict(set)
# for aa in drugResult:
# drugDict[aa['drugName']].add(aa['species'])
#
# drugDict = dict(drugDict)
# myDic = {}
# for key,value in drugDict.items():
# myDic[key] = len(value)
# print sorted(myDic.items(), key=lambda d: d[1])
#get drug list and pathogen list
drugList = []
pathogenList = []
for item in drugResult:
drugList.append(item['drugName'])
for item in drugResult:
pathogenList.append(item['species'])
drugList = list(set(drugList))
pathogenList = list(set(pathogenList))
# calculate drug number of each species
speciesNumber = defaultdict(list)
# generate interaction network start
jsonRes = [] # a list
# generate json file
for item in drugList:
node = {}
node['name'] = item # name attr
node['id'] = item #id attr
data = {} #data attr
data['$type'] = 'circle'
data['nodeType'] = 'drug'
# set adjacencies attr
adjacencies = []
#adjacenciesNumber = 0
            drugGroupOfItem = None
            for drugItem in drugResult: # generate drug node
                if item == drugItem['drugName']: #
                    data['des'] = drugItem['drugbankId'] # DrugBank accession
                    drugGroupOfItem = drugItem['drugGroup']
                    relation = {}
                    relation['nodeTo'] = drugItem['species']
                    relation['nodeFrom'] = drugItem['drugName']
                    nodeData = {} # can overwrite, edge attribute(display linked gene)
                    nodeData["$color"] = "#23A4FF"
                    nodeData['gene'] = drugItem['geneSymbol']
                    relation['data'] = nodeData
                    adjacencies.append(relation)
                    #adjacenciesNumber = adjacenciesNumber + 1 #calculate common and specific gene
            node['adjacencies'] = adjacencies
            #if adjacenciesNumber > 1:
            if drugGroupOfItem == 'approved': # colour by the matched drug's group, not the last row iterated
                data['$color'] = '#416D9C'
                data['drugGroup'] = 'approved'
            else:
                data['$color'] = '#800080'
                data['drugGroup'] = 'other'
node['data'] = data
jsonRes.append(node)
# generate json file
for item in pathogenList:
node = {}
node['name'] = item # name attr
node['id'] = item #id attr
data = {} #data attr
data['$color'] = '#EBB056'
data['$type'] = 'triangle'
data['nodeType'] = 'species'
# set adjacencies attr
adjacencies = []
strain_list = []
for drugItem in drugResult: # generate pathogen node
if drugItem['species'] == item:
strain_list.append(drugItem['strain'])
speciesNumber[item].append(drugItem['drugbankId'])
relation = {}
relation['nodeTo'] = drugItem['drugName']
relation['nodeFrom'] = drugItem['species']
nodeData = {} # can overwrite
nodeData["$color"] = "#23A4FF"
nodeData['gene'] = drugItem['geneSymbol']
relation['data'] = nodeData
adjacencies.append(relation)
strain_list = list(set(strain_list))
data['des'] = '_'.join(strain_list)
node['data'] = data
node['adjacencies'] = adjacencies
jsonRes.append(node)
toJson = json.dumps(jsonRes)
# generate interaction map end
speciesNumber = dict(speciesNumber)
for key, value in speciesNumber.items():
speciesNumber[key] = len(list(set(value)))
#store the species submitted above
idMap = idNameMap.objects.filter(type='species', acc__in=aboveSpeciesList).values('acc', 'name')
idToName = {}
for item in idMap:
idToName[item['acc']] = item['name']
speciesNumberAbove = {}
for item in aboveSpeciesList:
if idToName[item] in speciesNumber.keys():
speciesNumberAbove[idToName[item]] = speciesNumber[idToName[item]]
speciesNumber.pop(idToName[item])
# calculate gene number of each species end
return render_to_response('analysis/displayDrugNetwork.js',
{'toJson': toJson, 'speciesNumber': sorted(speciesNumber.iteritems()),
'speciesNumberAbove': sorted(speciesNumberAbove.iteritems())})
return HttpResponseRedirect(URL_PREFIX + '/analysis/overlap/overlapNetwork')
| apache-2.0 | 2,985,895,231,448,524,000 | 39.37918 | 149 | 0.50943 | false |
AfonsoFGarcia/swift | swift/common/splice.py | 13 | 5458 | # Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Bindings to the `tee` and `splice` system calls
'''
import os
import operator
import ctypes
import ctypes.util
__all__ = ['tee', 'splice']
c_loff_t = ctypes.c_long
# python 2.6 doesn't have c_ssize_t
c_ssize_t = getattr(ctypes, 'c_ssize_t', ctypes.c_long)
class Tee(object):
'''Binding to `tee`'''
__slots__ = '_c_tee',
def __init__(self):
libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
try:
c_tee = libc.tee
except AttributeError:
self._c_tee = None
return
c_tee.argtypes = [
ctypes.c_int,
ctypes.c_int,
ctypes.c_size_t,
ctypes.c_uint
]
c_tee.restype = c_ssize_t
def errcheck(result, func, arguments):
if result == -1:
errno = ctypes.set_errno(0)
raise IOError(errno, 'tee: %s' % os.strerror(errno))
else:
return result
c_tee.errcheck = errcheck
self._c_tee = c_tee
def __call__(self, fd_in, fd_out, len_, flags):
'''See `man 2 tee`
File-descriptors can be file-like objects with a `fileno` method, or
integers.
Flags can be an integer value, or a list of flags (exposed on
`splice`).
This function returns the number of bytes transferred (i.e. the actual
result of the call to `tee`).
Upon other errors, an `IOError` is raised with the proper `errno` set.
'''
if not self.available:
raise EnvironmentError('tee not available')
if not isinstance(flags, (int, long)):
c_flags = reduce(operator.or_, flags, 0)
else:
c_flags = flags
c_fd_in = getattr(fd_in, 'fileno', lambda: fd_in)()
c_fd_out = getattr(fd_out, 'fileno', lambda: fd_out)()
return self._c_tee(c_fd_in, c_fd_out, len_, c_flags)
@property
def available(self):
'''Availability of `tee`'''
return self._c_tee is not None
tee = Tee()
del Tee
class Splice(object):
'''Binding to `splice`'''
# From `bits/fcntl-linux.h`
SPLICE_F_MOVE = 1
SPLICE_F_NONBLOCK = 2
SPLICE_F_MORE = 4
SPLICE_F_GIFT = 8
__slots__ = '_c_splice',
def __init__(self):
libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
try:
c_splice = libc.splice
except AttributeError:
self._c_splice = None
return
c_loff_t_p = ctypes.POINTER(c_loff_t)
c_splice.argtypes = [
ctypes.c_int, c_loff_t_p,
ctypes.c_int, c_loff_t_p,
ctypes.c_size_t,
ctypes.c_uint
]
c_splice.restype = c_ssize_t
def errcheck(result, func, arguments):
if result == -1:
errno = ctypes.set_errno(0)
raise IOError(errno, 'splice: %s' % os.strerror(errno))
else:
off_in = arguments[1]
off_out = arguments[3]
return (
result,
off_in.contents.value if off_in is not None else None,
off_out.contents.value if off_out is not None else None)
c_splice.errcheck = errcheck
self._c_splice = c_splice
def __call__(self, fd_in, off_in, fd_out, off_out, len_, flags):
'''See `man 2 splice`
File-descriptors can be file-like objects with a `fileno` method, or
integers.
Flags can be an integer value, or a list of flags (exposed on this
object).
Returns a tuple of the result of the `splice` call, the output value of
`off_in` and the output value of `off_out` (or `None` for any of these
output values, if applicable).
Upon other errors, an `IOError` is raised with the proper `errno` set.
Note: if you want to pass `NULL` as value for `off_in` or `off_out` to
the system call, you must pass `None`, *not* 0!
'''
if not self.available:
raise EnvironmentError('splice not available')
if not isinstance(flags, (int, long)):
c_flags = reduce(operator.or_, flags, 0)
else:
c_flags = flags
c_fd_in = getattr(fd_in, 'fileno', lambda: fd_in)()
c_fd_out = getattr(fd_out, 'fileno', lambda: fd_out)()
c_off_in = \
ctypes.pointer(c_loff_t(off_in)) if off_in is not None else None
c_off_out = \
ctypes.pointer(c_loff_t(off_out)) if off_out is not None else None
return self._c_splice(
c_fd_in, c_off_in, c_fd_out, c_off_out, len_, c_flags)
@property
def available(self):
'''Availability of `splice`'''
return self._c_splice is not None
splice = Splice()
del Splice
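# Minimal usage sketch (not part of the original module; the fd names below are placeholders,
# and a Linux kernel with tee/splice support plus already-created pipes are assumed):
#
# copied = tee(pipe_r, dup_pipe_w, 65536, 0)  # duplicate pipe data without consuming it
# moved, _, _ = splice(pipe_r, None, out_fd, None, 65536,
#                      [splice.SPLICE_F_MOVE, splice.SPLICE_F_MORE])  # drain the pipe into out_fd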
| apache-2.0 | -7,913,026,739,531,388,000 | 26.019802 | 79 | 0.56376 | false |
noironetworks/nova | nova/api/openstack/compute/server_metadata.py | 17 | 7880 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import server_metadata
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova import exception
from nova.i18n import _
ALIAS = 'server-metadata'
authorize = extensions.os_compute_authorizer(ALIAS)
class ServerMetadataController(wsgi.Controller):
"""The server metadata API controller for the OpenStack API."""
def __init__(self):
self.compute_api = compute.API(skip_policy_check=True)
super(ServerMetadataController, self).__init__()
def _get_metadata(self, context, server_id):
server = common.get_instance(self.compute_api, context, server_id)
try:
            # NOTE(mikal): get_instance_metadata sometimes returns
# InstanceNotFound in unit tests, even though the instance is
# fetched on the line above. I blame mocking.
meta = self.compute_api.get_instance_metadata(context, server)
except exception.InstanceNotFound:
msg = _('Server does not exist')
raise exc.HTTPNotFound(explanation=msg)
meta_dict = {}
for key, value in six.iteritems(meta):
meta_dict[key] = value
return meta_dict
@extensions.expected_errors(404)
def index(self, req, server_id):
"""Returns the list of metadata for a given instance."""
context = req.environ['nova.context']
authorize(context, action='index')
return {'metadata': self._get_metadata(context, server_id)}
@extensions.expected_errors((400, 403, 404, 409, 413))
# NOTE(gmann): Returns 200 for backwards compatibility but should be 201
    # as this operation completes the creation of metadata.
@validation.schema(server_metadata.create)
def create(self, req, server_id, body):
metadata = body['metadata']
context = req.environ['nova.context']
authorize(context, action='create')
new_metadata = self._update_instance_metadata(context,
server_id,
metadata,
delete=False)
return {'metadata': new_metadata}
@extensions.expected_errors((400, 403, 404, 409, 413))
@validation.schema(server_metadata.update)
def update(self, req, server_id, id, body):
context = req.environ['nova.context']
authorize(context, action='update')
meta_item = body['meta']
if id not in meta_item:
expl = _('Request body and URI mismatch')
raise exc.HTTPBadRequest(explanation=expl)
self._update_instance_metadata(context,
server_id,
meta_item,
delete=False)
return {'meta': meta_item}
@extensions.expected_errors((400, 403, 404, 409, 413))
@validation.schema(server_metadata.update_all)
def update_all(self, req, server_id, body):
context = req.environ['nova.context']
authorize(context, action='update_all')
metadata = body['metadata']
new_metadata = self._update_instance_metadata(context,
server_id,
metadata,
delete=True)
return {'metadata': new_metadata}
def _update_instance_metadata(self, context, server_id, metadata,
delete=False):
try:
server = common.get_instance(self.compute_api, context, server_id)
return self.compute_api.update_instance_metadata(context,
server,
metadata,
delete)
except exception.InstanceUnknownCell as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.QuotaError as error:
raise exc.HTTPForbidden(explanation=error.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'update metadata', server_id)
@extensions.expected_errors(404)
def show(self, req, server_id, id):
"""Return a single metadata item."""
context = req.environ['nova.context']
authorize(context, action='show')
data = self._get_metadata(context, server_id)
try:
return {'meta': {id: data[id]}}
except KeyError:
msg = _("Metadata item was not found")
raise exc.HTTPNotFound(explanation=msg)
@extensions.expected_errors((404, 409))
@wsgi.response(204)
def delete(self, req, server_id, id):
"""Deletes an existing metadata."""
context = req.environ['nova.context']
authorize(context, action='delete')
metadata = self._get_metadata(context, server_id)
if id not in metadata:
msg = _("Metadata item was not found")
raise exc.HTTPNotFound(explanation=msg)
server = common.get_instance(self.compute_api, context, server_id)
try:
self.compute_api.delete_instance_metadata(context, server, id)
except exception.InstanceUnknownCell as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'delete metadata', server_id)
class ServerMetadata(extensions.V21APIExtensionBase):
"""Server Metadata API."""
name = "ServerMetadata"
alias = ALIAS
version = 1
def get_resources(self):
parent = {'member_name': 'server',
'collection_name': 'servers'}
resources = [extensions.ResourceExtension('metadata',
ServerMetadataController(),
member_name='server_meta',
parent=parent,
custom_routes_fn=
self.server_metadata_map
)]
return resources
def get_controller_extensions(self):
return []
def server_metadata_map(self, mapper, wsgi_resource):
mapper.connect("metadata",
"/{project_id}/servers/{server_id}/metadata",
controller=wsgi_resource,
action='update_all', conditions={"method": ['PUT']})
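        # Illustrative note (an assumption, not stated in this file): together
        # with the ResourceExtension above, this wiring exposes the usual
        # index/create/show/update/delete metadata endpoints under
        # /{project_id}/servers/{server_id}/metadata, plus the extra PUT route
        # connected here for update_all.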
| apache-2.0 | 1,516,575,754,828,251,400 | 39.829016 | 78 | 0.578173 | false |
chrisfilda/edx_platform | lms/djangoapps/certificates/management/commands/regenerate_user.py | 10 | 3455 | """Django management command to force certificate regeneration for one user"""
from optparse import make_option
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from certificates.queue import XQueueCertInterface
from xmodule.course_module import CourseDescriptor
from xmodule.modulestore.django import modulestore
class Command(BaseCommand):
help = """Put a request on the queue to recreate the certificate for a particular user in a particular course."""
option_list = BaseCommand.option_list + (
make_option('-n', '--noop',
action='store_true',
dest='noop',
default=False,
help="Don't grade or add certificate requests to the queue"),
make_option('--insecure',
action='store_true',
dest='insecure',
default=False,
help="Don't use https for the callback url to the LMS, useful in http test environments"),
make_option('-c', '--course',
metavar='COURSE_ID',
dest='course',
default=False,
                    help='The course id (e.g., mit/6-002x/circuits-and-electronics) for which the student named in '
'<username> should be graded'),
make_option('-u', '--user',
metavar='USERNAME',
dest='username',
default=False,
help='The username or email address for whom grading and certification should be requested'),
make_option('-G', '--grade',
metavar='GRADE',
dest='grade_value',
default=None,
help='The grade string, such as "Distinction", which should be passed to the certificate agent'),
make_option('-T', '--template',
metavar='TEMPLATE',
dest='template_file',
default=None,
help='The template file used to render this certificate, like "QMSE01-distinction.pdf"'),
)
def handle(self, *args, **options):
user = options['username']
course_id = options['course']
if not (course_id and user):
raise CommandError('both course id and student username are required')
student = None
print "Fetching enrollment for student {0} in {1}".format(user, course_id)
if '@' in user:
student = User.objects.get(email=user, courseenrollment__course_id=course_id)
else:
student = User.objects.get(username=user, courseenrollment__course_id=course_id)
print "Fetching course data for {0}".format(course_id)
course = modulestore().get_instance(course_id, CourseDescriptor.id_to_location(course_id), depth=2)
if not options['noop']:
# Add the certificate request to the queue
xq = XQueueCertInterface()
if options['insecure']:
xq.use_https = False
ret = xq.regen_cert(student, course_id, course=course,
forced_grade=options['grade_value'],
template_file=options['template_file'])
print '{0} - {1}'.format(student, ret)
else:
print "noop option given, skipping work queueing..."
| agpl-3.0 | 5,911,720,304,419,070,000 | 43.87013 | 117 | 0.566425 | false |
rupran/ansible | lib/ansible/modules/network/nxos/nxos_vrf_af.py | 21 | 9307 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_vrf_af
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages VRF AF.
description:
- Manages VRF AF
author: Gabriele Gerbino (@GGabriele)
notes:
- Default, where supported, restores params default value.
options:
vrf:
description:
- Name of the VRF.
required: true
afi:
description:
- Address-Family Identifier (AFI).
required: true
choices: ['ipv4', 'ipv6']
default: null
safi:
description:
- Sub Address-Family Identifier (SAFI).
required: true
choices: ['unicast', 'multicast']
default: null
route_target_both_auto_evpn:
description:
- Enable/Disable the EVPN route-target 'auto' setting for both
import and export target communities.
required: false
choices: ['true', 'false']
default: null
state:
description:
- Determines whether the config should be present or
not on the device.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- nxos_vrf_af:
interface: nve1
vni: 6000
ingress_replication: true
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"afi": "ipv4", "route_target_both_auto_evpn": true,
"safi": "unicast", "vrf": "test"}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {"afi": "ipv4", "route_target_both_auto_evpn": false,
"safi": "unicast", "vrf": "test"}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"afi": "ipv4", "route_target_both_auto_evpn": true,
"safi": "unicast", "vrf": "test"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["vrf context test", "address-family ipv4 unicast",
"route-target both auto evpn"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
BOOL_PARAMS = ['route_target_both_auto_evpn']
PARAM_TO_COMMAND_KEYMAP = {
'route_target_both_auto_evpn': 'route-target both auto evpn',
}
PARAM_TO_DEFAULT_KEYMAP = {}
WARNINGS = []
def invoke(name, *args, **kwargs):
func = globals().get(name)
if func:
return func(*args, **kwargs)
def get_value(arg, config, module):
if arg in BOOL_PARAMS:
REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
value = False
try:
if REGEX.search(config):
value = True
except TypeError:
value = False
else:
REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
value = ''
if PARAM_TO_COMMAND_KEYMAP[arg] in config:
value = REGEX.search(config).group('value')
return value
def get_existing(module, args):
existing = {}
netcfg = get_config(module)
parents = ['vrf context {0}'.format(module.params['vrf'])]
parents.append('address-family {0} {1}'.format(module.params['afi'],
module.params['safi']))
config = netcfg.get_section(parents)
if config:
splitted_config = config.splitlines()
vrf_index = False
for index in range(0, len(splitted_config) - 1):
if 'vrf' in splitted_config[index].strip():
vrf_index = index
break
if vrf_index:
config = '\n'.join(splitted_config[0:vrf_index])
for arg in args:
if arg not in ['afi', 'safi', 'vrf']:
existing[arg] = get_value(arg, config, module)
existing['afi'] = module.params['afi']
existing['safi'] = module.params['safi']
existing['vrf'] = module.params['vrf']
return existing
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = value
else:
new_dict[new_key] = value
return new_dict
def state_present(module, existing, proposed, candidate):
commands = list()
proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
for key, value in proposed_commands.items():
if value is True:
commands.append(key)
elif value is False:
commands.append('no {0}'.format(key))
elif value == 'default':
if existing_commands.get(key):
existing_value = existing_commands.get(key)
commands.append('no {0} {1}'.format(key, existing_value))
else:
command = '{0} {1}'.format(key, value.lower())
commands.append(command)
if commands:
parents = ['vrf context {0}'.format(module.params['vrf'])]
parents.append('address-family {0} {1}'.format(module.params['afi'],
module.params['safi']))
candidate.add(commands, parents=parents)
def state_absent(module, existing, proposed, candidate):
commands = []
parents = ['vrf context {0}'.format(module.params['vrf'])]
commands.append('no address-family {0} {1}'.format(module.params['afi'],
module.params['safi']))
candidate.add(commands, parents=parents)
def main():
argument_spec = dict(
vrf=dict(required=True, type='str'),
safi=dict(required=True, type='str', choices=['unicast','multicast']),
afi=dict(required=True, type='str', choices=['ipv4','ipv6']),
route_target_both_auto_evpn=dict(required=False, type='bool'),
m_facts=dict(required=False, default=False, type='bool'),
state=dict(choices=['present', 'absent'], default='present',
required=False),
include_defaults=dict(default=False),
config=dict(),
save=dict(type='bool', default=False)
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
state = module.params['state']
args = [
'vrf',
'safi',
'afi',
'route_target_both_auto_evpn'
]
existing = invoke('get_existing', module, args)
end_state = existing
proposed_args = dict((k, v) for k, v in module.params.items()
if v is not None and k in args)
proposed = {}
for key, value in proposed_args.items():
if key != 'interface':
if str(value).lower() == 'default':
value = PARAM_TO_DEFAULT_KEYMAP.get(key)
if value is None:
value = 'default'
if existing.get(key) or (not existing.get(key) and value):
proposed[key] = value
result = {}
if state == 'present' or (state == 'absent' and existing):
candidate = CustomNetworkConfig(indent=3)
invoke('state_%s' % state, module, existing, proposed, candidate)
try:
response = load_config(module, candidate)
result.update(response)
        except Exception as exc:
            module.fail_json(msg=str(exc))
else:
result['updates'] = []
if module._verbosity > 0:
end_state = invoke('get_existing', module, args)
result['end_state'] = end_state
result['existing'] = existing
result['proposed'] = proposed_args
if WARNINGS:
result['warnings'] = WARNINGS
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | -2,546,682,287,198,624,000 | 30.3367 | 97 | 0.590523 | false |
seibert-media/Highton | highton/models/user.py | 1 | 1872 | from highton import call_mixins
from highton.models import HightonModel
from highton.highton_constants import HightonConstants
from highton import fields
class User(
HightonModel,
call_mixins.ListCallMixin,
call_mixins.DetailCallMixin,
):
"""
:ivar id: fields.IntegerField(name=HightonConstants.ID)
:ivar name: fields.StringField(name=HightonConstants.NAME)
:ivar email_address: fields.StringField(name=HightonConstants.EMAIL_ADDRESS)
:ivar admin: fields.BooleanField(name=HightonConstants.ADMIN)
:ivar token: fields.StringField(name=HightonConstants.TOKEN)
:ivar dropbox: fields.StringField(name=HightonConstants.DROPBOX)
:ivar created_at: fields.DatetimeField(name=HightonConstants.CREATED_AT)
:ivar updated_at: fields.DatetimeField(name=HightonConstants.UPDATED_AT)
"""
ENDPOINT = HightonConstants.USERS
TAG_NAME = HightonConstants.USER
def __init__(self, **kwargs):
self.name = fields.StringField(name=HightonConstants.NAME)
self.email_address = fields.StringField(name=HightonConstants.EMAIL_ADDRESS)
self.admin = fields.BooleanField(name=HightonConstants.ADMIN)
self.token = fields.StringField(name=HightonConstants.TOKEN)
self.dropbox = fields.StringField(name=HightonConstants.DROPBOX)
self.created_at = fields.DatetimeField(name=HightonConstants.CREATED_AT)
self.updated_at = fields.DatetimeField(name=HightonConstants.UPDATED_AT)
super().__init__(**kwargs)
@classmethod
def me(cls):
"""
Returns information about the currently authenticated user.
:return:
:rtype: User
"""
return fields.ObjectField(name=cls.ENDPOINT, init_class=cls).decode(
cls.element_from_string(
cls._get_request(endpoint=cls.ENDPOINT + '/me').text
)
)
| apache-2.0 | 8,132,271,359,187,268,000 | 38 | 84 | 0.707799 | false |
baroquebobcat/pants | tests/python/pants_test/backend/jvm/tasks/test_jvm_run.py | 16 | 2540 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
from contextlib import contextmanager
from pants.backend.jvm.subsystems.jvm import JVM
from pants.backend.jvm.targets.jvm_binary import JvmBinary
from pants.backend.jvm.tasks.jvm_run import JvmRun
from pants.util.contextutil import pushd, temporary_dir
from pants_test.jvm.jvm_task_test_base import JvmTaskTestBase
class JvmRunTest(JvmTaskTestBase):
@classmethod
def task_type(cls):
return JvmRun
@contextmanager
def setup_cmdline_run(self, **options):
"""Run the JvmRun task in command line only mode with the specified extra options.
:returns: the command line string
"""
self.set_options(only_write_cmd_line='a', **options)
jvm_binary = self.make_target('src/java/org/pantsbuild:binary', JvmBinary,
main='org.pantsbuild.Binary')
context = self.context(target_roots=[jvm_binary])
jvm_run = self.create_task(context)
self._cmdline_classpath = [os.path.join(self.pants_workdir, c) for c in ['bob', 'fred']]
self.populate_runtime_classpath(context=jvm_run.context, classpath=self._cmdline_classpath)
with temporary_dir() as pwd:
with pushd(pwd):
cmdline_file = os.path.join(pwd, 'a')
self.assertFalse(os.path.exists(cmdline_file))
jvm_run.execute()
self.assertTrue(os.path.exists(cmdline_file))
with open(cmdline_file) as fp:
contents = fp.read()
yield contents
def test_cmdline_only(self):
main_entry = 'org.pantsbuild.Binary'
with self.setup_cmdline_run(main=main_entry) as cmdline:
self.assertTrue(self._match_cmdline_regex(cmdline, main_entry))
def test_opt_main(self):
main_entry = 'org.pantsbuild.OptMain'
with self.setup_cmdline_run(main=main_entry) as cmdline:
self.assertTrue(self._match_cmdline_regex(cmdline, main_entry))
def _match_cmdline_regex(self, cmdline, main):
# Original classpath is embedded in the manifest file of a synthetic jar, just verify
# classpath is a singleton jar here.
if JVM.options_default:
opts_str = ' '.join(JVM.options_default) + ' '
else:
opts_str = ''
m = re.search(r'java {}-cp [^:]*\.jar {}'.format(opts_str, main), cmdline)
return m is not None
| apache-2.0 | 903,758,622,422,303,500 | 38.076923 | 95 | 0.689764 | false |
facebookincubator/prophet | python/prophet/tests/test_utilities.py | 2 | 1331 | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
from unittest import TestCase
import numpy as np
import pandas as pd
from prophet import Prophet
from prophet.utilities import regressor_coefficients
DATA = pd.read_csv(
os.path.join(os.path.dirname(__file__), 'data.csv'),
parse_dates=['ds'],
)
class TestUtilities(TestCase):
def test_regressor_coefficients(self):
m = Prophet()
N = DATA.shape[0]
df = DATA.copy()
np.random.seed(123)
df['regr1'] = np.random.normal(size=N)
df['regr2'] = np.random.normal(size=N)
m.add_regressor('regr1', mode='additive')
m.add_regressor('regr2', mode='multiplicative')
m.fit(df)
coefs = regressor_coefficients(m)
self.assertTrue(coefs.shape == (2, 6))
# No MCMC sampling, so lower and upper should be the same as mean
self.assertTrue(np.array_equal(coefs['coef_lower'].values, coefs['coef'].values))
self.assertTrue(np.array_equal(coefs['coef_upper'].values, coefs['coef'].values))
| bsd-3-clause | 2,474,465,620,953,051,600 | 31.463415 | 89 | 0.671675 | false |
cdd1969/pygwa | lib/flowchart/nodes/n09_selectdates/node_pickequaldates.py | 1 | 5605 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import gc
from lib.functions.general import isNumpyDatetime
from lib.flowchart.nodes.generalNode import NodeWithCtrlWidget, NodeCtrlWidget
import numpy as np
import pandas as pd
class pickEqualDatesNode(NodeWithCtrlWidget):
"""Select values in dataframe based on passed dates from another dataframe"""
nodeName = "Select Date-Rows"
uiTemplate = [
{'name': 'datetime <pattern>', 'type': 'list', 'value': None, 'default': None, 'values': [None], 'tip': 'Location of the datetime objects.\nBy default is `None`, meaning that datetime objects are\nlocated within `pd.DataFrame.index`. If not `None` - pass the\ncolumn-name of dataframe where datetime objects are located.'},
{'name': 'datetime <pickFrom>', 'type': 'list', 'value': None, 'default': None, 'values': [None], 'tip': 'Location of the datetime objects.\nBy default is `None`, meaning that datetime objects are\nlocated within `pd.DataFrame.index`. If not `None` - pass the\ncolumn-name of dataframe where datetime objects are located.'},
{'title': 'Slice Datetime', 'name': 'slice', 'type': 'bool', 'value': False, 'expanded': True, 'tip': 'Slice table between Start and End datetimes', 'children': [
            {'name': 'Start', 'type': 'str', 'value': 'YYYY-MM-DD HH:MM:SS', 'default': 'YYYY-MM-DD HH:MM:SS', 'tip': 'Start value of datetime slice. Select entries that are >= `Start`'},
            {'name': 'End', 'type': 'str', 'value': 'YYYY-MM-DD HH:MM:SS', 'default': 'YYYY-MM-DD HH:MM:SS', 'tip': 'End value of datetime slice. Select entries that are <= `End`'}
]}
]
def __init__(self, name, parent=None):
super(pickEqualDatesNode, self).__init__(name, parent=parent, terminals={'pattern': {'io': 'in'}, 'pickFrom': {'io': 'in'}, 'Out': {'io': 'out'}}, color=(255, 170, 255, 150))
self._df1_id = None
def _createCtrlWidget(self, **kwargs):
return pickEqualDatesNodeCtrlWidget(**kwargs)
def process(self, pattern, pickFrom):
df1 = pickFrom
df2 = pattern
self.CW().disconnect_valueChanged2upd(self.CW().param('slice', 'Start'))
self.CW().disconnect_valueChanged2upd(self.CW().param('slice', 'End'))
if self.CW().p['slice'] is True:
self.CW().connect_valueChanged2upd(self.CW().param('slice', 'Start'))
self.CW().connect_valueChanged2upd(self.CW().param('slice', 'End'))
if df1 is None:
self.CW().disconnect_valueChanged2upd(self.CW().param('datetime <pickFrom>'))
self.CW().param('datetime <pickFrom>').setLimits([None])
self.CW().connect_valueChanged2upd(self.CW().param('datetime <pickFrom>'))
return {'Out': None}
else:
self.CW().disconnect_valueChanged2upd(self.CW().param('datetime <pickFrom>'))
colname = [col for col in df1.columns if isNumpyDatetime(df1[col].dtype)]
self.CW().param('datetime <pickFrom>').setLimits(colname)
self.CW().connect_valueChanged2upd(self.CW().param('datetime <pickFrom>'))
if df2 is None:
self.CW().disconnect_valueChanged2upd(self.CW().param('datetime <pattern>'))
self.CW().param('datetime <pattern>').setLimits([None])
self.CW().connect_valueChanged2upd(self.CW().param('datetime <pattern>'))
else:
self.CW().disconnect_valueChanged2upd(self.CW().param('datetime <pattern>'))
colname = [col for col in df2.columns if isNumpyDatetime(df2[col].dtype)]
self.CW().param('datetime <pattern>').setLimits(colname)
self.CW().connect_valueChanged2upd(self.CW().param('datetime <pattern>'))
if self._df1_id != id(df1):
self._df1_id = id(df1)
t_vals = df1[self.CW().p['datetime <pickFrom>']].values
t_min, t_max = pd.to_datetime(str(min(t_vals))), pd.to_datetime(str(max(t_vals)))
self.CW().param('slice', 'Start').setDefault(t_min.strftime('%Y-%m-%d %H:%M:%S'))
self.CW().param('slice', 'End').setDefault(t_max.strftime('%Y-%m-%d %H:%M:%S'))
kwargs = self.ctrlWidget().prepareInputArguments()
# now actually slice
if kwargs['slice']:
df = df1.set_index(kwargs['datetime <pickFrom>'])
start = df.index.searchsorted(kwargs['slice_start'], side='left')
end = df.index.searchsorted(kwargs['slice_end'], side='right')
del df
            df1 = df1[start:end].copy(deep=True)  # note: df1 now refers to a new, deep-copied DataFrame
# now pick dates as in another df
if kwargs['datetime <pattern>'] is not None and kwargs['datetime <pickFrom>'] is not None:
selector = df1[kwargs['datetime <pickFrom>']].isin(df2[kwargs['datetime <pattern>']])
df1 = df1[selector]
gc.collect()
return {'Out': df1}
class pickEqualDatesNodeCtrlWidget(NodeCtrlWidget):
def __init__(self, **kwargs):
super(pickEqualDatesNodeCtrlWidget, self).__init__(update_on_statechange=True, **kwargs)
def prepareInputArguments(self):
kwargs = dict()
kwargs['datetime <pickFrom>'] = self.p['datetime <pickFrom>']
kwargs['datetime <pattern>'] = self.p['datetime <pattern>']
kwargs['slice'] = self.p['slice']
if kwargs['slice'] is True:
kwargs['slice_start'] = np.datetime64(self.p['slice', 'Start'] + 'Z')
kwargs['slice_end'] = np.datetime64(self.p['slice', 'End'] + 'Z')
return kwargs
| gpl-2.0 | 2,589,416,075,492,528,000 | 52.380952 | 336 | 0.604639 | false |
ptrendx/mxnet | tests/python/unittest/test_autograd.py | 3 | 13814 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import functools
import mxnet.ndarray as nd
from mxnet.ndarray import zeros_like
from mxnet.autograd import *
from mxnet.test_utils import *
from common import setup_module, with_seed, teardown
from mxnet.test_utils import EnvManager
def grad_and_loss(func, argnum=None):
"""Return function that computes both gradient of arguments and loss value.
Parameters
----------
func: a python function
The forward (loss) function.
argnum: an int or a list of int
The index of argument to calculate gradient for.
Returns
-------
grad_and_loss_func: a python function
A function that would compute both the gradient of arguments and loss value.
"""
@functools.wraps(func)
def wrapped(*args):
"""Wrapped function."""
variables = args
if argnum is not None:
argnum_ = argnum if isinstance(argnum, list) else [argnum]
variables = [args[i] for i in argnum_]
for x in variables:
            assert isinstance(x, NDArray), "type of autograd input should be NDArray."
grads = [zeros_like(x) for x in variables]
mark_variables(variables, grads)
with record():
outputs = func(*args)
backward([outputs] if isinstance(outputs, NDArray) else outputs)
return grads, outputs
return wrapped
def grad(func, argnum=None):
"""Return function that computes gradient of arguments.
Parameters
----------
func: a python function
The forward (loss) function.
argnum: an int or a list of int
The index of argument to calculate gradient for.
Returns
-------
grad_func: a python function
A function that would compute the gradient of arguments.
Examples
--------
    >>> # autograd supports dynamic graphs, which may change
    >>> # on every invocation
>>> def func(x):
>>> r = random.randint(0, 1)
>>> if r % 2:
>>> return x**2
>>> else:
>>> return x/3
>>> # use `grad(func)` to get the gradient function
>>> for x in range(10):
>>> grad_func = grad(func)
>>> inputs = nd.array([[1, 2, 3], [4, 5, 6]])
>>> grad_vals = grad_func(inputs)
"""
grad_with_loss_func = grad_and_loss(func, argnum)
@functools.wraps(grad_with_loss_func)
def wrapped(*args):
return grad_with_loss_func(*args)[0]
return wrapped
def autograd_assert(*args, **kwargs):
func = kwargs["func"]
grad_f = kwargs["grad_func"]
argnum = kwargs["argnum"] if 'argnum' in kwargs else None
grad_func = grad_and_loss(func, argnum)
grad_vals, output = grad_func(*args)
res = func(*args)
assert same(output.asnumpy(), res.asnumpy())
grad_res = grad_f(*args)
assert len(grad_vals) == len(grad_res)
for a, b in zip(grad_vals, grad_res):
assert same(a.asnumpy(), b.asnumpy())
@with_seed()
def test_unary_func():
def check_unary_func(x):
f_exp = lambda x: nd.exp(x)
f_exp_grad = lambda x: [nd.exp(x)]
autograd_assert(x, func=f_exp, grad_func=f_exp_grad)
f_half = lambda x: x/2
f_half_grad = lambda x: [nd.ones(x.shape) * 0.5]
autograd_assert(x, func=f_half, grad_func=f_half_grad)
f_square = lambda x: x**2
f_square_grad = lambda x: [2*x]
autograd_assert(x, func=f_square, grad_func=f_square_grad)
uniform = nd.uniform(shape=(4, 5))
stypes = ['default', 'row_sparse', 'csr']
with EnvManager('MXNET_STORAGE_FALLBACK_LOG_VERBOSE', '0'):
for stype in stypes:
check_unary_func(uniform.tostype(stype))
@with_seed()
def test_binary_func():
def check_binary_func(x, y):
f_add = lambda x, y: x+y
f_add_grad = lambda x, y: [nd.ones(x.shape), nd.ones(y.shape)]
autograd_assert(x, y, func=f_add, grad_func=f_add_grad)
f_mul = lambda x, y: x*y
f_mul_grad = lambda x, y: [y, x]
autograd_assert(x, y, func=f_mul, grad_func=f_mul_grad)
f_compose = lambda x, y: x+x*y
f_compose_grad = lambda x, y: [nd.ones(x.shape) + y, x]
autograd_assert(x, y, func=f_compose, grad_func=f_compose_grad)
uniform_x = nd.uniform(shape=(4, 5))
uniform_y = nd.uniform(shape=(4, 5))
stypes = ['default', 'row_sparse', 'csr']
with EnvManager('MXNET_STORAGE_FALLBACK_LOG_VERBOSE', '0'):
for stype_x in stypes:
for stype_y in stypes:
x = uniform_x.tostype(stype_x)
y = uniform_y.tostype(stype_y)
check_binary_func(x, y)
@with_seed()
def test_operator_with_state():
def f_fc(a, b, weight, bias):
x = a*b
fc = nd.FullyConnected(
x, weight, bias, num_hidden=32)
return fc
a = nd.uniform(shape=(64, 50))
b = nd.uniform(shape=(64, 50))
weight = nd.uniform(shape=(32, 50))
bias = nd.uniform(shape=(32, ))
grad_func = grad_and_loss(f_fc)
grad_vals, outputs = grad_func(a, b, weight, bias)
# (TODO) assert
@with_seed()
def test_argnum():
def f_with_mode(a, b, mode):
if mode:
return a+b
else:
return a*b
a = nd.uniform(shape=(3, 2))
b = nd.uniform(shape=(3, 2))
f_add_grad = lambda x, y, mode: [nd.ones(x.shape), nd.ones(y.shape)]
f_mul_grad = lambda x, y, mode: [y, x]
autograd_assert(a, b, True,
argnum=[0, 1], func=f_with_mode, grad_func=f_add_grad)
autograd_assert(a, b, False,
argnum=[0, 1], func=f_with_mode, grad_func=f_mul_grad)
@with_seed()
def test_training():
x = nd.ones((10, 10))
with record():
y = nd.Dropout(x, p=0.5)
assert not (y.asnumpy() == x.asnumpy()).all()
with pause():
y = nd.Dropout(x, p=0.5)
assert (y.asnumpy() == x.asnumpy()).all()
@with_seed()
def test_out_grads():
x = nd.ones((3, 5))
dx = nd.zeros_like(x)
mark_variables([x], [dx])
da = None
db = nd.array([1,2,3,4,5])
dc = nd.array([5,4,3,2,1])
with record():
a, b, c = nd.split(x, axis=0, num_outputs=3, squeeze_axis=True)
backward([a, b, c], [da, db, dc])
assert (dx.asnumpy() == np.array(
[[1,1,1,1,1],
[1,2,3,4,5],
[5,4,3,2,1]])).all()
@with_seed()
def test_detach_updated_grad():
x = nd.ones((2, 2))
dx = nd.zeros_like(x)
y = nd.ones_like(x)
dy = nd.zeros_like(x)
mark_variables([x, y], [dx, dy])
assert x._fresh_grad == False
assert y._fresh_grad == False
with record():
x2 = x + 2
y2 = x2 + y
y2.backward()
assert (dx.asnumpy() == 1).all()
assert x._fresh_grad == True
assert y._fresh_grad == True
dx[:] = 0
x._fresh_grad = False
y._fresh_grad = False
assert x._fresh_grad == False
assert y._fresh_grad == False
with record():
x2 = x + 2
x2 = x2.detach()
y2 = x2 + y
y2.backward()
assert (dx.asnumpy() == 0).all()
assert y._fresh_grad == True
assert x._fresh_grad == False
@with_seed()
def test_retain_grad():
x = mx.nd.ones((2, 2))
dx = mx.nd.zeros((2, 2))
mark_variables([x], [dx], grad_reqs='add')
with record():
y = x + 1
y.backward(retain_graph=False)
assert (dx.asnumpy() == 1).all()
dx[:] = 0
with record():
y = x + 1
y.backward(retain_graph=True)
y.backward(retain_graph=False)
assert (dx.asnumpy() == 2).all()
# The following sequence should throw an exception. We discard the expected
# stderr stack trace output for this operation to keep the test logs clean.
with discard_stderr():
try:
with record():
y = x + 1
y.backward()
y.backward()
except Exception:
return
raise AssertionError(
"differentiating the same graph twice without retain_graph should fail")
@with_seed()
def test_attach_grad():
def check_attach_grad(x):
assert x.grad is None
x.attach_grad()
with record():
y = x * 2
assert y.grad is None
y.backward(out_grad=mx.nd.ones_like(y).tostype(x.stype))
assert (x.grad.asnumpy() == 2).all()
zeros = mx.nd.zeros((10, 10))
stypes = ['default', 'row_sparse', 'csr']
for stype in stypes:
x = zeros.tostype(stype)
check_attach_grad(x)
@with_seed()
def test_is_train():
x = mx.nd.ones((10, 10))
x.attach_grad()
with record(train_mode=True):
assert is_recording()
assert is_training()
y = mx.nd.Dropout(x, p=0.5)
assert y.asnumpy().max() == 2 and y.asnumpy().min() == 0
y.backward()
assert (x.grad.asnumpy() == y.asnumpy()).all()
with predict_mode():
assert is_recording()
assert not is_training()
y = mx.nd.Dropout(x, p=0.5)
assert (y.asnumpy() == x.asnumpy()).all()
y.backward(train_mode=False)
assert (x.grad.asnumpy() == x.asnumpy()).all()
with record(train_mode=False):
assert is_recording()
assert not is_training()
y = mx.nd.Dropout(x, p=0.5)
assert (y.asnumpy() == x.asnumpy()).all()
y.backward(train_mode=False)
assert (x.grad.asnumpy() == x.asnumpy()).all()
with train_mode():
assert is_recording()
assert is_training()
y = mx.nd.Dropout(x, p=0.5)
assert y.asnumpy().max() == 2 and y.asnumpy().min() == 0
y.backward()
assert (x.grad.asnumpy() == y.asnumpy()).all()
assert not is_recording()
assert not is_training()
y = mx.nd.Dropout(x, p=0.5)
assert (y.asnumpy() == x.asnumpy()).all()
with train_mode():
assert not is_recording()
assert is_training()
y = mx.nd.Dropout(x, p=0.5)
assert y.asnumpy().max() == 2 and y.asnumpy().min() == 0
@with_seed()
def test_function():
class func(Function):
def forward(self, x, y):
m = x / y
n = x * y
self.save_for_backward(x, y)
return m, n
def backward(self, dm, dn):
x, y = self.saved_tensors
dx = dm/y + dn*y
dy = dn*x - dm * x / y / y
return dx, dy
f = func()
x = mx.nd.random.uniform(shape=(10,))
x.attach_grad()
y = mx.nd.random.uniform(shape=(10,))
y.attach_grad()
with record():
m, n = f(x, y)
backward([m, n])
dx1 = x.grad.asnumpy()
dy1 = y.grad.asnumpy()
with record():
backward([x/y, x*y])
# Non-zero atol required, as exposed by seed 630179191
atol = 1e-6
assert_almost_equal(x.grad.asnumpy(), dx1, atol=atol)
assert_almost_equal(y.grad.asnumpy(), dy1, atol=atol)
@with_seed()
def test_get_symbol():
x = mx.nd.ones((1,))
x.attach_grad()
with record():
y = x*x + 2*x - 1
assert len(get_symbol(y).list_arguments()) == 1
z = mx.nd.ones((1,))
z.attach_grad()
with record():
y = x*x + 2*z - 1
assert len(get_symbol(y).list_arguments()) == 2
@with_seed()
def test_grad_with_stype():
def check_grad_with_stype(array_stype, grad_stype, expected_stype):
x = mx.nd.zeros((1, 1), stype=array_stype)
x.attach_grad(stype=grad_stype)
# check grad attached
assert x.grad.stype == expected_stype
y = x.detach()
# check array detached
assert y.stype == array_stype
stypes = ['default', 'csr', 'row_sparse']
for stype in stypes:
# check the default stype of the gradient (same as the array stype)
check_grad_with_stype(stype, None, stype)
for grad_stype in stypes:
# check the stype of the gradient when provided
check_grad_with_stype(stype, grad_stype, grad_stype)
@with_seed()
def test_sparse_dot_grad():
def check_sparse_dot_grad(rhs):
lhs = rand_ndarray((2, 8), 'csr')
with mx.autograd.record():
y = mx.nd.dot(lhs, rhs)
y.backward()
grad = rhs.grad
grad_np = np.dot(lhs.asnumpy().T, np.ones((lhs.shape[0], rhs.shape[1])))
assert grad.stype == 'row_sparse'
assert_almost_equal(grad.asnumpy(), grad_np)
# check grad with row_sparse weight
shape = (8, 3)
rsp = mx.nd.ones(shape).tostype('row_sparse')
rsp.attach_grad()
check_sparse_dot_grad(rsp)
# check grad with dense weight
dns = mx.nd.ones(shape)
dns.attach_grad(stype='row_sparse')
check_sparse_dot_grad(dns)
@with_seed()
def test_gradient():
x = mx.nd.ones((1,))
x.attach_grad()
with mx.autograd.record():
z = mx.nd.elemwise_add(mx.nd.exp(x), x)
dx, = mx.autograd.grad(z, [x], create_graph=True)
assert abs(dx.asscalar() - 3.71828175) < 1e-7
dx.backward()
assert abs(x.grad.asscalar() - 2.71828175) < 1e-7
if __name__ == "__main__":
import nose
nose.runmodule()
| apache-2.0 | 665,376,381,904,392,400 | 29.561947 | 84 | 0.568119 | false |
apendleton/djmicro | djmicro.py | 1 | 3148 | import os, sys, types
_base_module = None
_app_name = None
def _extend_opts(initial, to_add):
if type(to_add) is dict:
initial.update(to_add)
elif type(to_add) is types.ModuleType:
module_opts = {k: v for k, v in to_add.__dict__.iteritems() if not k.startswith('__')}
initial.update(module_opts)
elif type(to_add) in (tuple, list):
for opt_set in to_add:
_extend_opts(initial, opt_set)
elif to_add is None:
pass
else:
raise ValueError("Options must be a dict, module, list, tuple, or None.")
def configure(options={}, module=None, app_name=None):
if not module:
# hack to figure out where we were called from
import sys, inspect
module = sys.modules[inspect.stack()[1][0].f_locals['__name__']]
# djmicro makes an implicit app that you can install models into, add modules from, etc.
if not app_name:
app_name = "djmicro"
global _app_name
_app_name = app_name
if app_name != __name__:
sys.modules[app_name] = sys.modules[__name__]
# settings
from django.conf import settings
if not settings.configured:
opts = dict(
DEBUG = True,
ROOT_URLCONF = module.__name__,
TEMPLATE_DIRS = [os.path.dirname(module.__file__)],
INSTALLED_APPS = [],
MIDDLEWARE_CLASSES = ('django.middleware.common.CommonMiddleware',)
)
_extend_opts(opts, options)
if 'djmicro' not in opts['INSTALLED_APPS']:
opts['INSTALLED_APPS'] += (app_name,)
settings.configure(**opts)
# urls
from django.conf.urls import patterns
module.urlpatterns = patterns('')
# wsgi application
from django.core.wsgi import get_wsgi_application
module.application = get_wsgi_application()
global _base_module
_base_module = module
def route(*args, **kwargs):
def add_route(view):
# if it's a class-based view, take .as_view() of it
from django.views.generic import View
target = view.as_view() if isinstance(view, types.TypeType) and issubclass(view, View) else view
from django.conf.urls import patterns, url
_base_module.urlpatterns += patterns('',
url(args[0], target, *args[1:], **kwargs)
)
return view
return add_route
def add_module_to_app(module, name=None):
if name is None:
name = module.__name__
# make it available as a module
sys.modules["%s.%s" % (_app_name, name)] = module
if _app_name != __name__:
sys.modules["%s.%s" % (__name__, name)] = module
# make it available with a from import
globals()[name] = module
# if it's a models module, special-case handle it because model detection has already occurred
if name == "models":
from django.apps import apps
new_config = apps.get_app_config(_app_name)
new_config.import_models(apps.all_models[_app_name])
apps.app_configs[_app_name] = new_config
def run():
from django.core.management import execute_from_command_line
execute_from_command_line() | bsd-3-clause | 8,589,437,361,519,496,000 | 31.132653 | 104 | 0.60864 | false |
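# Illustrative usage sketch (not part of this module; the view name and URL
# pattern are assumptions). A minimal single-file app built on these helpers
# could look like:
#
#     import djmicro
#     djmicro.configure()
#
#     @djmicro.route(r'^$')
#     def hello(request):
#         from django.http import HttpResponse
#         return HttpResponse('Hello, djmicro!')
#
#     if __name__ == '__main__':
#         djmicro.run()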
nskrypnik/kivy3 | kivy3/renderer.py | 1 | 4050 |
"""
The MIT License (MIT)
Copyright (c) 2013 Niko Skrypnik
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
"""
Renderer class
=============
Unlike THREE.js, we provide only one renderer, which is a Kivy widget that
uses the Kivy canvas and FBO concepts for drawing graphics.
You may use this class as a usual widget and place it wherever you need
in your application.
"""
import os
import kivy3
from kivy.uix.widget import Widget
from kivy.clock import Clock
from kivy.graphics.fbo import Fbo
from kivy.graphics.instructions import InstructionGroup
from kivy.graphics.opengl import glEnable, glDisable, GL_DEPTH_TEST
from kivy.graphics import Callback, PushMatrix, PopMatrix, \
Rectangle, Canvas, UpdateNormalMatrix
kivy3_path = os.path.abspath(os.path.dirname(kivy3.__file__))
class RendererError(Exception):
pass
class Renderer(Widget):
def __init__(self, **kw):
self.shader_file = kw.pop("shader_file", None)
self.canvas = Canvas()
super(Renderer, self).__init__(**kw)
with self.canvas:
self._viewport = Rectangle(size=self.size, pos=self.pos)
self.fbo = Fbo(size=self.size,
with_depthbuffer=True, compute_normal_mat=True,
clear_color=(0., 0., 0., 0.))
self._config_fbo()
self.texture = self.fbo.texture
self.camera = None
self.scene = None
def _config_fbo(self):
# set shader file here
self.fbo.shader.source = self.shader_file or \
os.path.join(kivy3_path, "default.glsl")
with self.fbo:
Callback(self._setup_gl_context)
PushMatrix()
# instructions set for all instructions
self._instructions = InstructionGroup()
PopMatrix()
Callback(self._reset_gl_context)
def _setup_gl_context(self, *args):
glEnable(GL_DEPTH_TEST)
self.fbo.clear_buffer()
def _reset_gl_context(self, *args):
glDisable(GL_DEPTH_TEST)
def render(self, scene, camera):
self.scene = scene
self.camera = camera
self.camera.bind_to(self)
self._instructions.add(scene.as_instructions())
Clock.schedule_once(self._update_matrices, -1)
def on_size(self, instance, value):
self.fbo.size = value
self._viewport.texture = self.fbo.texture
self._viewport.size = value
self._viewport.pos = self.pos
self._update_matrices()
def on_pos(self, instance, value):
self._viewport.pos = self.pos
self._update_matrices()
def on_texture(self, instance, value):
self._viewport.texture = value
def _update_matrices(self, dt=None):
if self.camera:
self.fbo['projection_mat'] = self.camera.projection_matrix
self.fbo['modelview_mat'] = self.camera.modelview_matrix
else:
raise RendererError("Camera is not defined for renderer")
def set_clear_color(self, color):
self.fbo.clear_color = color
| mit | -260,763,505,390,716,480 | 32.471074 | 77 | 0.668889 | false |
holinnn/lupin | lupin/fields/field.py | 1 | 4514 | import warnings
from functools import reduce
from lupin import ValidatorsAndCombination, ValidatorsNullCombination
from ..validators import IsNone
from ..processors import null_processor
def _make_processor(processors):
"""Returns a callable for executing the processors
Args:
processors (list): a list of processor functions to execute
"""
if not processors:
return null_processor
return lambda value: reduce(lambda new_value, proc: proc(new_value), processors, value)
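# Illustrative note: _make_processor([strip, lower]) returns a callable
# equivalent to lambda value: lower(strip(value)); each processor receives the
# previous processor's output, and an empty list falls back to null_processor,
# which is assumed to pass the value through unchanged.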
class Field(object):
"""Generic field that does not convert the values"""
def __init__(self, binding=None, default=None, validators=None, read_only=False, write_only=False,
optional=False, allow_none=False, pre_load=None, post_load=None,
pre_dump=None, post_dump=None, ignore_if_null=False):
"""
Args:
binding (str): attribute name to map on object
default (object): default value if data is absent
validators (list|ValidatorsCombination|Validator): list of validators or a combination a validators
read_only (bool): if True the field will only be used to serialize data
write_only (bool): if True the field will only be used to load data
optional (bool): if True it won't raise an error if no value provided for this field
allow_none (bool): if True None is a accepted has a valid value
pre_load (list): list of processors to execute before loading the value
post_load (list): list of processors to execute after loading the value
pre_dump (list): list of processors to execute before dumping the value
post_dump (list): list of processors to execute after dumping the value
ignore_if_null (bool): if True the fields will not be serialize when null
"""
if validators is None:
validators = ValidatorsNullCombination()
elif isinstance(validators, (list, tuple)):
warnings.warn("List of validators is deprecated, please use combinations (&|)", DeprecationWarning)
validators = ValidatorsAndCombination(validators)
if allow_none:
validators = IsNone() | validators
self.binding = binding
self.default = default
self._validators = validators or []
self.is_read_only = read_only
self.is_write_only = write_only
self.is_optional = optional
self.is_ignore_if_null = ignore_if_null
self._pre_load_processor = _make_processor(pre_load)
self._post_load_processor = _make_processor(post_load)
self._pre_dump_processor = _make_processor(pre_dump)
self._post_dump_processor = _make_processor(post_dump)
def pre_load(self, value):
return self._pre_load_processor(value)
def post_load(self, value):
return self._post_load_processor(value)
def _pre_dump(self, value):
return self._pre_dump_processor(value)
def _post_dump(self, value):
return self._post_dump_processor(value)
def load(self, value, mapper):
"""Loads python object from JSON value
Args:
value (object): a value
mapper (Mapper): mapper used to load data
Returns:
object
"""
return value
def dump(self, value, mapper):
"""Dump value to its JSON representation
Args:
value (object): a value
mapper (Mapper): mapper used to dump data
Returns:
object
"""
return value
def extract_attr(self, obj, mapper, key=None):
"""Get value of the `key` attribute of object.
If field has been provided a `binding` then it will
override `key`
Args:
obj (object): object to get value from
mapper (Mapper): mapper used to dump data
key (str): attribute name
Returns:
object
"""
key = self.binding or key
raw_value = getattr(obj, key)
value = self._pre_dump(raw_value)
value = self.dump(value, mapper)
return self._post_dump(value)
def validate(self, value, path, mapper):
"""Validate value againt field validators
Args:
value (object): value to validate
path (list): JSON path of value
mapper (Mapper): mapper used to validate data
"""
self._validators(value, path)
| mit | -4,351,825,536,594,507,000 | 35.112 | 111 | 0.621622 | false |
sayak-brm/struixLang | struixPrimitives.py | 1 | 19513 | ## Copyright 2016-17 Sayak Brahmachari
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
class AddWords:
''' Provides Built-in Words for the struixLang Interpreter. '''
def __init__(self, terp, ENABLE_UNSAFE_OPERATIONS = False, wordSets = None):
''' Collects the primitive words and updates the dicionary. '''
def IMPORT(terp):
name = terp.lexer.nextWord()
            if name == '':
raise SyntaxError('Invalid Syntax')
try: lib = open('./lib/{}.sxLib'.format(name), 'r')
except: raise ImportError('No library named {}.'.format(name))
terp.run(lib.read())
lib.close()
IMPORT.__dict__['immediate'] = True
terp.addWords({'IMPORT': IMPORT})
self.unsafeOps = ENABLE_UNSAFE_OPERATIONS
self.importWordSets(terp, wordSets)
def importWordSets(self, terp, wordSets):
if wordSets is None:
wordSets = ['lists', 'execution', 'math', 'stack', 'values',
'values', 'functions', 'text', 'logic', 'control',
'io', 'pythonOps', 'shorthand']
for wordSet in wordSets:
try:
terp.addWords(eval('self.words4{}()'.format(wordSet)))
except AttributeError:
terp.run('IMPORT {}'.format(wordSet))
@staticmethod
def makeWord(code, imm=False):
''' Makes an executable word from list. '''
def word(terp):
''' Template for a list executor. '''
import types
if isinstance(code, list):
pointer = 0
while pointer < len(code):
if imm:
                        i = code[pointer].__dict__.get('immediate', False)
terp.immediate = i
terp.interpret(code[pointer])
pointer += 1
elif isinstance(code, (types.FunctionType, types.MethodType)):
code(terp)
else:
raise TypeError('Expected List')
return word
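    # Illustrative note: a word built from a list such as [1, 2, PLUS] (where
    # PLUS is an assumed, already-compiled word) is executed by interpreting
    # each element in turn, so literals are pushed and compiled words run,
    # exactly as the interpreter would handle inline code.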
@staticmethod
def getVal(terp, val, lvl):
''' Parses and gets next value from lexer. '''
import types
val = terp.compile(val)
if isinstance(val, (types.FunctionType, types.MethodType)):
''' Evaluates before accepting. '''
val(terp)
while len(terp.compileStack) > lvl:
word = terp.lexer.nextWord()
terp.interpret(terp.compile(word))
if len(terp.stack) < 1:
while len(terp.compileStack) > lvl:
terp.compileStack.pop()
raise SyntaxError('Invalid Syntax.')
val = terp.stack.pop()
return val
@staticmethod
def words4io():
''' Provides Words for output operations. '''
def PRINT(terp):
''' Pops & Displays the Top of Stack (ToS). '''
if len(terp.stack) < 1:
raise IndexError('Not enough items on stack.')
print(terp.stack.pop())
def PSTACK(terp):
''' Displays the complete stack. '''
stackList = terp.stack[:]
stackList.reverse()
print('\n'.join(repr(val) for val in stackList))
def INPUT(terp):
''' Accepts value from user. '''
val = input()
num = terp.parseNumber(val)
if num:
val = num
terp.stack.append(val)
return {
"PRINT": PRINT,
"PSTACK": PSTACK,
"INPUT": INPUT # ,
# ".": PRINT,
# ".S": PSTACK
}
@staticmethod
def words4execution():
''' Provides Words for controlling execution. '''
def RAISE(terp):
''' Raises an error. '''
error = terp.stack.pop()
msg = terp.stack.pop()
print('ERROR: {} - {}'.format(error, msg))
try:
exec('raise {}(\'{}\')'.format(error, msg))
except NameError:
raise RuntimeError('{} - {}'.format(error, msg))
def EXIT(terp):
''' Terminates the execution. '''
exit()
return {
"EXIT": EXIT,
"RAISE": RAISE
}
@staticmethod
def words4math():
''' Provides Words for several operations. '''
def CALCGEN(op):
''' Generates Words for a specific operation. '''
def CALC(terp):
''' Template word for operations. '''
if len(terp.stack) < 2:
raise IndexError('Not enough items on stack.')
n1 = terp.stack.pop()
n2 = terp.stack.pop()
terp.stack.append(eval(repr(n2) + ' ' + op + ' ' + repr(n1)))
return CALC
ops = ['+', '-', '*', '**',
'/', '//', '%', '@',
'<<', '>>', '&', '|',
'^', '~', '<', '>',
'<=', '>=', '==', '!=',
'in', 'is', 'or', 'and']
return dict(zip([op.upper() for op in ops], [CALCGEN(op) for op in ops]))
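    # Illustrative note: each generated word pops two operands and pushes the
    # result of applying its operator, with the second-popped value on the
    # left; for example, running "3 4 +" leaves 7 on the stack.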
@staticmethod
def words4stack():
''' Provides Words for Stack Operations. '''
def DUP(terp):
''' Duplicate Top of Stack (ToS). '''
if len(terp.stack) < 1:
raise IndexError('Not enough items on stack.')
terp.stack.append(terp.stack[-1])
def DROP(terp):
''' Remove Top of Stack (ToS). '''
if len(terp.stack) < 1:
raise IndexError('Not enough items on stack.')
terp.stack.pop()
def SWAP(terp):
''' Exchange positions of ToS and second item on stack (2oS). '''
if len(terp.stack) < 2:
raise IndexError('Not enough items on stack.')
tos = terp.stack.pop()
_2os = terp.stack.pop()
terp.stack.append(tos)
terp.stack.append(_2os)
def OVER(terp):
''' Copy 2oS on top of stack. '''
if len(terp.stack) < 2:
raise IndexError('Not enough items on stack.')
tos = terp.stack.pop()
_2os = terp.stack.pop()
terp.stack.append(_2os)
terp.stack.append(tos)
terp.stack.append(_2os)
def ROT(terp):
            ''' Rotate: move 3oS to the top of stack. '''
if len(terp.stack) < 3:
raise IndexError('Not enough items on stack.')
tos = terp.stack.pop()
_2os = terp.stack.pop()
_3os = terp.stack.pop()
terp.stack.append(_2os)
terp.stack.append(tos)
terp.stack.append(_3os)
return {
"DUP": DUP,
"DROP": DROP,
"SWAP": SWAP,
"OVER": OVER,
"ROT": ROT
}
def words4values(self):
''' Provides support for variables and constants. '''
def VAR(terp):
''' Provides creation of variables. '''
class Variable:
''' Provides a template class for variables. '''
def __init__(self, val=None):
''' Initializes a Variable object. '''
self.val = val
def access(self, terp):
''' Puts a reference to the variable value on the stack. '''
terp.stack.append(self)
name = terp.lexer.nextWord()
            if name == '':
raise SyntaxError('Invalid Syntax')
var = Variable()
terp.define(name, var.access)
def CONST(terp):
''' Provides creation of constants. '''
class Constant:
''' Provides a template class with a write-once value. '''
def __init__(self, val):
''' Initializes a Constant object with a value. '''
object.__setattr__(self, 'val', val)
def __setattr__(self, name, val):
''' Provides a descriptor to prevent changing values. '''
if name is 'val':
raise AttributeError('Constant Attribute.')
object.__setattr__(self, name, val)
def access(self, terp):
''' Puts the value of the constant on the stack. '''
terp.stack.append(self.val)
name = terp.lexer.nextWord()
lvl = len(terp.compileStack)
val = self.getVal(terp, terp.lexer.nextWord(), lvl)
            if name == '' or val == '':
raise SyntaxError('Invalid Syntax')
elif name in terp.dictionary:
raise SyntaxError('Constant value set')
const = Constant(val)
terp.define(name, const.access)
def ASSIGN(terp):
''' Helps storing values to variables. (INFIX) '''
nxt = terp.lexer.nextWord()
            if nxt == '':
raise SyntaxError('Invalid Syntax')
lvl = len(terp.compileStack)
val = self.getVal(terp, nxt, lvl)
def helper(terp):
if len(terp.stack) < 1:
raise IndexError('Not enough items on stack.')
ref = terp.stack.pop()
ref.val = val
if not terp.isCompiling():
helper(terp)
else:
terp.stack.append(helper)
def STORE(terp):
''' Helps storing values to variables. (POSTFIX) '''
if len(terp.stack) < 2:
raise IndexError('Not enough items on stack.')
val = terp.stack.pop()
ref = terp.stack.pop()
ref.val = val
def FETCH(terp):
            ''' Helps retrieving values from variables. '''
if len(terp.stack) < 1:
raise IndexError('Not enough items on stack.')
ref = terp.stack.pop()
terp.stack.append(ref.val)
CONST.__dict__['immediate'] = True
VAR.__dict__['immediate'] = True
ASSIGN.__dict__['immediate'] = True
return {
"VAR": VAR,
"CONST": CONST,
"FETCH": FETCH,
"=": ASSIGN,
"STORE": STORE #,
# "@": FETCH
}
@staticmethod
def words4text():
''' Adds words for handling of comments. '''
def COMMENT(terp):
''' Adds support for comments. '''
terp.lexer.clearLine()
COMMENT.__dict__['immediate'] = True
return {
"#": COMMENT
}
def words4pythonOps(self):
''' Provides interfaces to the Python backend. '''
def REQUESTUNSAFE(terp):
if not self.unsafeOps:
ans = input("Enter Y to allow potentially unsafe operations:")
self.unsafeOps = True if ans.upper() == 'Y' else False
def PYEXEC(terp):
''' Executes Python code. '''
if not self.unsafeOps:
raise PermissionError('Unsafe Operations are disabled.')
exec(terp.stack.pop())
def PYEVAL(terp):
''' Evaluates value of Python code. '''
if not self.unsafeOps:
raise PermissionError('Unsafe Operations are disabled.')
terp.stack.append(eval(terp.stack.pop()))
def PYIMPORT(terp):
''' Evaluates value of Python code. '''
if not self.unsafeOps:
raise PermissionError('Unsafe Operations are disabled.')
module = terp.stack.pop()
exec('global {m}\nimport {m}'.format(m=module))
def PYLITEVAL(terp):
''' Evaluates value of Python expressions. '''
terp.stack.append(__import__('ast').literal_eval(terp.stack.pop()))
return {
"PYEVAL": PYEVAL,
"PYEXEC": PYEXEC,
"PYLITEVAL": PYLITEVAL,
"PYIMPORT": PYIMPORT,
"REQUESTUNSAFE": REQUESTUNSAFE
}
def words4functions(self):
''' Supports creation of user-defined words. '''
def DEF(terp):
''' Marks beginning of user-defined words. '''
name = terp.lexer.nextWord()
if name == '':
raise SyntaxError('Invalid Syntax')
terp.newWord = name
terp.startCompile()
def END(terp):
''' Marks end of user-defined words. '''
if terp.immediate_compiled: return IMMEND(terp)
code = terp.stopCompile()
terp.define(terp.newWord, self.makeWord(code))
terp.newWord = None
def IMMEND(terp):
''' Marks end of immediate user-defined words. '''
code = terp.stopCompile()
word = self.makeWord(code, True)
word.__dict__['immediate'] = True
terp.define(terp.newWord, word)
terp.newWord = None
def NEXT(terp):
''' Appends next word to stack and skips it during execution. '''
def helper(terp):
lvl = len(terp.compileStack)
nxt = terp.lexer.nextWord()
val = self.getVal(terp, nxt, lvl)
terp.stack.append(val)
helper.__dict__['immediate'] = True
if terp.newWord is None:
helper(terp)
else:
terp.stack.append(helper)
NEXT.__dict__['immediate'] = True
DEF.__dict__['immediate'] = True
END.__dict__['immediate'] = True
return {
"DEF": DEF,
"END": END,
"NEXT": NEXT # ,
# ":": DEF,
# ";": END
}
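# A rough usage sketch for word definition (hypothetical struixLang input,
# inferred from DEF/END reading the lexer; it assumes an arithmetic '+' word
# is provided elsewhere in the interpreter):
#
#   DEF DOUBLE DUP + END     # compile a new word 'DOUBLE'
#   5 DOUBLE                 # leaves 10 on the stack
#   NEXT 7                   # pushes the literal 7 instead of executing it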
@staticmethod
def words4lists():
''' Words for list management. '''
def LIST(terp):
''' Creates a list. '''
terp.startCompile()
def LIST_END(terp):
''' Marks end of list. '''
lst = []
lst += terp.stopCompile()
terp.stack.append(lst)
def LENGTH(terp):
''' Gives the length of a list. '''
if len(terp.stack) < 1:
raise IndexError('Not enough items on stack.')
terp.stack.append(len(terp.stack.pop()))
def ITEM(terp):
''' Gives an element of a list. '''
if len(terp.stack) < 2:
raise IndexError('Not enough items on stack.')
key = terp.stack.pop()
terp.stack.append(terp.stack.pop()[key])
LIST.__dict__['immediate'] = True
LIST_END.__dict__['immediate'] = True
return {
"[": LIST,
"]": LIST_END,
"LENGTH": LENGTH,
"ITEM": ITEM
}
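# A rough usage sketch for the list words (hypothetical struixLang input,
# inferred from LIST/LIST_END collecting the items between '[' and ']'):
#
#   [ 10 20 30 ] LENGTH      # leaves 3 on the stack
#   [ 10 20 30 ] 1 ITEM      # leaves 20 on the stack (zero-based index)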
@staticmethod
def words4logic():
''' Words for logical and boolean operations. '''
def NOT(terp):
''' Provides logical operator NOT(!). '''
if len(terp.stack) < 1:
raise IndexError('Not enough items on stack.')
terp.stack.append(not terp.stack.pop())
def TRUE(terp):
''' Represents the boolean True. '''
terp.stack.append(True)
def FALSE(terp):
''' Represents the boolean False. '''
terp.stack.append(False)
TRUE.__dict__['immediate'] = True
FALSE.__dict__['immediate'] = True
return {
"NOT": NOT,
"TRUE": TRUE,
"FALSE": FALSE
}
def words4control(self):
''' Provides control structures. '''
def RUN(terp):
''' Provides execution of lists containing struixLang code. '''
import types
if len(terp.stack) < 1:
raise IndexError('Not enough items on stack.')
code = terp.stack.pop()
if isinstance(code, (types.FunctionType, types.MethodType)):
code(terp)
if isinstance(code, list):
terp.interpret(self.makeWord(code))
def TIMES(terp):
''' Iterating structure like for-loop. '''
if len(terp.stack) < 2:
raise IndexError('Not enough items on stack.')
n = terp.stack.pop()
code = terp.stack.pop()
word = self.makeWord(code)
if n == float('inf'):
while True:
word(terp)
else:
for _ in range(n):
word(terp)
def IFTRUE(terp):
''' Performs a task on receiving TRUE. '''
if len(terp.stack) < 2:
raise IndexError('Not enough items on stack.')
code = terp.stack.pop()
if terp.stack.pop():
terp.interpret(self.makeWord(code))
def IFFALSE(terp):
''' Performs a task on receiving FALSE. '''
if len(terp.stack) < 2:
raise IndexError('Not enough items on stack.')
code = terp.stack.pop()
if not terp.stack.pop():
terp.interpret(self.makeWord(code))
def IFELSE(terp):
''' Performs different task for different boolean values. '''
if len(terp.stack) < 2:
raise IndexError('Not enough items on stack.')
code2 = terp.stack.pop()
code1 = terp.stack.pop()
if terp.stack.pop():
terp.interpret(self.makeWord(code1))
else:
terp.interpret(self.makeWord(code2))
def WHILE(terp):
''' Variable-iteration, entry-control loop. '''
if len(terp.stack) < 2:
raise IndexError('Not enough items on stack.')
code = self.makeWord(terp.stack.pop())
cond = self.makeWord(terp.stack.pop())
while True:
cond(terp)
if len(terp.stack) < 1:
raise IndexError('Not enough items on stack.')
if not terp.stack.pop():
break
code(terp)
def DOWHILE(terp):
''' Variable-iteration, exit-control loop. '''
if len(terp.stack) < 2:
raise IndexError('Not enough items on stack.')
code = self.makeWord(terp.stack.pop())
cond = self.makeWord(terp.stack.pop())
while True:
code(terp)
cond(terp)
if len(terp.stack) < 1:
raise IndexError('Not enough items on stack.')
if not terp.stack.pop():
break
return {
"RUN": RUN,
"TIMES": TIMES,
"IFTRUE": IFTRUE,
"IFFALSE": IFFALSE,
"IFELSE": IFELSE,
"WHILE": WHILE,
"DOWHILE": DOWHILE
}
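# A rough usage sketch for the control words (hypothetical struixLang input,
# inferred from the stack order each word pops: the code list is pushed first,
# then the count or condition, before the control word runs):
#
#   [ ... body ... ] 3 TIMES                  # run the body three times
#   TRUE [ ... body ... ] IFTRUE              # run the body only when TOS is TRUE
#   [ ... cond ... ] [ ... body ... ] WHILE   # entry-controlled loop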
| apache-2.0 | -3,881,928,785,915,033,000 | 37.639604 | 81 | 0.484651 | false |
JioCloud/oslo-incubator | tests/unit/test_context.py | 1 | 1036 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.common import context
from openstack.common import test
class ContextTest(test.BaseTestCase):
def test_context(self):
ctx = context.RequestContext()
self.assertTrue(ctx)
def test_admin_context_show_deleted_flag_default(self):
ctx = context.get_admin_context()
self.assertFalse(ctx.show_deleted)
| apache-2.0 | -9,115,042,374,442,809,000 | 33.533333 | 78 | 0.723938 | false |
tseaver/google-cloud-python | spanner/pylint.config.py | 8 | 1197 | # Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module is used to configure gcp-devrel-py-tools run-pylint."""
import copy
from gcp_devrel.tools import pylint
# Library configuration
# library_additions = {}
# Ignore generated code
library_replacements = copy.deepcopy(pylint.DEFAULT_LIBRARY_RC_REPLACEMENTS)
library_replacements['MASTER']['ignore'].append('spanner_v1')
library_replacements['MASTER']['ignore'].append('spanner_admin_instance_v1')
library_replacements['MASTER']['ignore'].append('spanner_admin_database_v1')
# Test configuration
# test_additions = copy.deepcopy(library_additions)
# test_replacements = copy.deepcopy(library_replacements)
| apache-2.0 | 2,929,610,537,561,989,000 | 35.272727 | 76 | 0.767753 | false |
tempbottle/ironpython3 | Tests/test_builtinfunc.py | 2 | 21492 | #####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# [email protected]. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
#-----------------------------------------------------------------------------------
#These have to be run first: importing iptest.assert_util masked a bug. Just make sure
#these do not throw
for stuff in [bool, True, False]:
temp = dir(stuff)
items = globals().items() #4716
import sys
def cp946():
builtins = __builtins__
if type(builtins) is type(sys):
builtins = builtins.__dict__
if "hasattr" not in builtins:
raise "hasattr should be in __builtins__"
if "HasAttr" in builtins:
raise "HasAttr should not be in __builtins__"
cp946()
if sys.platform=="cli":
import clr
cp946()
#-----------------------------------------------------------------------------------
from iptest.assert_util import *
AssertError(NameError, lambda: __new__)
def test_callable():
class C: x=1
Assert(callable(min))
Assert(not callable("a"))
Assert(callable(callable))
Assert(callable(lambda x, y: x + y))
Assert(callable(C))
Assert(not callable(C.x))
Assert(not callable(__builtins__))
def test_callable_oldclass():
# for class instances, callable related to whether the __call__ attribute is defined.
# This can be mutated at runtime.
class Dold:
pass
d=Dold()
#
AreEqual(callable(d), False)
#
d.__call__ = None # This defines the attr, even though it's None
AreEqual(callable(d), True) # True for oldinstance, False for new classes.
#
del (d.__call__) # now remove the attr, no longer callable
AreEqual(callable(d), False)
def test_callable_newclass():
class D(object):
pass
AreEqual(callable(D), True)
d=D()
AreEqual(callable(d), False)
#
# New class with a __call__ defined is callable()
class D2(object):
def __call__(self): pass
d2=D2()
AreEqual(callable(d2), True)
# Inherit callable
class D3(D2):
pass
d3=D3()
AreEqual(callable(d3), True)
def test_cmp():
x = {}
x['foo'] = x
y = {}
y['foo'] = y
AssertError(RuntimeError, cmp, x, y)
def test_reversed():
class ToReverse:
a = [1,2,3,4,5,6,7,8,9,0]
def __len__(self): return len(self.a)
def __getitem__(self, i): return self.a[i]
x = []
for i in reversed(ToReverse()):
x.append(i)
Assert(x == [0,9,8,7,6,5,4,3,2,1])
# more tests for 'reversed'
AssertError(TypeError, reversed, 1) # arg to reversed must be a sequence
AssertError(TypeError, reversed, None)
AssertError(TypeError, reversed, ToReverse)
# no __len__ on class, reversed should throw
class x(object):
def __getitem__(self, index): return 2
def __len__(): return 42
a = x()
a.__len__ = __len__
AssertError(TypeError, reversed, a)
# no __len__ on class, reversed should throw
class x(object):
def __len__(self): return 42
def __getitem__(index): return 2
a = x()
a.__getitem__ = __getitem__
AssertError(TypeError, reversed, a)
def test_reduce():
def add(x,y): return x+y;
Assert(reduce(add, [2,3,4]) == 9)
Assert(reduce(add, [2,3,4], 1) == 10)
AssertError(TypeError, reduce, None, [1,2,3]) # arg 1 must be callable
AssertError(TypeError, reduce, None, [2,3,4], 1)
AssertError(TypeError, reduce, add, None) # arg 2 must support iteration
AssertError(TypeError, reduce, add, None, 1)
AssertError(TypeError, reduce, add, []) # arg 2 must not be empty sequence with no initial value
AssertError(TypeError, reduce, add, "") # empty string sequence
AssertError(TypeError, reduce, add, ()) # empty tuple sequence
Assert(reduce(add, [], 1), 1) # arg 2 has initial value through arg 3 so no TypeError for this
Assert(reduce(add, [], None) == None)
AssertError(TypeError, reduce, add, [])
AssertError(TypeError, reduce, add, "")
AssertError(TypeError, reduce, add, ())
def test_apply():
def foo(): return 42
AreEqual(apply(foo), 42)
def test_map():
def cat(x,y):
ret = ""
if x != None: ret += x
if y != None: ret += y
return ret
Assert(map(cat, ["a","b"],["c","d", "e"]) == ["ac", "bd", "e"])
Assert(map(lambda x,y: x+y, [1,1],[2,2]) == [3,3])
Assert(map(None, [1,2,3]) == [1,2,3])
Assert(map(None, [1,2,3], [4,5,6]) == [(1,4),(2,5),(3,6)])
AreEqual(map(lambda x:'a' + x + 'c', 'b'), ['abc'])
def test_range():
AssertErrorWithMessage(TypeError, "range() integer end argument expected, got float.",
range, 2, 5.0)
AssertErrorWithMessage(TypeError, "range() integer step argument expected, got float.",
range, 3, 10, 2.0)
AssertErrorWithMessage(TypeError, "range() integer end argument expected, got float.",
range, float(-2<<32))
AssertErrorWithMessage(TypeError, "range() integer end argument expected, got float.",
range, 0, float(-2<<32))
AssertErrorWithMessage(TypeError, "range() integer start argument expected, got float.",
range, float(-2<<32), 100)
AssertErrorWithMessage(TypeError, "range() integer step argument expected, got float.",
range, 0, 100, float(-2<<32))
AssertErrorWithMessage(TypeError, "range() integer end argument expected, got float.",
range, float(-2<<32), float(-2<<32), float(-2<<32))
def test_sorted():
a = [6,9,4,5,3,1,2,7,8]
Assert(sorted(a) == [1,2,3,4,5,6,7,8,9])
Assert(a == [6,9,4,5,3,1,2,7,8])
Assert(sorted(a, None, None, True) == [9,8,7,6,5,4,3,2,1])
def invcmp(a,b): return -cmp(a,b)
Assert(sorted(range(10), None, None, True) == range(10)[::-1])
Assert(sorted(range(9,-1,-1), None, None, False) == range(10))
Assert(sorted(range(10), invcmp, None, True) == sorted(range(9,-1,-1), None, None, False))
Assert(sorted(range(9,-1,-1),invcmp, None, True) == sorted(range(9,-1,-1), None, None, False))
class P:
def __init__(self, n, s):
self.n = n
self.s = s
def equal_p(a,b): return a.n == b.n and a.s == b.s
def key_p(a): return a.n.lower()
def cmp_s(a,b): return cmp(a.s, b.s)
def cmp_n(a,b): return cmp(a.n, b.n)
a = [P("John",6),P("Jack",9),P("Gary",4),P("Carl",5),P("David",3),P("Joe",1),P("Tom",2),P("Tim",7),P("Todd",8)]
x = sorted(a, cmp_s)
y = [P("Joe",1),P("Tom",2),P("David",3),P("Gary",4),P("Carl",5),P("John",6),P("Tim",7),P("Todd",8),P("Jack",9)]
for i,j in zip(x,y): Assert(equal_p(i,j))
# case sensitive comparison is the default one
a = [P("John",6),P("jack",9),P("gary",4),P("carl",5),P("David",3),P("Joe",1),P("Tom",2),P("Tim",7),P("todd",8)]
x = sorted(a, cmp_n)
y = [P("David",3),P("Joe",1),P("John",6),P("Tim",7),P("Tom",2),P("carl",5),P("gary",4),P("jack",9),P("todd",8)]
for i,j in zip(x,y): Assert(equal_p(i,j))
# now compare using keys - case insensitive
x = sorted(a,None,key_p)
y = [P("carl",5),P("David",3),P("gary",4),P("jack",9),P("Joe",1),P("John",6),P("Tim",7),P("todd",8),P("Tom",2)]
for i,j in zip(x,y): Assert(equal_p(i,j))
d = {'John': 6, 'Jack': 9, 'Gary': 4, 'Carl': 5, 'David': 3, 'Joe': 1, 'Tom': 2, 'Tim': 7, 'Todd': 8}
x = sorted([(v,k) for k,v in d.items()])
Assert(x == [(1, 'Joe'), (2, 'Tom'), (3, 'David'), (4, 'Gary'), (5, 'Carl'), (6, 'John'), (7, 'Tim'), (8, 'Todd'), (9, 'Jack')])
def test_sum():
class user_object(object):
def __add__(self, other):
return self
def __radd__(self, other):
return self
def gen(x):
for a in x: yield a
def sumtest(values, expected):
for value in values, tuple(values), gen(values):
res = sum(values)
AreEqual(res, expected)
AreEqual(type(res), type(expected))
res = sum(values, 0)
AreEqual(res, expected)
AreEqual(type(res), type(expected))
uo = user_object()
# int + other
sumtest([1, 1], 2)
sumtest([2147483647, 1], 2147483648L)
sumtest([1, 1.0], 2.0)
sumtest([1, 1L], 2L)
sumtest([1, uo], uo)
# double and other
sumtest([1.0, 1], 2.0)
sumtest([2147483647.0, 1], 2147483648.0)
sumtest([1.0, 1.0], 2.0)
sumtest([1.0, 1L], 2.0)
sumtest([1.0, uo], uo)
# long and other
sumtest([1L, 1], 2L)
sumtest([2147483647L, 1], 2147483648L)
sumtest([1L, 1.0], 2.0)
sumtest([1L, 1L], 2L)
sumtest([1L, uo], uo)
# corner cases
sumtest([1L, 2.0, 3], 6.0)
sumtest([2147483647, 1, 1.0], 2147483649.0)
inf = 1.7976931348623157e+308*2
sumtest([1.7976931348623157e+308, long(1.7976931348623157e+308)], inf)
AssertError(OverflowError, sum, [1.0, 100000000L<<2000])
def test_unichr():
#Added the following to resolve Codeplex WorkItem #3220.
max_uni = sys.maxunicode
Assert(max_uni==0xFFFF or max_uni==0x10FFFF)
max_uni_plus_one = max_uni + 1
huger_than_max = 100000
max_ok_value = u'\uffff'
#special case for WorkItem #3220
if max_uni==0x10FFFF:
huger_than_max = 10000000
max_ok_value = u'\u0010FFFF' #OK representation for UCS4???
AssertError(ValueError, unichr, -1) # arg must be in the range [0...65535] or [0...1114111] inclusive
AssertError(ValueError, unichr, max_uni_plus_one)
AssertError(ValueError, unichr, huger_than_max)
Assert(unichr(0) == '\x00')
Assert(unichr(max_uni) == max_ok_value)
def test_max_min():
Assert(max([1,2,3]) == 3)
Assert(max((1,2,3)) == 3)
Assert(max(1,2,3) == 3)
Assert(min([1,2,3]) == 1)
Assert(min((1,2,3)) == 1)
Assert(min(1,2,3) == 1)
AreEqual(max((1,2), None), (1, 2))
AreEqual(min((1,2), None), None)
def test_abs():
AssertError(TypeError,abs,None)
#long integer passed to abs
AreEqual(22L, abs(22L))
AreEqual(22L, abs(-22L))
#bool passed to abs
AreEqual(1, abs(True))
AreEqual(0, abs(False))
#__abs__ defined on user type
class myclass:
def __abs__(self):
return "myabs"
c = myclass()
AreEqual("myabs", abs(c))
def test_coerce():
AreEqual(coerce(None, None), (None, None))
AssertError(TypeError, coerce, None, 1)
AssertError(TypeError, coerce, 1, None)
class x(object):
def __init__(self, value):
self.value = value
def __int__(self):
return self.value
def __coerce__(self, other):
return self, x(other)
def __eq__(self, other):
return self.value == other.value
def __repr__(self):
return 'x(' + repr(self.value) + ')'
for value in (x(42), 42., 42L):
AreEqual(int.__coerce__(0, value), NotImplemented)
l, r = coerce(0, value)
AreEqual((r, l), (value, type(value)(0)))
AreEqual((type(l), type(r)), (type(value), type(value)))
def test_zip():
def foo(): yield 2
def bar():
yield 2
yield 3
AreEqual(zip(foo()), [(2,)])
AreEqual(zip(foo(), foo()), [(2,2)])
AreEqual(zip(foo(), foo(), foo()), [(2,2,2)])
AreEqual(zip(bar(), foo()), [(2,2)])
AreEqual(zip(foo(), bar()), [(2,2)])
# test passing the same object for multiple iterables
AreEqual(zip(*[iter([])]), [])
AreEqual(zip(*[iter([])] * 2), [])
AreEqual(zip(*[xrange(3)] * 2), [(0, 0), (1, 1), (2, 2)])
AreEqual(zip(*[iter(["0", "1"])] * 2), [('0', '1')])
AreEqual(zip(*[iter(["0", "1", "2"])] * 3), [('0', '1', '2')])
AreEqual(zip(*'abc'), [('a', 'b', 'c')])
def test_dir():
local_var = 10
AreEqual(dir(), ['local_var'])
def f():
local_var = 10
AreEqual(dir(*()), ['local_var'])
f()
def f():
local_var = 10
AreEqual(dir(**{}), ['local_var'])
f()
def f():
local_var = 10
AreEqual(dir(*(), **{}), ['local_var'])
f()
class A(object):
def __dir__(self):
return ['foo']
def __init__(self):
self.abc = 3
AreEqual(dir(A()), ['foo'])
class C:
a = 1
class D(object, C):
b = 2
Assert('a' in dir(D()))
Assert('b' in dir(D()))
Assert('__hash__' in dir(D()))
if is_cli:
import clr
try:
clr.AddReference("Microsoft.Scripting.Core")
except Exception, e:
if is_net40:
clr.AddReference("System.Core")
else:
raise e
from System.Dynamic import ExpandoObject
eo = ExpandoObject()
eo.bill = 5
Assert('bill' in dir(eo))
def test_ord():
# ord of extensible string
class foo(str): pass
AreEqual(ord(foo('A')), 65)
@skip("silverlight", "Merlin bug #404247: this test doesn't work when the file is executed from non-Python host (thost)" )
def test_top_level_dir():
Assert("__name__" in top_level_dir)
Assert("__builtins__" in top_level_dir)
top_level_dir = dir()
x = 10
y = 20
def test_eval():
d1 = { 'y' : 3 }
d2 = { 'x' : 4 }
AreEqual(eval("x + y", None, d1), 13)
AreEqual(eval("x + y", None, None), 30)
AreEqual(eval("x + y", None), 30)
AreEqual(eval("x + y", None, d2), 24)
AssertError(NameError, eval, "x + y", d1)
AssertError(NameError, eval, "x + y", d1, None)
try:
eval('1+')
AssertUnreachable()
except Exception, exp:
pass
else:
AssertUnreachable()
# test one of each expression in all sorts of combinations
foo = 1
bar = 2
def f(): return 42
exprs = ['foo',
'23',
'$inp and $inp',
'$inp or $inp',
'`42`',
'$inp + $inp',
'f()',
'lambda :42',
'$inp if $inp else $inp',
'[$inp]',
'{$inp:$inp}',
'($inp).__class__',
'{$inp}',
'($inp, )',
'($inp)',
'[x for x in (2, 3, 4)]']
def process(depth):
if(depth > 2):
yield '42'
else:
for x in exprs:
processors = [process(depth + 1)] * x.count('$inp')
if processors:
while 1:
try:
newx = x
for i in xrange(len(processors)):
new = processors[i].next()
newx = newx.replace('$inp', new, 1)
yield newx
except StopIteration:
break
else:
yield x
for x in process(0):
try:
print eval(x)
except SyntaxError: pass
except TypeError: pass
def test_len():
# old-style classes throw AttributeError, new-style classes throw
# TypeError
AssertError(TypeError, len, 2)
class foo: pass
AssertError(AttributeError, len, foo())
class foo(object): pass
AssertError(TypeError, len, foo())
def test_int_ctor():
AreEqual(int('0x10', 16), 16)
AreEqual(int('0X10', 16), 16)
AreEqual(long('0x10', 16), 16L)
AreEqual(long('0X10', 16), 16L)
def test_type():
AreEqual(len(type.__bases__), 1)
AreEqual(type.__bases__[0], object)
def test_globals():
Assert(not globals().has_key("_"))
AreEqual(globals().keys().count("_"), 0)
def test_vars():
"""vars should look for user defined __dict__ value and directly return the provided value"""
class foo(object):
def getDict(self):
return {'a':2}
__dict__ = property(fget=getDict)
AreEqual(vars(foo()), {'a':2})
class foo(object):
def __getattribute__(self, name):
if name == "__dict__":
return {'a':2}
return object.__getattribute__(self, name)
AreEqual(vars(foo()), {'a':2})
class foo(object):
def getDict(self):
return 'abc'
__dict__ = property(fget=getDict)
AreEqual(vars(foo()), 'abc')
class foo(object):
def __getattribute__(self, name):
if name == "__dict__":
return 'abc'
return object.__getattribute__(self, name)
AreEqual(vars(foo()), 'abc')
def f():
local_var = 10
AreEqual(vars(*()), {'local_var' : 10})
f()
def f():
local_var = 10
AreEqual(vars(**{}), {'local_var' : 10})
f()
def f():
local_var = 10
AreEqual(vars(*(), **{}), {'local_var' : 10})
f()
def test_compile():
for x in ['exec', 'eval', 'single']:
c = compile('2', 'foo', x)
AreEqual(c.co_filename, 'foo')
class mystdout(object):
def __init__(self):
self.data = []
def write(self, data):
self.data.append(data)
import sys
out = mystdout()
sys.stdout = out
try:
c = compile('2', 'test', 'single')
exec c
AreEqual(out.data, ['2', '\n'])
finally:
sys.stdout = sys.__stdout__
for code in ["abc" + chr(0) + "def", chr(0) + "def", "def" + chr(0)]:
AssertError(TypeError, compile, code, 'f', 'exec')
def test_str_none():
class foo(object):
def __str__(self):
return None
AreEqual(foo().__str__(), None)
AssertError(TypeError, str, foo())
def test_not_in_globals():
AssertError(NameError, lambda: __dict__)
AssertError(NameError, lambda: __module__)
AssertError(NameError, lambda: __class__)
AssertError(NameError, lambda: __init__)
# Regress bug 319126: __int__ on long should return long, not overflow
def test_long_int():
l=long(1.23e300)
i = l.__int__()
Assert(type(l) == type(i))
Assert(i == l)
def test_round():
AreEqual(round(number=3.4), 3.0)
AreEqual(round(number=3.125, ndigits=3), 3.125)
AreEqual(round(number=3.125, ndigits=0), 3)
def test_cp16000():
class K(object):
FOO = 39
def fooFunc():
return K.FOO
def memberFunc(self):
return K.FOO * 3.14
temp_list = [ None, str, int, long, K,
"", "abc", u"abc", 34, 1111111111111L, 3.14, K(), K.FOO,
id, hex, K.fooFunc, K.memberFunc, K().memberFunc,
]
if is_cli:
import System
temp_list += [ System.Exception, System.InvalidOperationException(),
System.Single, System.UInt16(5), System.Version()]
for x in temp_list:
Assert(type(id(x)) in [int, long],
str(type(id(x))))
#------------------------------------------------------------------------------
def test_locals_contains():
global locals_globals
locals_globals = 2
def func():
Assert(not 'locals_globals' in locals())
func()
def in_main():
Assert(not 'locals_globals' in locals())
Assert(not 'locals_globals' in globals())
def in_main_sub1():
Assert(not 'locals_globals' in locals())
Assert(not 'locals_globals' in globals())
def in_main_sub2():
global local_globals
Assert(not 'locals_globals' in locals())
Assert('locals_globals' in globals())
def in_main_sub3():
local_globals = 42
Assert(not 'locals_globals' in locals())
Assert('locals_globals' in globals())
in_main_sub3()
in_main_sub1()
return in_main_sub2
def test_error_messages():
AssertErrorWithMessages(TypeError, "join() takes exactly 1 argument (2 given)", "join() takes exactly one argument (2 given)", "".join, ["a", "b"], "c")
def test_enumerate():
class MyIndex(object):
def __init__(self, value):
self.value = value
def __index__(self):
return self.value
for value_maker in MyIndex, lambda x: x:
AreEqual([(10, 2), (11, 3), (12, 4)], list(enumerate([2,3,4], value_maker(10))))
AreEqual([(10, 2), (11, 3), (12, 4)], list(enumerate([2,3,4], start=value_maker(10))))
AreEqual([(2147483647, 2), (2147483648, 3), (2147483649, 4)], list(enumerate([2,3,4], value_maker(int((1<<31) - 1)))))
AreEqual([(2147483648, 2), (2147483649, 3), (2147483650, 4)], list(enumerate([2,3,4], value_maker(1<<31))))
temp_func = in_main()
locals_globals = 7
temp_func()
#------------------------------------------------------------------------------
run_test(__name__)
| apache-2.0 | 4,978,919,575,645,806,000 | 28.726141 | 156 | 0.520659 | false |
direvus/ansible | lib/ansible/modules/network/aci/aci_tenant_span_src_group.py | 10 | 7001 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_tenant_span_src_group
short_description: Manage SPAN source groups (span:SrcGrp)
description:
- Manage SPAN source groups on Cisco ACI fabrics.
notes:
- The C(tenant) used must exist before using this module in your playbook.
The M(aci_tenant) module can be used for this.
- More information about the internal APIC class B(span:SrcGrp) from
L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Jacob McGill (@jmcgill298)
version_added: '2.4'
options:
admin_state:
description:
- Enable or disable the span sources.
- The APIC defaults to C(yes) when unset during creation.
type: bool
description:
description:
- The description for Span source group.
aliases: [ descr ]
dst_group:
description:
- The Span destination group to associate with the source group.
src_group:
description:
- The name of the Span source group.
aliases: [ name ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
tenant:
description:
- The name of the Tenant.
aliases: [ tenant_name ]
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- aci_tenant_span_src_group:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
src_group: "{{ src_group }}"
dst_group: "{{ dst_group }}"
admin_state: "{{ admin_state }}"
description: "{{ description }}"
delegate_to: localhost
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
admin_state=dict(type='raw'), # Turn into a boolean in v2.9
description=dict(type='str', aliases=['descr']),
dst_group=dict(type='str'),
src_group=dict(type='str', required=False, aliases=['name']), # Not required for querying all objects
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
tenant=dict(type='str', required=False, aliases=['tenant_name']), # Not required for querying all objects
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['src_group', 'tenant']],
['state', 'present', ['src_group', 'tenant']],
],
)
aci = ACIModule(module)
admin_state = aci.boolean(module.params['admin_state'], 'enabled', 'disabled')
description = module.params['description']
dst_group = module.params['dst_group']
src_group = module.params['src_group']
state = module.params['state']
tenant = module.params['tenant']
aci.construct_url(
root_class=dict(
aci_class='fvTenant',
aci_rn='tn-{0}'.format(tenant),
module_object=tenant,
target_filter={'name': tenant},
),
subclass_1=dict(
aci_class='spanSrcGrp',
aci_rn='srcgrp-{0}'.format(src_group),
module_object=src_group,
target_filter={'name': src_group},
),
child_classes=['spanSpanLbl'],
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='spanSrcGrp',
class_config=dict(
adminSt=admin_state,
descr=description,
name=src_group,
),
child_configs=[{'spanSpanLbl': {'attributes': {'name': dst_group}}}],
)
aci.get_diff(aci_class='spanSrcGrp')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
| gpl-3.0 | 3,400,795,473,265,557,000 | 27.229839 | 141 | 0.59163 | false |
jniediek/mne-python | mne/tests/test_dipole.py | 3 | 15060 | import os
import os.path as op
import sys
import warnings
import numpy as np
from nose.tools import assert_true, assert_equal, assert_raises
from numpy.testing import assert_allclose
from mne import (read_dipole, read_forward_solution,
convert_forward_solution, read_evokeds, read_cov,
SourceEstimate, write_evokeds, fit_dipole,
transform_surface_to, make_sphere_model, pick_types,
pick_info, EvokedArray, read_source_spaces, make_ad_hoc_cov,
make_forward_solution, Dipole, DipoleFixed, Epochs,
make_fixed_length_events)
from mne.simulation import simulate_evoked
from mne.datasets import testing
from mne.utils import (run_tests_if_main, _TempDir, slow_test, requires_mne,
run_subprocess)
from mne.proj import make_eeg_average_ref_proj
from mne.io import read_raw_fif, read_raw_ctf
from mne.surface import _compute_nearest
from mne.bem import _bem_find_surface, read_bem_solution
from mne.transforms import apply_trans, _get_trans
warnings.simplefilter('always')
data_path = testing.data_path(download=False)
fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
fname_dip = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_set1.dip')
fname_evo = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-ave.fif')
fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif')
fname_bem = op.join(data_path, 'subjects', 'sample', 'bem',
'sample-1280-1280-1280-bem-sol.fif')
fname_src = op.join(data_path, 'subjects', 'sample', 'bem',
'sample-oct-2-src.fif')
fname_trans = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-trans.fif')
fname_fwd = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
fname_xfit_dip = op.join(data_path, 'dip', 'fixed_auto.fif')
fname_xfit_dip_txt = op.join(data_path, 'dip', 'fixed_auto.dip')
fname_xfit_seq_txt = op.join(data_path, 'dip', 'sequential.dip')
fname_ctf = op.join(data_path, 'CTF', 'testdata_ctf_short.ds')
subjects_dir = op.join(data_path, 'subjects')
def _compare_dipoles(orig, new):
"""Compare dipole results for equivalence."""
assert_allclose(orig.times, new.times, atol=1e-3, err_msg='times')
assert_allclose(orig.pos, new.pos, err_msg='pos')
assert_allclose(orig.amplitude, new.amplitude, err_msg='amplitude')
assert_allclose(orig.gof, new.gof, err_msg='gof')
assert_allclose(orig.ori, new.ori, rtol=1e-4, atol=1e-4, err_msg='ori')
assert_equal(orig.name, new.name)
def _check_dipole(dip, n_dipoles):
"""Check dipole sizes."""
assert_equal(len(dip), n_dipoles)
assert_equal(dip.pos.shape, (n_dipoles, 3))
assert_equal(dip.ori.shape, (n_dipoles, 3))
assert_equal(dip.gof.shape, (n_dipoles,))
assert_equal(dip.amplitude.shape, (n_dipoles,))
@testing.requires_testing_data
def test_io_dipoles():
"""Test IO for .dip files."""
tempdir = _TempDir()
dipole = read_dipole(fname_dip)
print(dipole) # test repr
out_fname = op.join(tempdir, 'temp.dip')
dipole.save(out_fname)
dipole_new = read_dipole(out_fname)
_compare_dipoles(dipole, dipole_new)
@testing.requires_testing_data
def test_dipole_fitting_ctf():
"""Test dipole fitting with CTF data."""
raw_ctf = read_raw_ctf(fname_ctf).set_eeg_reference()
events = make_fixed_length_events(raw_ctf, 1)
evoked = Epochs(raw_ctf, events, 1, 0, 0, baseline=None,
add_eeg_ref=False).average()
cov = make_ad_hoc_cov(evoked.info)
sphere = make_sphere_model((0., 0., 0.))
# XXX Eventually we should do some better checks about accuracy, but
# for now our CTF phantom fitting tutorials will have to do
# (otherwise we need to add that to the testing dataset, which is
# a bit too big)
fit_dipole(evoked, cov, sphere)
@slow_test
@testing.requires_testing_data
@requires_mne
def test_dipole_fitting():
"""Test dipole fitting."""
amp = 10e-9
tempdir = _TempDir()
rng = np.random.RandomState(0)
fname_dtemp = op.join(tempdir, 'test.dip')
fname_sim = op.join(tempdir, 'test-ave.fif')
fwd = convert_forward_solution(read_forward_solution(fname_fwd),
surf_ori=False, force_fixed=True)
evoked = read_evokeds(fname_evo)[0]
cov = read_cov(fname_cov)
n_per_hemi = 5
vertices = [np.sort(rng.permutation(s['vertno'])[:n_per_hemi])
for s in fwd['src']]
nv = sum(len(v) for v in vertices)
stc = SourceEstimate(amp * np.eye(nv), vertices, 0, 0.001)
evoked = simulate_evoked(fwd, stc, evoked.info, cov, snr=20,
random_state=rng)
# For speed, let's use a subset of channels (strange but works)
picks = np.sort(np.concatenate([
pick_types(evoked.info, meg=True, eeg=False)[::2],
pick_types(evoked.info, meg=False, eeg=True)[::2]]))
evoked.pick_channels([evoked.ch_names[p] for p in picks])
evoked.add_proj(make_eeg_average_ref_proj(evoked.info))
write_evokeds(fname_sim, evoked)
# Run MNE-C version
run_subprocess([
'mne_dipole_fit', '--meas', fname_sim, '--meg', '--eeg',
'--noise', fname_cov, '--dip', fname_dtemp,
'--mri', fname_fwd, '--reg', '0', '--tmin', '0',
])
dip_c = read_dipole(fname_dtemp)
# Run mne-python version
sphere = make_sphere_model(head_radius=0.1)
dip, residuals = fit_dipole(evoked, fname_cov, sphere, fname_fwd)
# Sanity check: do our residuals have less power than orig data?
data_rms = np.sqrt(np.sum(evoked.data ** 2, axis=0))
resi_rms = np.sqrt(np.sum(residuals ** 2, axis=0))
factor = 1.
# XXX weird, inexplicable difference for 3.5 build we'll assume is due to
# Anaconda bug for now...
if os.getenv('TRAVIS', 'false') == 'true' and \
sys.version[:3] in ('3.5', '2.7'):
factor = 0.8
assert_true((data_rms > factor * resi_rms).all(),
msg='%s (factor: %s)' % ((data_rms / resi_rms).min(), factor))
# Compare to original points
transform_surface_to(fwd['src'][0], 'head', fwd['mri_head_t'])
transform_surface_to(fwd['src'][1], 'head', fwd['mri_head_t'])
src_rr = np.concatenate([s['rr'][v] for s, v in zip(fwd['src'], vertices)],
axis=0)
src_nn = np.concatenate([s['nn'][v] for s, v in zip(fwd['src'], vertices)],
axis=0)
# MNE-C skips the last "time" point :(
dip.crop(dip_c.times[0], dip_c.times[-1])
src_rr, src_nn = src_rr[:-1], src_nn[:-1]
# check that we did at least as well
corrs, dists, gc_dists, amp_errs, gofs = [], [], [], [], []
for d in (dip_c, dip):
new = d.pos
diffs = new - src_rr
corrs += [np.corrcoef(src_rr.ravel(), new.ravel())[0, 1]]
dists += [np.sqrt(np.mean(np.sum(diffs * diffs, axis=1)))]
gc_dists += [180 / np.pi * np.mean(np.arccos(np.sum(src_nn * d.ori,
axis=1)))]
amp_errs += [np.sqrt(np.mean((amp - d.amplitude) ** 2))]
gofs += [np.mean(d.gof)]
assert_true(dists[0] >= dists[1] * factor, 'dists: %s' % dists)
assert_true(corrs[0] <= corrs[1] / factor, 'corrs: %s' % corrs)
assert_true(gc_dists[0] >= gc_dists[1] * factor,
'gc-dists (ori): %s' % gc_dists)
assert_true(amp_errs[0] >= amp_errs[1] * factor,
'amplitude errors: %s' % amp_errs)
assert_true(gofs[0] <= gofs[1] / factor, 'gof: %s' % gofs)
@testing.requires_testing_data
def test_dipole_fitting_fixed():
"""Test dipole fitting with a fixed position."""
tpeak = 0.073
sphere = make_sphere_model(head_radius=0.1)
evoked = read_evokeds(fname_evo, baseline=(None, 0))[0]
evoked.pick_types(meg=True)
t_idx = np.argmin(np.abs(tpeak - evoked.times))
evoked_crop = evoked.copy().crop(tpeak, tpeak)
assert_equal(len(evoked_crop.times), 1)
cov = read_cov(fname_cov)
dip_seq, resid = fit_dipole(evoked_crop, cov, sphere)
assert_true(isinstance(dip_seq, Dipole))
assert_equal(len(dip_seq.times), 1)
pos, ori, gof = dip_seq.pos[0], dip_seq.ori[0], dip_seq.gof[0]
amp = dip_seq.amplitude[0]
# Fix position, allow orientation to change
dip_free, resid_free = fit_dipole(evoked, cov, sphere, pos=pos)
assert_true(isinstance(dip_free, Dipole))
assert_allclose(dip_free.times, evoked.times)
assert_allclose(np.tile(pos[np.newaxis], (len(evoked.times), 1)),
dip_free.pos)
assert_allclose(ori, dip_free.ori[t_idx]) # should find same ori
assert_true(np.dot(dip_free.ori, ori).mean() < 0.9) # but few the same
assert_allclose(gof, dip_free.gof[t_idx]) # ... same gof
assert_allclose(amp, dip_free.amplitude[t_idx]) # and same amp
assert_allclose(resid, resid_free[:, [t_idx]])
# Fix position and orientation
dip_fixed, resid_fixed = fit_dipole(evoked, cov, sphere, pos=pos, ori=ori)
assert_true(isinstance(dip_fixed, DipoleFixed))
assert_allclose(dip_fixed.times, evoked.times)
assert_allclose(dip_fixed.info['chs'][0]['loc'][:3], pos)
assert_allclose(dip_fixed.info['chs'][0]['loc'][3:6], ori)
assert_allclose(dip_fixed.data[1, t_idx], gof)
assert_allclose(resid, resid_fixed[:, [t_idx]])
_check_roundtrip_fixed(dip_fixed)
# Degenerate conditions
assert_raises(ValueError, fit_dipole, evoked, cov, sphere, pos=[0])
assert_raises(ValueError, fit_dipole, evoked, cov, sphere, ori=[1, 0, 0])
assert_raises(ValueError, fit_dipole, evoked, cov, sphere, pos=[0, 0, 0],
ori=[2, 0, 0])
assert_raises(ValueError, fit_dipole, evoked, cov, sphere, pos=[0.1, 0, 0])
@testing.requires_testing_data
def test_len_index_dipoles():
"""Test len and indexing of Dipole objects."""
dipole = read_dipole(fname_dip)
d0 = dipole[0]
d1 = dipole[:1]
_check_dipole(d0, 1)
_check_dipole(d1, 1)
_compare_dipoles(d0, d1)
mask = dipole.gof > 15
idx = np.where(mask)[0]
d_mask = dipole[mask]
_check_dipole(d_mask, 4)
_compare_dipoles(d_mask, dipole[idx])
@testing.requires_testing_data
def test_min_distance_fit_dipole():
"""Test dipole min_dist to inner_skull."""
subject = 'sample'
raw = read_raw_fif(fname_raw, preload=True, add_eeg_ref=False)
# select eeg data
picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
info = pick_info(raw.info, picks)
# Let's use cov = Identity
cov = read_cov(fname_cov)
cov['data'] = np.eye(cov['data'].shape[0])
# Simulated scalp map
simulated_scalp_map = np.zeros(picks.shape[0])
simulated_scalp_map[27:34] = 1
simulated_scalp_map = simulated_scalp_map[:, None]
evoked = EvokedArray(simulated_scalp_map, info, tmin=0)
min_dist = 5. # distance in mm
bem = read_bem_solution(fname_bem)
dip, residual = fit_dipole(evoked, cov, bem, fname_trans,
min_dist=min_dist)
dist = _compute_depth(dip, fname_bem, fname_trans, subject, subjects_dir)
# Constraints are not exact, so bump the minimum slightly
assert_true(min_dist - 0.1 < (dist[0] * 1000.) < (min_dist + 1.))
assert_raises(ValueError, fit_dipole, evoked, cov, fname_bem, fname_trans,
-1.)
def _compute_depth(dip, fname_bem, fname_trans, subject, subjects_dir):
"""Compute dipole depth."""
trans = _get_trans(fname_trans)[0]
bem = read_bem_solution(fname_bem)
surf = _bem_find_surface(bem, 'inner_skull')
points = surf['rr']
points = apply_trans(trans['trans'], points)
depth = _compute_nearest(points, dip.pos, return_dists=True)[1][0]
return np.ravel(depth)
@testing.requires_testing_data
def test_accuracy():
"""Test dipole fitting to sub-mm accuracy."""
evoked = read_evokeds(fname_evo)[0].crop(0., 0.,)
evoked.pick_types(meg=True, eeg=False)
evoked.pick_channels([c for c in evoked.ch_names[::4]])
for rad, perc_90 in zip((0.09, None), (0.002, 0.004)):
bem = make_sphere_model('auto', rad, evoked.info,
relative_radii=(0.999, 0.998, 0.997, 0.995))
src = read_source_spaces(fname_src)
fwd = make_forward_solution(evoked.info, None, src, bem)
fwd = convert_forward_solution(fwd, force_fixed=True)
vertices = [src[0]['vertno'], src[1]['vertno']]
n_vertices = sum(len(v) for v in vertices)
amp = 10e-9
data = np.eye(n_vertices + 1)[:n_vertices]
data[-1, -1] = 1.
data *= amp
stc = SourceEstimate(data, vertices, 0., 1e-3, 'sample')
sim = simulate_evoked(fwd, stc, evoked.info, cov=None, snr=np.inf)
cov = make_ad_hoc_cov(evoked.info)
dip = fit_dipole(sim, cov, bem, min_dist=0.001)[0]
ds = []
for vi in range(n_vertices):
if vi < len(vertices[0]):
hi = 0
vertno = vi
else:
hi = 1
vertno = vi - len(vertices[0])
vertno = src[hi]['vertno'][vertno]
rr = src[hi]['rr'][vertno]
d = np.sqrt(np.sum((rr - dip.pos[vi]) ** 2))
ds.append(d)
# make sure that our median is sub-mm and the large majority are very
# close (we expect some to be off by a bit e.g. because they are
# radial)
assert_true((np.percentile(ds, [50, 90]) < [0.0005, perc_90]).all())
@testing.requires_testing_data
def test_dipole_fixed():
"""Test reading a fixed-position dipole (from Xfit)."""
dip = read_dipole(fname_xfit_dip)
_check_roundtrip_fixed(dip)
with warnings.catch_warnings(record=True) as w: # unused fields
dip_txt = read_dipole(fname_xfit_dip_txt)
assert_true(any('extra fields' in str(ww.message) for ww in w))
assert_allclose(dip.info['chs'][0]['loc'][:3], dip_txt.pos[0])
assert_allclose(dip_txt.amplitude[0], 12.1e-9)
with warnings.catch_warnings(record=True): # unused fields
dip_txt_seq = read_dipole(fname_xfit_seq_txt)
assert_allclose(dip_txt_seq.gof, [27.3, 46.4, 43.7, 41., 37.3, 32.5])
def _check_roundtrip_fixed(dip):
"""Helper to test roundtrip IO for fixed dipoles."""
tempdir = _TempDir()
dip.save(op.join(tempdir, 'test-dip.fif.gz'))
dip_read = read_dipole(op.join(tempdir, 'test-dip.fif.gz'))
assert_allclose(dip_read.data, dip_read.data)
assert_allclose(dip_read.times, dip.times)
assert_equal(dip_read.info['xplotter_layout'], dip.info['xplotter_layout'])
assert_equal(dip_read.ch_names, dip.ch_names)
for ch_1, ch_2 in zip(dip_read.info['chs'], dip.info['chs']):
assert_equal(ch_1['ch_name'], ch_2['ch_name'])
for key in ('loc', 'kind', 'unit_mul', 'range', 'coord_frame', 'unit',
'cal', 'coil_type', 'scanno', 'logno'):
assert_allclose(ch_1[key], ch_2[key], err_msg=key)
run_tests_if_main(False)
| bsd-3-clause | 2,882,449,139,367,200,000 | 40.487603 | 79 | 0.615007 | false |
photoninger/ansible | lib/ansible/modules/cloud/azure/azure_rm_deployment.py | 16 | 27257 | #!/usr/bin/python
#
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: azure_rm_deployment
short_description: Create or destroy Azure Resource Manager template deployments
version_added: "2.1"
description:
- "Create or destroy Azure Resource Manager template deployments via the Azure SDK for Python.
You can find some quick start templates in GitHub here https://github.com/azure/azure-quickstart-templates.
For more information on Azure Resource Manager templates see https://azure.microsoft.com/en-us/documentation/articles/resource-group-template-deploy/."
options:
resource_group_name:
description:
- The resource group name to use or create to host the deployed template
required: true
location:
description:
- The geo-locations in which the resource group will be located.
required: false
default: westus
deployment_mode:
description:
- In incremental mode, resources are deployed without deleting existing resources that are not included in the template.
In complete mode resources are deployed and existing resources in the resource group not included in the template are deleted.
required: false
default: incremental
choices:
- complete
- incremental
state:
description:
- If state is "present", template will be created. If state is "present" and if deployment exists, it will be
updated. If state is "absent", stack will be removed.
default: present
required: false
choices:
- present
- absent
template:
description:
- A hash containing the templates inline. This parameter is mutually exclusive with 'template_link'.
Either one of them is required if "state" parameter is "present".
required: false
default: null
template_link:
description:
- Uri of file containing the template body. This parameter is mutually exclusive with 'template'. Either one
of them is required if "state" parameter is "present".
required: false
default: null
parameters:
description:
- A hash of all the required template variables for the deployment template. This parameter is mutually exclusive
with 'parameters_link'. Either one of them is required if "state" parameter is "present".
required: false
default: null
parameters_link:
description:
- Uri of file containing the parameters body. This parameter is mutually exclusive with 'parameters'. Either
one of them is required if "state" parameter is "present".
required: false
default: null
deployment_name:
description:
- The name of the deployment to be tracked in the resource group deployment history. Re-using a deployment name
will overwrite the previous value in the resource group's deployment history.
default: ansible-arm
wait_for_deployment_completion:
description:
- Whether or not to block until the deployment has completed.
default: yes
choices: ['yes', 'no']
wait_for_deployment_polling_period:
description:
- Time (in seconds) to wait between polls when waiting for deployment completion.
default: 10
extends_documentation_fragment:
- azure
author:
- David Justice (@devigned)
- Laurent Mazuel (@lmazuel)
- Andre Price (@obsoleted)
'''
EXAMPLES = '''
# Destroy a template deployment
- name: Destroy Azure Deploy
azure_rm_deployment:
state: absent
subscription_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
resource_group_name: dev-ops-cle
# Create or update a template deployment based on uris using parameter and template links
- name: Create Azure Deploy
azure_rm_deployment:
state: present
resource_group_name: dev-ops-cle
template_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-simple-linux/azuredeploy.json'
parameters_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-simple-linux/azuredeploy.parameters.json'
# Create or update a template deployment based on a uri to the template and parameters specified inline.
# This deploys a VM with SSH support for a given public key, then stores the result in 'azure_vms'. The result is then
# used to create a new host group. This host group is then used to wait for each instance to respond to the public IP SSH.
---
- hosts: localhost
connection: local
gather_facts: no
tasks:
- name: Destroy Azure Deploy
azure_rm_deployment:
state: absent
subscription_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
resource_group_name: dev-ops-cle
- name: Create Azure Deploy
azure_rm_deployment:
state: present
subscription_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
resource_group_name: dev-ops-cle
parameters:
newStorageAccountName:
value: devopsclestorage1
adminUsername:
value: devopscle
dnsNameForPublicIP:
value: devopscleazure
location:
value: West US
vmSize:
value: Standard_A2
vmName:
value: ansibleSshVm
sshKeyData:
value: YOUR_SSH_PUBLIC_KEY
template_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-sshkey/azuredeploy.json'
register: azure
- name: Add new instance to host group
add_host:
hostname: "{{ item['ips'][0].public_ip }}"
groupname: azure_vms
with_items: "{{ azure.deployment.instances }}"
- hosts: azure_vms
user: devopscle
tasks:
- name: Wait for SSH to come up
wait_for:
port: 22
timeout: 2000
state: started
- name: echo the hostname of the vm
shell: hostname
# Deploy an Azure WebApp running a hello world'ish node app
- name: Create Azure WebApp Deployment at http://devopscleweb.azurewebsites.net/hello.js
azure_rm_deployment:
state: present
subscription_id: cbbdaed0-fea9-4693-bf0c-d446ac93c030
resource_group_name: dev-ops-cle-webapp
parameters:
repoURL:
value: 'https://github.com/devigned/az-roadshow-oss.git'
siteName:
value: devopscleweb
hostingPlanName:
value: someplan
siteLocation:
value: westus
sku:
value: Standard
template_link: 'https://raw.githubusercontent.com/azure/azure-quickstart-templates/master/201-web-app-github-deploy/azuredeploy.json'
# Create or update a template deployment based on an inline template and parameters
- name: Create Azure Deploy
azure_rm_deployment:
state: present
subscription_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
resource_group_name: dev-ops-cle
template:
$schema: "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#"
contentVersion: "1.0.0.0"
parameters:
newStorageAccountName:
type: "string"
metadata:
description: "Unique DNS Name for the Storage Account where the Virtual Machine's disks will be placed."
adminUsername:
type: "string"
metadata:
description: "User name for the Virtual Machine."
adminPassword:
type: "securestring"
metadata:
description: "Password for the Virtual Machine."
dnsNameForPublicIP:
type: "string"
metadata:
description: "Unique DNS Name for the Public IP used to access the Virtual Machine."
ubuntuOSVersion:
type: "string"
defaultValue: "14.04.2-LTS"
allowedValues:
- "12.04.5-LTS"
- "14.04.2-LTS"
- "15.04"
metadata:
description: >
The Ubuntu version for the VM. This will pick a fully patched image of this given Ubuntu version.
Allowed values: 12.04.5-LTS, 14.04.2-LTS, 15.04."
variables:
location: "West US"
imagePublisher: "Canonical"
imageOffer: "UbuntuServer"
OSDiskName: "osdiskforlinuxsimple"
nicName: "myVMNic"
addressPrefix: "192.0.2.0/24"
subnetName: "Subnet"
subnetPrefix: "10.0.0.0/24"
storageAccountType: "Standard_LRS"
publicIPAddressName: "myPublicIP"
publicIPAddressType: "Dynamic"
vmStorageAccountContainerName: "vhds"
vmName: "MyUbuntuVM"
vmSize: "Standard_D1"
virtualNetworkName: "MyVNET"
vnetID: "[resourceId('Microsoft.Network/virtualNetworks',variables('virtualNetworkName'))]"
subnetRef: "[concat(variables('vnetID'),'/subnets/',variables('subnetName'))]"
resources:
- type: "Microsoft.Storage/storageAccounts"
name: "[parameters('newStorageAccountName')]"
apiVersion: "2015-05-01-preview"
location: "[variables('location')]"
properties:
accountType: "[variables('storageAccountType')]"
- apiVersion: "2015-05-01-preview"
type: "Microsoft.Network/publicIPAddresses"
name: "[variables('publicIPAddressName')]"
location: "[variables('location')]"
properties:
publicIPAllocationMethod: "[variables('publicIPAddressType')]"
dnsSettings:
domainNameLabel: "[parameters('dnsNameForPublicIP')]"
- type: "Microsoft.Network/virtualNetworks"
apiVersion: "2015-05-01-preview"
name: "[variables('virtualNetworkName')]"
location: "[variables('location')]"
properties:
addressSpace:
addressPrefixes:
- "[variables('addressPrefix')]"
subnets:
-
name: "[variables('subnetName')]"
properties:
addressPrefix: "[variables('subnetPrefix')]"
- type: "Microsoft.Network/networkInterfaces"
apiVersion: "2015-05-01-preview"
name: "[variables('nicName')]"
location: "[variables('location')]"
dependsOn:
- "[concat('Microsoft.Network/publicIPAddresses/', variables('publicIPAddressName'))]"
- "[concat('Microsoft.Network/virtualNetworks/', variables('virtualNetworkName'))]"
properties:
ipConfigurations:
-
name: "ipconfig1"
properties:
privateIPAllocationMethod: "Dynamic"
publicIPAddress:
id: "[resourceId('Microsoft.Network/publicIPAddresses',variables('publicIPAddressName'))]"
subnet:
id: "[variables('subnetRef')]"
- type: "Microsoft.Compute/virtualMachines"
apiVersion: "2015-06-15"
name: "[variables('vmName')]"
location: "[variables('location')]"
dependsOn:
- "[concat('Microsoft.Storage/storageAccounts/', parameters('newStorageAccountName'))]"
- "[concat('Microsoft.Network/networkInterfaces/', variables('nicName'))]"
properties:
hardwareProfile:
vmSize: "[variables('vmSize')]"
osProfile:
computername: "[variables('vmName')]"
adminUsername: "[parameters('adminUsername')]"
adminPassword: "[parameters('adminPassword')]"
storageProfile:
imageReference:
publisher: "[variables('imagePublisher')]"
offer: "[variables('imageOffer')]"
sku: "[parameters('ubuntuOSVersion')]"
version: "latest"
osDisk:
name: "osdisk"
vhd:
uri: >
[concat('http://',parameters('newStorageAccountName'),'.blob.core.windows.net/',variables('vmStorageAccountContainerName'),'/',
variables('OSDiskName'),'.vhd')]
caching: "ReadWrite"
createOption: "FromImage"
networkProfile:
networkInterfaces:
-
id: "[resourceId('Microsoft.Network/networkInterfaces',variables('nicName'))]"
diagnosticsProfile:
bootDiagnostics:
enabled: "true"
storageUri: "[concat('http://',parameters('newStorageAccountName'),'.blob.core.windows.net')]"
parameters:
newStorageAccountName:
value: devopsclestorage
adminUsername:
value: devopscle
adminPassword:
value: Password1!
dnsNameForPublicIP:
value: devopscleazure
'''
RETURN = '''
deployment:
description: Deployment details
type: dict
returned: always
sample:
group_name:
description: Name of the resource group
type: string
returned: always
id:
description: The Azure ID of the deployment
type: string
returned: always
instances:
description: Provides the public IP addresses for each VM instance.
type: list
returned: always
name:
description: Name of the deployment
type: string
returned: always
outputs:
description: Dictionary of outputs received from the deployment
type: dict
returned: always
'''
import time
try:
from azure.common.credentials import ServicePrincipalCredentials
import time
import yaml
except ImportError as exc:
IMPORT_ERROR = "Error importing module prerequisites: %s" % exc
try:
from itertools import chain
from azure.common.exceptions import CloudError
from azure.mgmt.resource.resources import ResourceManagementClient
from azure.mgmt.network import NetworkManagementClient
except ImportError:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
class AzureRMDeploymentManager(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group_name=dict(type='str', required=True, aliases=['resource_group']),
state=dict(type='str', default='present', choices=['present', 'absent']),
template=dict(type='dict', default=None),
parameters=dict(type='dict', default=None),
template_link=dict(type='str', default=None),
parameters_link=dict(type='str', default=None),
location=dict(type='str', default="westus"),
deployment_mode=dict(type='str', default='incremental', choices=['complete', 'incremental']),
deployment_name=dict(type='str', default="ansible-arm"),
wait_for_deployment_completion=dict(type='bool', default=True),
wait_for_deployment_polling_period=dict(type='int', default=10)
)
mutually_exclusive = [('template', 'template_link'),
('parameters', 'parameters_link')]
self.resource_group_name = None
self.state = None
self.template = None
self.parameters = None
self.template_link = None
self.parameters_link = None
self.location = None
self.deployment_mode = None
self.deployment_name = None
self.wait_for_deployment_completion = None
self.wait_for_deployment_polling_period = None
self.tags = None
self.results = dict(
deployment=dict(),
changed=False,
msg=""
)
super(AzureRMDeploymentManager, self).__init__(derived_arg_spec=self.module_arg_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=False)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()) + ['tags']:
setattr(self, key, kwargs[key])
if self.state == 'present':
deployment = self.deploy_template()
if deployment is None:
self.results['deployment'] = dict(
name=self.deployment_name,
group_name=self.resource_group_name,
id=None,
outputs=None,
instances=None
)
else:
self.results['deployment'] = dict(
name=deployment.name,
group_name=self.resource_group_name,
id=deployment.id,
outputs=deployment.properties.outputs,
instances=self._get_instances(deployment)
)
self.results['changed'] = True
self.results['msg'] = 'deployment succeeded'
else:
if self.resource_group_exists(self.resource_group_name):
self.destroy_resource_group()
self.results['changed'] = True
self.results['msg'] = "deployment deleted"
return self.results
def deploy_template(self):
"""
Deploy the targeted template and parameters
:param module: Ansible module containing the validated configuration for the deployment template
:param client: resource management client for azure
:param conn_info: connection info needed
:return:
"""
deploy_parameter = self.rm_models.DeploymentProperties(self.deployment_mode)
if not self.parameters_link:
deploy_parameter.parameters = self.parameters
else:
deploy_parameter.parameters_link = self.rm_models.ParametersLink(
uri=self.parameters_link
)
if not self.template_link:
deploy_parameter.template = self.template
else:
deploy_parameter.template_link = self.rm_models.TemplateLink(
uri=self.template_link
)
params = self.rm_models.ResourceGroup(location=self.location, tags=self.tags)
try:
self.rm_client.resource_groups.create_or_update(self.resource_group_name, params)
except CloudError as exc:
self.fail("Resource group create_or_update failed with status code: %s and message: %s" %
(exc.status_code, exc.message))
try:
result = self.rm_client.deployments.create_or_update(self.resource_group_name,
self.deployment_name,
deploy_parameter)
deployment_result = None
if self.wait_for_deployment_completion:
deployment_result = self.get_poller_result(result)
while deployment_result.properties is None or deployment_result.properties.provisioning_state not in ['Canceled', 'Failed', 'Deleted',
'Succeeded']:
time.sleep(self.wait_for_deployment_polling_period)
deployment_result = self.rm_client.deployments.get(self.resource_group_name, self.deployment_name)
except CloudError as exc:
failed_deployment_operations = self._get_failed_deployment_operations(self.deployment_name)
self.log("Deployment failed %s: %s" % (exc.status_code, exc.message))
self.fail("Deployment failed with status code: %s and message: %s" % (exc.status_code, exc.message),
failed_deployment_operations=failed_deployment_operations)
if self.wait_for_deployment_completion and deployment_result.properties.provisioning_state != 'Succeeded':
self.log("provisioning state: %s" % deployment_result.properties.provisioning_state)
failed_deployment_operations = self._get_failed_deployment_operations(self.deployment_name)
self.fail('Deployment failed. Deployment id: %s' % deployment_result.id,
failed_deployment_operations=failed_deployment_operations)
return deployment_result
def destroy_resource_group(self):
"""
Destroy the targeted resource group
"""
try:
result = self.rm_client.resource_groups.delete(self.resource_group_name)
result.wait() # Blocking wait till the delete is finished
except CloudError as e:
if e.status_code == 404 or e.status_code == 204:
return
else:
self.fail("Delete resource group and deploy failed with status code: %s and message: %s" %
(e.status_code, e.message))
def resource_group_exists(self, resource_group):
'''
Return True/False based on existence of requested resource group.
:param resource_group: string. Name of a resource group.
:return: boolean
'''
try:
self.rm_client.resource_groups.get(resource_group)
except CloudError:
return False
return True
def _get_failed_nested_operations(self, current_operations):
new_operations = []
for operation in current_operations:
if operation.properties.provisioning_state == 'Failed':
new_operations.append(operation)
if operation.properties.target_resource and \
'Microsoft.Resources/deployments' in operation.properties.target_resource.id:
nested_deployment = operation.properties.target_resource.resource_name
try:
nested_operations = self.rm_client.deployment_operations.list(self.resource_group_name,
nested_deployment)
except CloudError as exc:
self.fail("List nested deployment operations failed with status code: %s and message: %s" %
(exc.status_code, exc.message))
new_nested_operations = self._get_failed_nested_operations(nested_operations)
new_operations += new_nested_operations
return new_operations
def _get_failed_deployment_operations(self, deployment_name):
results = []
# time.sleep(15) # there is a race condition between when we ask for deployment status and when the
# # status is available.
try:
operations = self.rm_client.deployment_operations.list(self.resource_group_name, deployment_name)
except CloudError as exc:
self.fail("Get deployment failed with status code: %s and message: %s" %
(exc.status_code, exc.message))
try:
results = [
dict(
id=op.id,
operation_id=op.operation_id,
status_code=op.properties.status_code,
status_message=op.properties.status_message,
target_resource=dict(
id=op.properties.target_resource.id,
resource_name=op.properties.target_resource.resource_name,
resource_type=op.properties.target_resource.resource_type
) if op.properties.target_resource else None,
provisioning_state=op.properties.provisioning_state,
)
for op in self._get_failed_nested_operations(operations)
]
except:
# If we fail here, the original error gets lost and user receives wrong error message/stacktrace
pass
self.log(dict(failed_deployment_operations=results), pretty_print=True)
return results
def _get_instances(self, deployment):
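        # Sketch of the flow below: build the deployment dependency tree, pick out the
        # Microsoft.Compute/virtualMachines nodes, collect each VM's network-interface
        # children, resolve those NICs to public IP objects, and return one dict per VM
        # that ended up with at least one public address.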
dep_tree = self._build_hierarchy(deployment.properties.dependencies)
vms = self._get_dependencies(dep_tree, resource_type="Microsoft.Compute/virtualMachines")
vms_and_nics = [(vm, self._get_dependencies(vm['children'], "Microsoft.Network/networkInterfaces"))
for vm in vms]
vms_and_ips = [(vm['dep'], self._nic_to_public_ips_instance(nics))
for vm, nics in vms_and_nics]
return [dict(vm_name=vm.resource_name, ips=[self._get_ip_dict(ip)
for ip in ips]) for vm, ips in vms_and_ips if len(ips) > 0]
def _get_dependencies(self, dep_tree, resource_type):
matches = [value for value in dep_tree.values() if value['dep'].resource_type == resource_type]
for child_tree in [value['children'] for value in dep_tree.values()]:
matches += self._get_dependencies(child_tree, resource_type)
return matches
def _build_hierarchy(self, dependencies, tree=None):
tree = dict(top=True) if tree is None else tree
for dep in dependencies:
if dep.resource_name not in tree:
tree[dep.resource_name] = dict(dep=dep, children=dict())
if isinstance(dep, self.rm_models.Dependency) and dep.depends_on is not None and len(dep.depends_on) > 0:
self._build_hierarchy(dep.depends_on, tree[dep.resource_name]['children'])
if 'top' in tree:
tree.pop('top', None)
keys = list(tree.keys())
for key1 in keys:
for key2 in keys:
if key2 in tree and key1 in tree[key2]['children'] and key1 in tree:
tree[key2]['children'][key1] = tree[key1]
tree.pop(key1)
return tree
def _get_ip_dict(self, ip):
ip_dict = dict(name=ip.name,
id=ip.id,
public_ip=ip.ip_address,
public_ip_allocation_method=str(ip.public_ip_allocation_method)
)
if ip.dns_settings:
ip_dict['dns_settings'] = {
'domain_name_label': ip.dns_settings.domain_name_label,
'fqdn': ip.dns_settings.fqdn
}
return ip_dict
def _nic_to_public_ips_instance(self, nics):
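        # For each NIC dependency, fetch the NIC object, then fetch every public IP
        # referenced by its ipConfigurations; the id split assumes the usual
        # /subscriptions/.../resourceGroups/<group>/.../publicIPAddresses/<name> layout.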
return [self.network_client.public_ip_addresses.get(public_ip_id.split('/')[4], public_ip_id.split('/')[-1])
for nic_obj in (self.network_client.network_interfaces.get(self.resource_group_name,
nic['dep'].resource_name) for nic in nics)
for public_ip_id in [ip_conf_instance.public_ip_address.id
for ip_conf_instance in nic_obj.ip_configurations
if ip_conf_instance.public_ip_address]]
def main():
AzureRMDeploymentManager()
if __name__ == '__main__':
main()
| gpl-3.0 | -1,002,993,342,971,820,200 | 40.049699 | 157 | 0.60091 | false |
rfhk/awo-custom | account_financial_report_webkit/report/print_journal.py | 2 | 7650 | # -*- coding: utf-8 -*-
##############################################################################
#
# account_financial_report_webkit module for OpenERP, Webkit based
# extended report financial report
# Copyright (C) 2012 SYLEAM Info Services (<http://www.syleam.fr/>)
# Sebastien LANGE <[email protected]>
#
# This file is a part of account_financial_report_webkit
#
# account_financial_report_webkit is free software: you can redistribute it
# and/or modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# account_financial_report_webkit is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.report import report_sxw
from openerp.tools.translate import _
from openerp.modules.registry import RegistryManager
from datetime import datetime
from .common_reports import CommonReportHeaderWebkit
from .webkit_parser_header_fix import HeaderFooterTextWebKitParser
class PrintJournalWebkit(report_sxw.rml_parse, CommonReportHeaderWebkit):
def __init__(self, cursor, uid, name, context):
super(PrintJournalWebkit, self).__init__(cursor, uid, name,
context=context)
self.pool = RegistryManager.get(self.cr.dbname)
self.cursor = self.cr
company_obj = self.pool.get('res.company')
company_id = company_obj._company_default_get(self.cr, uid,
'res.users',
context=context)
company = company_obj.browse(self.cr, uid, company_id, context=context)
header_report_name = ' - '.join((_('JOURNALS'), company.name,
company.currency_id.name))
footer_date_time = self.formatLang(str(datetime.today()),
date_time=True)
self.localcontext.update({
'cr': cursor,
'uid': uid,
'report_name': _('Journals'),
'display_account_raw': self._get_display_account_raw,
'filter_form': self._get_filter,
'target_move': self._get_target_move,
'initial_balance': self._get_initial_balance,
'amount_currency': self._get_amount_currency,
'display_partner_account': self._get_display_partner_account,
'display_target_move': self._get_display_target_move,
'journals': self._get_journals_br,
'additional_args': [
('--header-font-name', 'Helvetica'),
('--footer-font-name', 'Helvetica'),
('--header-font-size', '10'),
('--footer-font-size', '6'),
('--header-left', header_report_name),
('--header-spacing', '2'),
('--footer-left', footer_date_time),
('--footer-right', ' '.join((_('Page'), '[page]', _('of'),
'[topage]'))),
('--footer-line',),
],
})
def set_context(self, objects, data, ids, report_type=None):
"""Populate a ledger_lines attribute on each browse record that will
be used by mako template"""
# Reading form
main_filter = self._get_form_param('filter', data, default='filter_no')
target_move = self._get_form_param('target_move', data, default='all')
start_date = self._get_form_param('date_from', data)
stop_date = self._get_form_param('date_to', data)
start_period = self.get_start_period_br(data)
stop_period = self.get_end_period_br(data)
fiscalyear = self.get_fiscalyear_br(data)
journal_ids = self._get_form_param('journal_ids', data)
chart_account = self._get_chart_account_id_br(data)
account_period_obj = self.pool.get('account.period')
domain = [('journal_id', 'in', journal_ids)]
if main_filter == 'filter_no':
domain += [
('date', '>=',
self.get_first_fiscalyear_period(fiscalyear).date_start),
('date', '<=',
self.get_last_fiscalyear_period(fiscalyear).date_stop),
]
# computation of move lines
elif main_filter == 'filter_date':
domain += [
('date', '>=', start_date),
('date', '<=', stop_date),
]
elif main_filter == 'filter_period':
period_ids = account_period_obj.build_ctx_periods(self.cursor,
self.uid,
start_period.id,
stop_period.id)
domain = [
('period_id', 'in', period_ids),
]
if target_move == 'posted':
domain += [('state', '=', 'posted')]
account_journal_period_obj = self.pool.get('account.journal.period')
new_ids = account_journal_period_obj.search(self.cursor, self.uid, [
('journal_id', 'in', journal_ids),
('period_id', 'in', period_ids),
])
objects = account_journal_period_obj.browse(self.cursor, self.uid,
new_ids)
# Sort by journal and period
objects.sorted(key=lambda a: (a.journal_id.code,
a.period_id.date_start))
move_obj = self.pool.get('account.move')
moves = {}
for journal_period in objects:
domain_arg = [
('journal_id', '=', journal_period.journal_id.id),
('period_id', '=', journal_period.period_id.id),
]
if target_move == 'posted':
domain_arg += [('state', '=', 'posted')]
move_ids = move_obj.search(self.cursor, self.uid, domain_arg,
order="name")
moves[journal_period.id] = move_obj.browse(self.cursor, self.uid,
move_ids)
# Sort account move line by account accountant
for move in moves[journal_period.id]:
move.line_id.sorted(key=lambda a: (a.date, a.account_id.code))
self.localcontext.update({
'fiscalyear': fiscalyear,
'start_date': start_date,
'stop_date': stop_date,
'start_period': start_period,
'stop_period': stop_period,
'chart_account': chart_account,
'moves': moves,
})
return super(PrintJournalWebkit, self).set_context(
objects, data, new_ids, report_type=report_type)
HeaderFooterTextWebKitParser(
'report.account.account_report_print_journal_webkit',
'account.journal.period',
'addons/account_financial_report_webkit/report/templates/\
account_report_print_journal.mako',
parser=PrintJournalWebkit)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| lgpl-3.0 | -6,254,074,307,570,243,000 | 44.266272 | 79 | 0.533333 | false |
beeftornado/sentry | tests/sentry/mail/activity/test_release.py | 1 | 11594 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.core import mail
from django.utils import timezone
from sentry.models import (
Activity,
Commit,
CommitAuthor,
Deploy,
Environment,
GroupSubscriptionReason,
Release,
ReleaseCommit,
Repository,
UserEmail,
UserOption,
UserOptionValue,
)
from sentry.mail.activity.release import ReleaseActivityEmail
from sentry.testutils import TestCase
class ReleaseTestCase(TestCase):
def setUp(self):
super(ReleaseTestCase, self).setUp()
self.user = self.create_user("[email protected]")
assert UserEmail.objects.filter(user=self.user, email=self.user.email).update(
is_verified=True
)
self.user2 = self.create_user("[email protected]")
assert UserEmail.objects.filter(user=self.user2, email=self.user2.email).update(
is_verified=True
)
self.user3 = self.create_user("[email protected]")
assert UserEmail.objects.filter(user=self.user3, email=self.user3.email).update(
is_verified=True
)
self.user4 = self.create_user("[email protected]")
assert UserEmail.objects.filter(user=self.user4, email=self.user4.email).update(
is_verified=True
)
self.user5 = self.create_user("[email protected]")
user5_alt_email = "[email protected]"
UserEmail.objects.create(email=user5_alt_email, user=self.user5)
assert UserEmail.objects.filter(user=self.user5, email=self.user5.email).update(
is_verified=True
)
assert UserEmail.objects.filter(user=self.user5, email=user5_alt_email).update(
is_verified=True
)
self.org = self.create_organization(owner=None)
self.org.flags.allow_joinleave = False
self.org.save()
self.team = self.create_team(organization=self.org)
self.team2 = self.create_team(organization=self.org)
self.create_member(user=self.user, organization=self.org, teams=[self.team])
self.create_member(user=self.user2, organization=self.org)
self.create_member(user=self.user3, organization=self.org, teams=[self.team])
self.create_member(user=self.user4, organization=self.org, teams=[self.team])
self.create_member(user=self.user5, organization=self.org, teams=[self.team])
self.project = self.create_project(organization=self.org, teams=[self.team])
self.project2 = self.create_project(organization=self.org, teams=[self.team2])
self.release = Release.objects.create(
version="a" * 40,
organization_id=self.project.organization_id,
date_released=timezone.now(),
)
self.release.add_project(self.project)
self.release.add_project(self.project2)
self.environment = Environment.objects.create(
name="production", organization_id=self.org.id
)
self.deploy = Deploy.objects.create(
release=self.release, organization_id=self.org.id, environment_id=self.environment.id
)
repository = Repository.objects.create(organization_id=self.org.id, name=self.project.name)
self.commit = Commit.objects.create(
key="a" * 40,
repository_id=repository.id,
organization_id=self.org.id,
author=CommitAuthor.objects.create(
organization_id=self.org.id, name=self.user.name, email=self.user.email
),
)
self.commit2 = Commit.objects.create(
key="b" * 40,
repository_id=repository.id,
organization_id=self.org.id,
author=CommitAuthor.objects.create(
organization_id=self.org.id, name=self.user2.name, email=self.user2.email
),
)
self.commit3 = Commit.objects.create(
key="c" * 40,
repository_id=repository.id,
organization_id=self.org.id,
author=CommitAuthor.objects.create(
organization_id=self.org.id, name=self.user4.name, email=self.user4.email
),
)
self.commit4 = Commit.objects.create(
key="e" * 40,
repository_id=repository.id,
organization_id=self.org.id,
author=CommitAuthor.objects.create(
organization_id=self.org.id, name=self.user5.name, email=user5_alt_email
),
)
ReleaseCommit.objects.create(
organization_id=self.project.organization_id,
release=self.release,
commit=self.commit,
order=0,
)
ReleaseCommit.objects.create(
organization_id=self.project.organization_id,
release=self.release,
commit=self.commit2,
order=1,
)
ReleaseCommit.objects.create(
organization_id=self.project.organization_id,
release=self.release,
commit=self.commit3,
order=2,
)
ReleaseCommit.objects.create(
organization_id=self.project.organization_id,
release=self.release,
commit=self.commit4,
order=3,
)
UserOption.objects.set_value(
user=self.user3,
organization=self.org,
key="deploy-emails",
value=UserOptionValue.all_deploys,
)
UserOption.objects.set_value(
user=self.user4,
organization=self.org,
key="deploy-emails",
value=UserOptionValue.no_deploys,
)
        # added to make sure org default above takes precedence
UserOption.objects.set_value(
user=self.user4,
organization=None,
key="deploy-emails",
value=UserOptionValue.all_deploys,
)
def test_simple(self):
email = ReleaseActivityEmail(
Activity(
project=self.project,
user=self.user,
type=Activity.RELEASE,
data={"version": self.release.version, "deploy_id": self.deploy.id},
)
)
# user is included because they committed
# user2 committed but isn't in a team associated with the project.
        # user3 is included because they opted into all deploy emails
# user4 committed but isn't included because they opted out of all deploy emails
# for that org -- also tests to make sure org overrides default preference
# user5 committed with another email address and is still included.
assert len(email.get_participants()) == 3
assert email.get_participants() == {
self.user: GroupSubscriptionReason.committed,
self.user3: GroupSubscriptionReason.deploy_setting,
self.user5: GroupSubscriptionReason.committed,
}
context = email.get_context()
assert context["environment"] == "production"
assert context["repos"][0]["commits"] == [
(self.commit, self.user),
(self.commit2, self.user2),
(self.commit3, self.user4),
(self.commit4, self.user5),
]
user_context = email.get_user_context(self.user)
# make sure this only includes projects user has access to
assert len(user_context["projects"]) == 1
assert user_context["projects"][0][0] == self.project
with self.tasks():
email.send()
assert len(mail.outbox) == 3
sent_email_addresses = {msg.to[0] for msg in mail.outbox}
assert sent_email_addresses == {self.user.email, self.user3.email, self.user5.email}
def test_doesnt_generate_on_no_release(self):
email = ReleaseActivityEmail(
Activity(
project=self.project,
user=self.user,
type=Activity.RELEASE,
data={"version": "a", "deploy_id": 5},
)
)
assert email.release is None
assert not email.should_email()
def test_no_committers(self):
release = Release.objects.create(
version="b" * 40,
organization_id=self.project.organization_id,
date_released=timezone.now(),
)
release.add_project(self.project)
release.add_project(self.project2)
deploy = Deploy.objects.create(
release=release, organization_id=self.org.id, environment_id=self.environment.id
)
email = ReleaseActivityEmail(
Activity(
project=self.project,
user=self.user,
type=Activity.RELEASE,
data={"version": release.version, "deploy_id": deploy.id},
)
)
        # only user3 is included because they opted into all deploy emails
assert len(email.get_participants()) == 1
assert email.get_participants() == {self.user3: GroupSubscriptionReason.deploy_setting}
context = email.get_context()
assert context["environment"] == "production"
assert context["repos"] == []
user_context = email.get_user_context(self.user)
# make sure this only includes projects user has access to
assert len(user_context["projects"]) == 1
assert user_context["projects"][0][0] == self.project
with self.tasks():
email.send()
assert len(mail.outbox) == 1
sent_email_addresses = {msg.to[0] for msg in mail.outbox}
assert sent_email_addresses == {self.user3.email}
def test_uses_default(self):
user6 = self.create_user()
self.create_member(user=user6, organization=self.org, teams=[self.team])
UserOption.objects.set_value(
user=user6, organization=None, key="deploy-emails", value=UserOptionValue.all_deploys
)
release = Release.objects.create(
version="b" * 40,
organization_id=self.project.organization_id,
date_released=timezone.now(),
)
release.add_project(self.project)
release.add_project(self.project2)
deploy = Deploy.objects.create(
release=release, organization_id=self.org.id, environment_id=self.environment.id
)
email = ReleaseActivityEmail(
Activity(
project=self.project,
user=self.user,
type=Activity.RELEASE,
data={"version": release.version, "deploy_id": deploy.id},
)
)
        # user3 and user6 are included because they opted into all deploy emails
# (one on an org level, one as their default)
assert len(email.get_participants()) == 2
assert email.get_participants() == {
user6: GroupSubscriptionReason.deploy_setting,
self.user3: GroupSubscriptionReason.deploy_setting,
}
context = email.get_context()
assert context["environment"] == "production"
assert context["repos"] == []
user_context = email.get_user_context(user6)
# make sure this only includes projects user has access to
assert len(user_context["projects"]) == 1
assert user_context["projects"][0][0] == self.project
with self.tasks():
email.send()
assert len(mail.outbox) == 2
sent_email_addresses = {msg.to[0] for msg in mail.outbox}
assert sent_email_addresses == {self.user3.email, user6.email}
| bsd-3-clause | 3,656,528,274,749,774,300 | 34.347561 | 99 | 0.599103 | false |
endlessm/chromium-browser | third_party/grpc/src/src/python/grpcio_tests/tests/_loader.py | 24 | 3776 | # Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import importlib
import pkgutil
import re
import unittest
import coverage
TEST_MODULE_REGEX = r'^.*_test$'
class Loader(object):
"""Test loader for setuptools test suite support.
Attributes:
suite (unittest.TestSuite): All tests collected by the loader.
loader (unittest.TestLoader): Standard Python unittest loader to be ran per
module discovered.
module_matcher (re.RegexObject): A regular expression object to match
against module names and determine whether or not the discovered module
contributes to the test suite.
"""
def __init__(self):
self.suite = unittest.TestSuite()
self.loader = unittest.TestLoader()
self.module_matcher = re.compile(TEST_MODULE_REGEX)
def loadTestsFromNames(self, names, module=None):
"""Function mirroring TestLoader::loadTestsFromNames, as expected by
setuptools.setup argument `test_loader`."""
# ensure that we capture decorators and definitions (else our coverage
# measure unnecessarily suffers)
coverage_context = coverage.Coverage(data_suffix=True)
coverage_context.start()
imported_modules = tuple(
importlib.import_module(name) for name in names)
for imported_module in imported_modules:
self.visit_module(imported_module)
for imported_module in imported_modules:
try:
package_paths = imported_module.__path__
except AttributeError:
continue
self.walk_packages(package_paths)
coverage_context.stop()
coverage_context.save()
return self.suite
def walk_packages(self, package_paths):
"""Walks over the packages, dispatching `visit_module` calls.
Args:
package_paths (list): A list of paths over which to walk through modules
along.
"""
for importer, module_name, is_package in (
pkgutil.walk_packages(package_paths)):
module = importer.find_module(module_name).load_module(module_name)
self.visit_module(module)
def visit_module(self, module):
"""Visits the module, adding discovered tests to the test suite.
Args:
module (module): Module to match against self.module_matcher; if matched
it has its tests loaded via self.loader into self.suite.
"""
if self.module_matcher.match(module.__name__):
module_suite = self.loader.loadTestsFromModule(module)
self.suite.addTest(module_suite)
def iterate_suite_cases(suite):
"""Generator over all unittest.TestCases in a unittest.TestSuite.
Args:
suite (unittest.TestSuite): Suite to iterate over in the generator.
Returns:
generator: A generator over all unittest.TestCases in `suite`.
"""
for item in suite:
if isinstance(item, unittest.TestSuite):
for child_item in iterate_suite_cases(item):
yield child_item
elif isinstance(item, unittest.TestCase):
yield item
else:
raise ValueError('unexpected suite item of type {}'.format(
type(item)))
| bsd-3-clause | 6,738,726,140,154,157,000 | 34.622642 | 79 | 0.671345 | false |
99cloud/keystone_register | openstack_dashboard/dashboards/project/instances/forms.py | 2 | 1792 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from openstack_dashboard import api
from horizon import exceptions
from horizon import forms
from horizon import messages
LOG = logging.getLogger(__name__)
class UpdateInstance(forms.SelfHandlingForm):
tenant_id = forms.CharField(widget=forms.HiddenInput)
instance = forms.CharField(widget=forms.HiddenInput)
name = forms.CharField(required=True)
def handle(self, request, data):
try:
server = api.server_update(request, data['instance'], data['name'])
messages.success(request,
_('Instance "%s" updated.') % data['name'])
return server
except:
redirect = reverse("horizon:project:instances:index")
exceptions.handle(request,
_('Unable to update instance.'),
redirect=redirect)
| apache-2.0 | 4,758,281,869,034,314,000 | 34.137255 | 79 | 0.685268 | false |
walidsa3d/dupee | dupee/dupee.py | 1 | 2910 |
import argparse
import re
from collections import defaultdict
from os import listdir
from os.path import abspath
from os.path import isfile
from os.path import join
from PIL import Image
import os
from . import __version__
from filehash import FileHash
from imagehash import dhash
from termcolor import colored
class FileObject(object):
pass
class Dupee(object):
def list_files_recursive(self, rootdir):
filelist = []
fileobjs = []
for root, dirname, filenames in os.walk(rootdir):
for name in filenames:
filepath = os.path.join(root, name)
filelist.append(filepath)
for f in filelist:
fileobj = FileObject()
fileobj.path = abspath(f)
fileobj.hash = self.imhash(f) if self.isimage(
f) else FileHash(f).md5()
fileobjs.append(fileobj)
return [f for f in fileobjs if f.hash is not None]
def list_files(self, rootdir, recursive=False):
if recursive:
return self.list_files_recursive(rootdir)
fileobjs = []
filelist = [join(rootdir, f)
for f in listdir(rootdir) if isfile(join(rootdir, f))]
for f in filelist:
fileobj = FileObject()
fileobj.path = abspath(f)
fileobj.hash = self.imhash(f) if self.isimage(
f) else FileHash(f).md5()
fileobjs.append(fileobj)
return [f for f in fileobjs if f.hash is not None]
def isimage(self, f):
"""check if given file is image using its extension"""
imgregex = re.compile(r'\.(jpe?g|png|gif|bmp)$')
# return bool(imgregex.search(f))
if imgregex.search(f):
return True
return False
def imhash(self, pic):
"""use dhash algorithm to hash image"""
try:
image = Image.open(pic)
h = str(dhash(image))
except:
return None
return h
def dedupe(self, filelist):
"""return a list of duplicate files found in filelist"""
d = defaultdict(list)
for f in filelist:
d[f.hash].append(f.path)
dupes = {k: v for k, v in d.iteritems() if len(v) >= 2}
return dupes
def main(self):
parser = argparse.ArgumentParser(usage="-h for full usage",prog="Dupee")
parser.add_argument('-V', '--version', action='version', version=__version__)
parser.add_argument('-r', '--recursive', action="store_true", help="scan directory recursively")
parser.add_argument('rootdir', help='source directory')
args = parser.parse_args()
files = self.list_files(args.rootdir, recursive=args.recursive)
d = self.dedupe(files)
for v in d.values():
print colored(v[0], 'green')
for d in v[1:]:
print "---->", colored(d, 'red')
| mit | -1,783,298,262,710,570,200 | 30.630435 | 104 | 0.582818 | false |
bmazin/ARCONS-pipeline | util/MKIDStd.py | 1 | 14531 | import os
import glob
import matplotlib.pyplot as plt
import numpy
import types
import string
import pyfits
from util import smooth
import sys
from scipy.constants import *
import math
class MKIDStd:
"""
This class contains the spectra of several standard stars and other
objects. These spectra may be plotted and used to compare with data
from the MKID detector.
Wavelength and flux values and text files describing each object are
saved in the data directory. Each object is described in a .txt file.
This file lists the file name that contains the data, the units, a
citation, and a brief description of the object.
"""
def __init__(self, referenceWavelength=5500):
"""
Loads up the list of objects we know about, filters, and
Balmer wavelengths.
referenceWavelength is used in plot() to normalize spectra
"""
self.referenceWavelength=referenceWavelength
self.objects = {}
self.filters = {}
self.filterList = ['U','B','V','R','I','u','g','r','i','z']
self.this_dir, this_filename = os.path.split(__file__)
pattern = os.path.join(self.this_dir,"data","*.txt")
for file in glob.glob(pattern):
name,ext = os.path.splitext(os.path.basename(file))
dictionary = self._loadDictionary(file)
self.objects[name] = dictionary
self.balmerwavelengths = [6563,4861,4341,4102,3970,3889,3835,3646]
self.lymanwavelengths = [1216,1026,973,950,938,931,926,923,921,919]
self._loadUBVRIFilters()
self._loadSDSSFilters()
# h is in Joules/sec and c is in meters/sec.
# This k value is used in conversions between counts and ergs
self.k = ((1.0*10**-10)/(1.0*10**7))/h/c
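        # Unit bookkeeping (sketch): a photon of wavelength lambda [Angstrom] carries
        # E = h*c/lambda joules, so flux[counts] = flux[ergs] * lambda * k with
        # k = (1e-10 m/Angstrom) / (1e7 erg/J) / (h*c) ~ 5.0e7; this is the factor
        # applied in countsToErgs() and ergsToCounts() below.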
self.vegaInCounts = "not loaded yet"
def _loadFilter(self, filterName):
if filterName in ['U','B','V','R','I']:
print "Loading Johnson %s filter"%filterName
self._loadUBVRIFilters()
elif filterName in ['u','g','r','i','z']:
print "Loading SDSS %s filter"%filterName
self._loadSDSSFilters()
else:
raise ValueError("INVALID FILTER. Currently supported filters:", self.filterList)
wvls = self.filters[filterName][0]
trans = self.filters[filterName][1]
return wvls, trans
def _loadUBVRIFilters(self):
filterFileName = os.path.join(self.this_dir,"data","ph08_UBVRI.mht")
f = open(filterFileName,'r')
nFilter = -1
nToRead = -1
iFilter = -1
iRead = 0
for line in f:
if (nFilter == -1) :
nFilter = int(line)
elif (nToRead <= 0):
nToRead = int(line)
iFilter += 1
filter = self.filterList[iFilter]
self.filters[filter] = numpy.zeros((2,nToRead))
iRead = 0
else:
nToRead -= 1
vals = line.split()
self.filters[filter][0,iRead] = vals[0]
self.filters[filter][1,iRead] = vals[1]
iRead += 1
def _loadSDSSFilters(self):
for filter in ['u','g','i','r','z']:
filterFileName = os.path.join(self.this_dir,"data",filter+'.mht')
temp = numpy.loadtxt(filterFileName)
npts = temp.shape[0]
self.filters[filter] = numpy.zeros((2,npts))
for i in range(npts):
self.filters[filter][0,i] = temp[i,0]
self.filters[filter][1,i] = temp[i,3]
def _loadDictionary(self,file):
retval = {}
for line in open(file):
vals = line.strip().split(" = ");
retval[vals[0]] = vals[1:]
return retval
def load(self,name):
"""
Returns a two dimensional numpy array where a[:,0] is
wavelength in Angstroms and a[:,1] is flux in
counts/sec/angstrom/cm^2
Noisy spectra are smoothed with window_len in the .txt file.
Ergs and AB Mag units are automatically converted to counts.
"""
fname = self.objects[name]['dataFile']
fullFileName = os.path.join(self.this_dir,"data",fname[0])
if (string.count(fullFileName,"fit")):
a = self.loadSdssSpecFits(fullFileName)
else:
a = numpy.loadtxt(fullFileName)
len = int(self.objects[name]['window_len'][0])
if len > 1:
#temp = smooth.smooth(a[:,1], window_len=len)[len/2:-(len/2)]
temp = smooth.smooth(a[:,1], window_len=len)
a[:,1] = temp[1:]
try:
fluxUnit = self.objects[name]['fluxUnit'][0]
scale = float(fluxUnit.split()[0])
a[:,1] *= scale
except ValueError:
scale = 1
ergs = string.count(self.objects[name]['fluxUnit'][0],"ergs")
if ergs:
a[:,1] *= (a[:,0] * self.k)
mag = string.count(self.objects[name]['fluxUnit'][0],"mag")
if mag:
a[:,1] = \
(10**(-2.406/2.5))*(10**(-0.4*a[:,1]))/(a[:,0]**2) * \
(a[:,0] * self.k)
return a
def normalizeFlux(self,a):
"""
This function normalizes the flux at self.referenceWavelength
"""
referenceFlux = self.getFluxAtReferenceWavelength(a)
a[:,1] /= referenceFlux
return a
def countsToErgs(self,a):
"""
This function changes the units of the spectra from counts to
ergs.
"""
a[:,1] /= (a[:,0] * self.k)
return a
def ergsToCounts(self,a):
"""
This function changes the units of the spectra from ergs to
counts.
"""
a[:,1] *= (a[:,0] * self.k)
return a
def measureBandPassFlux(self,aFlux,aFilter):
"""
This function measures the band pass flux of the object in the
filter.
"""
sum = 0
sumd = 0
filter = numpy.interp(aFlux[:,0], aFilter[0,:], aFilter[1,:], 0, 0)
for i in range(aFlux[:,0].size-1):
dw = aFlux[i+1,0] - aFlux[i,0]
flux = aFlux[i,1]*filter[i]/aFlux[i,0]
sum += flux*dw
sumd += filter[i]*dw
sum /= self.k
sum /= sumd
return sum
def _getVegaMag(self, aFlux, aFilter):
#if self.vegaInCounts == "not loaded yet":
self.vegaInCounts = self.load("vega")
sumNumerator = 0.0
sumDenominator = 0.0
filter = numpy.interp(aFlux[:,0], aFilter[0,:], aFilter[1,:], 0, 0)
vFlux = numpy.interp(
aFlux[:,0], self.vegaInCounts[:,0], self.vegaInCounts[:,1], 0, 0)
for i in range(aFlux[:,0].size-1):
dw = aFlux[i+1,0] - aFlux[i,0]
sumNumerator += aFlux[i,1]*filter[i]*dw
sumDenominator += vFlux[i]*filter[i]*dw
#print "i=%4d filter=%5.3f flux=%f" % (i,filter[i], aFlux[i,1])
#print " sumNumerator=",sumNumerator
#print " sumDenominator=",sumDenominator
mag = -2.5*math.log10(sumNumerator/sumDenominator) + 0.03
return mag
def getVegaMag(self,name,Filter):
"""
Returns the magnitude of the desired object at the desired filter.
"""
aFlux = self.load(name)
aFilter = self.filters[Filter]
a = self._getVegaMag(aFlux, aFilter)
return a
def plot(self,name="all",xlog=False,ylog=True,xlim=[3000,13000],normalizeFlux=True,countsToErgs=False):
"""
Makes a png file that plots the arrays a[:,0] (wavelength) and
a[:,1] (flux) with balmer wavelengths indicated. Individual
spectra are labeled and indicated by a legend.
plot() plots the spectrum of all standard stars in the program.
        plot(['vega','bd17']) plots only the spectra for those two
        stars.
plot('vega') plots the spectrum for only that star.
        Whether the axes are plotted logarithmically is controlled by the
        optional parameters xlog and ylog.
The optional parameter xlim sets the wavelength limits of the plot.
The plot y limits are from flux values for wavelengths included
in xlim.
The flux values are in counts/sec/cm^2/A by default, but they can be
changed to ergs by setting countsToErgs=True when calling the
function.
        By default fluxes are normalized to 1 at self.referenceWavelength;
        setting normalizeFlux=False disables normalization.
        The filename of the plot in the current working directory is returned.
"""
if (name == "all"):
listofobjects = self.objects.keys()
listofobjects.sort()
plotName = "all"
elif (isinstance(name, types.ListType)):
listofobjects = name
plotName = name[0]+"_group"
else:
plotName = name
listofobjects = [name]
plt.clf()
plotYMin = -1
plotYMax = -1
for tname in listofobjects:
a = self.load(tname)
if (countsToErgs):
a = self.countsToErgs(a)
if (normalizeFlux):
a = self.normalizeFlux(a)
a.shape
x = a[:,0]
y = a[:,1]
if (not xlog and ylog):
plt.semilogy(x,y, label=tname)
if (not ylog and xlog):
plt.semilogx(x,y, label=tname)
if (not xlog and not ylog):
plt.plot(x,y, label=tname)
if (xlog and ylog):
plt.loglog(x,y, label=tname)
imin = numpy.searchsorted(x,xlim[0])
imax = numpy.searchsorted(x,xlim[1])
ytemp = y[imin:imax]
ymin = abs(ytemp).min()
ymax = ytemp.max()
if (plotYMin == -1):
plotYMin = ymin
plotYMax = ymax
else:
plotYMin = min(plotYMin,ymin)
plotYMax = max(plotYMax,ymax)
for x in self.balmerwavelengths:
plt.plot([x,x],[plotYMin,plotYMax], 'r--')
plt.xlabel('wavelength(Angstroms)')
if (countsToErgs):
ylabel = 'flux(ergs/sec/cm2/A)'
else:
ylabel = 'flux(counts/sec/cm2/A)'
if (normalizeFlux):
ylabel += '['+str(self.referenceWavelength)+']'
plt.ylabel(ylabel)
ax = plt.subplot(111)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(bbox_to_anchor=(1.05,1), loc=2, prop={'size':10}, borderaxespad=0.)
plt.xlim(xlim)
plt.ylim([plotYMin,plotYMax])
fullPlotName = plotName+'.png'
plt.savefig(fullPlotName)
return fullPlotName
def plotFilters(self):
"""
Plots all filters. This includes both the UBVRI and the SDSS
filters.
Note that the array is reversed, compared to that used
by the plot() function, so wavelength values are in
self.filters[filterName][0,:] and the relative transmission is
self.filters[filterName][1,:]
The plot made by the filters is saved as filters.png
"""
plt.clf()
listoffilters = self.filterList
for filter in listoffilters:
a = self.filters[filter]
y = a[1,:]
x = a[0,:]
plt.plot(x,y, label=filter)
plt.legend()
#plt.show()
plt.savefig('filters'+'.png')
def getFluxAtReferenceWavelength(self, a):
"""
returns the flux value at self.referenceWavelength
"""
x = a[:,0]
y = a[:,1]
index = numpy.searchsorted(x, self.referenceWavelength);
if index < 0:
index = 0
if index > x.size - 1:
index = x.size - 1
return y[index]
def showUnits(self):
"""
Returns flux units from the original data files for the spectra
of all objects.
"""
for name in self.objects.keys():
fluxUnit = self.objects[name]['fluxUnit']
print name, " ", fluxUnit
def loadSdssSpecFits(self, fullFileName):
"""
Allows spectral data from a SDSS fits file to be read into the
program
"""
f = pyfits.open(fullFileName)
coeff0 = f[0].header['COEFF0']
coeff1 = f[0].header['COEFF1']
n = len(f[1].data)
retval = numpy.zeros([n,2])
retval[:,0] = numpy.arange(n)
retval[:,0] = 10**(coeff0+coeff1*retval[:,0])
for i in range(n):
retval[i][1] = f[1].data[i][0]
return retval
def rep2(self):
names = self.objects.keys()
names.sort()
for name in names:
print "name=",name
vMag = self.getVegaMag(name,'V')
print "name=%15s vMag=%+f" % (name, vMag)
def report(self):
"""
Creates a text document called Report.log that reports the units,
citation, magnitude, and description of each object.
"""
old_stdout = sys.stdout
log_file = open("Report.log","w")
print "sys.stdout=",sys.stdout
sys.stdout = log_file
names = self.objects.keys()
names.sort()
for name in names:
fluxUnit = self.objects[name]['fluxUnit'][0]
wavelengthUnit = self.objects[name]['wavlengthUnit'][0]
citation = self.objects[name]['citation'][0]
description = self.objects[name]['description'][0]
a = self.load(name)
points = a[:,1].size
x = a[:,0]
y = a[:,1]
xmin = x.min()
xmax = x.max()
bMag = self.getVegaMag(name,'B')
vMag = self.getVegaMag(name,'V')
bmv = bMag - vMag
print "----------------------------------------------------------"
print "Name: %s" %name
print "Units: Flux: %s Wavelength: %s " %(fluxUnit, wavelengthUnit)
print "Citation: %s" %citation
print "Description: %s." %description
print "Calculated V=%.2f B-V=%f" % (vMag, bmv)
print "Number of Points: %d Wavelength: Max =%9.3f Min = %10.3f" \
%(points, xmin, xmax)
sys.stdout = old_stdout
log_file.close()
| gpl-2.0 | -110,834,826,776,027,860 | 35.049628 | 107 | 0.537059 | false |
TeachAtTUM/edx-platform | lms/djangoapps/commerce/api/v1/views.py | 5 | 3363 | import logging
from django.contrib.auth.models import User
from django.http import Http404
from edx_rest_api_client import exceptions
from edx_rest_framework_extensions.authentication import JwtAuthentication
from rest_framework.authentication import SessionAuthentication
from rest_framework.generics import ListAPIView, RetrieveUpdateAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from rest_framework_oauth.authentication import OAuth2Authentication
from course_modes.models import CourseMode
from openedx.core.djangoapps.commerce.utils import ecommerce_api_client
from openedx.core.lib.api.mixins import PutAsCreateMixin
from util.json_request import JsonResponse
from ...utils import is_account_activation_requirement_disabled
from .models import Course
from .permissions import ApiKeyOrModelPermission, IsAuthenticatedOrActivationOverridden
from .serializers import CourseSerializer
log = logging.getLogger(__name__)
class CourseListView(ListAPIView):
""" List courses and modes. """
authentication_classes = (JwtAuthentication, OAuth2Authentication, SessionAuthentication,)
permission_classes = (IsAuthenticated,)
serializer_class = CourseSerializer
pagination_class = None
def get_queryset(self):
return list(Course.iterator())
class CourseRetrieveUpdateView(PutAsCreateMixin, RetrieveUpdateAPIView):
""" Retrieve, update, or create courses/modes. """
lookup_field = 'id'
lookup_url_kwarg = 'course_id'
model = CourseMode
authentication_classes = (JwtAuthentication, OAuth2Authentication, SessionAuthentication,)
permission_classes = (ApiKeyOrModelPermission,)
serializer_class = CourseSerializer
# Django Rest Framework v3 requires that we provide a queryset.
# Note that we're overriding `get_object()` below to return a `Course`
# rather than a CourseMode, so this isn't really used.
queryset = CourseMode.objects.all()
def get_object(self, queryset=None):
course_id = self.kwargs.get(self.lookup_url_kwarg)
course = Course.get(course_id)
if course:
return course
raise Http404
def pre_save(self, obj):
# There is nothing to pre-save. The default behavior changes the Course.id attribute from
# a CourseKey to a string, which is not desired.
pass
class OrderView(APIView):
""" Retrieve order details. """
authentication_classes = (JwtAuthentication, SessionAuthentication,)
permission_classes = (IsAuthenticatedOrActivationOverridden,)
def get(self, request, number):
""" HTTP handler. """
# If the account activation requirement is disabled for this installation, override the
# anonymous user object attached to the request with the actual user object (if it exists)
if not request.user.is_authenticated() and is_account_activation_requirement_disabled():
try:
request.user = User.objects.get(id=request.session._session_cache['_auth_user_id'])
except User.DoesNotExist:
return JsonResponse(status=403)
try:
order = ecommerce_api_client(request.user).orders(number).get()
return JsonResponse(order)
except exceptions.HttpNotFoundError:
return JsonResponse(status=404)
| agpl-3.0 | -4,316,431,131,490,350,600 | 38.564706 | 99 | 0.736545 | false |
luotao1/Paddle | python/paddle/fluid/tests/unittests/dygraph_to_static/simnet_dygraph_model_v2.py | 1 | 14960 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import reduce
import paddle
class EmbeddingLayer(object):
"""
Embedding Layer class
"""
def __init__(self, dict_size, emb_dim, name="emb", padding_idx=None):
"""
initialize
"""
self.dict_size = dict_size
self.emb_dim = emb_dim
self.name = name
self.padding_idx = padding_idx
def ops(self):
"""
operation
"""
# TODO(huihuangzheng): The original code set the is_sparse=True, but it
# causes crush in dy2stat. Set it to True after fixing it.
emb = paddle.fluid.dygraph.Embedding(
size=[self.dict_size, self.emb_dim],
is_sparse=True,
padding_idx=self.padding_idx,
param_attr=paddle.ParamAttr(
name=self.name,
initializer=paddle.nn.initializer.XavierUniform()))
return emb
class FCLayer(object):
"""
Fully Connect Layer class
"""
def __init__(self, fc_dim, act, name="fc"):
"""
initialize
"""
self.fc_dim = fc_dim
self.act = act
self.name = name
def ops(self):
"""
operation
"""
fc = FC(size=self.fc_dim,
param_attr=paddle.ParamAttr(name="%s.w" % self.name),
bias_attr=paddle.ParamAttr(name="%s.b" % self.name),
act=self.act)
return fc
class ConcatLayer(object):
"""
Connection Layer class
"""
def __init__(self, axis):
"""
initialize
"""
self.axis = axis
def ops(self, inputs):
"""
operation
"""
concat = paddle.concat(x=inputs, axis=self.axis)
return concat
class ReduceMeanLayer(object):
"""
Reduce Mean Layer class
"""
def __init__(self):
"""
initialize
"""
pass
def ops(self, input):
"""
operation
"""
mean = paddle.mean(input)
return mean
class CosSimLayer(object):
"""
    Cosine Similarity Calculation Layer
"""
def __init__(self):
"""
initialize
"""
pass
def ops(self, x, y):
"""
operation
"""
sim = paddle.nn.functional.cosine_similarity(x, y)
return sim
class ElementwiseMaxLayer(object):
"""
Elementwise Max Layer class
"""
def __init__(self):
"""
initialize
"""
pass
def ops(self, x, y):
"""
operation
"""
max = paddle.maximum(x=x, y=y)
return max
class ElementwiseAddLayer(object):
"""
Elementwise Add Layer class
"""
def __init__(self):
"""
initialize
"""
pass
def ops(self, x, y):
"""
operation
"""
add = paddle.add(x=x, y=y)
return add
class ElementwiseSubLayer(object):
"""
    Elementwise Sub Layer class
"""
def __init__(self):
"""
initialize
"""
pass
def ops(self, x, y):
"""
operation
"""
sub = paddle.fluid.layers.elementwise_sub(x, y)
return sub
class ConstantLayer(object):
"""
Generate A Constant Layer class
"""
def __init__(self):
"""
initialize
"""
pass
def ops(self, input, shape, dtype, value):
"""
operation
"""
shape = list(shape)
input_shape = paddle.shape(input)
shape[0] = input_shape[0]
constant = paddle.fluid.layers.fill_constant(shape, dtype, value)
return constant
class SoftsignLayer(object):
"""
Softsign Layer class
"""
def __init__(self):
"""
initialize
"""
pass
def ops(self, input):
"""
operation
"""
softsign = paddle.nn.functional.softsign(input)
return softsign
class FC(paddle.nn.Layer):
r"""
This interface is used to construct a callable object of the ``FC`` class.
For more details, refer to code examples.
It creates a fully connected layer in the network. It can take
one or multiple ``Tensor`` as its inputs. It creates a Variable called weights for each input tensor,
which represents a fully connected weight matrix from each input unit to
each output unit. The fully connected layer multiplies each input tensor
with its corresponding weight to produce an output Tensor with shape [N, `size`],
where N is batch size. If multiple input tensors are given, the results of
multiple output tensors with shape [N, `size`] will be summed up. If ``bias_attr``
is not None, a bias variable will be created and added to the output.
Finally, if ``act`` is not None, it will be applied to the output as well.
When the input is single ``Tensor`` :
.. math::
Out = Act({XW + b})
When the input are multiple ``Tensor`` :
.. math::
Out = Act({\sum_{i=0}^{N-1}X_iW_i + b})
In the above equation:
* :math:`N`: Number of the input. N equals to len(input) if input is list of ``Tensor`` .
* :math:`X_i`: The i-th input ``Tensor`` .
* :math:`W_i`: The i-th weights matrix corresponding i-th input tensor.
* :math:`b`: The bias parameter created by this layer (if needed).
* :math:`Act`: The activation function.
* :math:`Out`: The output ``Tensor`` .
See below for an example.
.. code-block:: text
Given:
data_1.data = [[[0.1, 0.2]]]
data_1.shape = (1, 1, 2) # 1 is batch_size
data_2.data = [[[0.1, 0.2, 0.3]]]
data_2.shape = (1, 1, 3) # 1 is batch_size
fc = FC("fc", 2, num_flatten_dims=2)
out = fc(input=[data_1, data_2])
Then:
out.data = [[[0.182996 -0.474117]]]
out.shape = (1, 1, 2)
Parameters:
size(int): The number of output units in this layer.
num_flatten_dims (int, optional): The fc layer can accept an input tensor with more than
two dimensions. If this happens, the multi-dimension tensor will first be flattened
into a 2-dimensional matrix. The parameter `num_flatten_dims` determines how the input
tensor is flattened: the first `num_flatten_dims` (inclusive, index starts from 1)
dimensions will be flatten to form the first dimension of the final matrix (height of
the matrix), and the rest `rank(X) - num_flatten_dims` dimensions are flattened to
form the second dimension of the final matrix (width of the matrix). For example, suppose
`X` is a 5-dimensional tensor with a shape [2, 3, 4, 5, 6], and `num_flatten_dims` = 3.
Then, the flattened matrix will have a shape [2 x 3 x 4, 5 x 6] = [24, 30]. Default: 1
param_attr (ParamAttr or list of ParamAttr, optional): The parameter attribute for learnable
weights(Parameter) of this layer. Default: None.
bias_attr (ParamAttr or list of ParamAttr, optional): The attribute for the bias
of this layer. If it is set to False, no bias will be added to the output units.
If it is set to None, the bias is initialized zero. Default: None.
act (str, optional): Activation to be applied to the output of this layer. Default: None.
is_test(bool, optional): A flag indicating whether execution is in test phase. Default: False.
dtype(str, optional): Dtype used for weight, it can be "float32" or "float64". Default: "float32".
Attribute:
**weight** (list of Parameter): the learnable weights of this layer.
**bias** (Parameter or None): the learnable bias of this layer.
Returns:
None
"""
def __init__(self,
size,
num_flatten_dims=1,
param_attr=None,
bias_attr=None,
act=None,
is_test=False,
dtype="float32"):
super(FC, self).__init__(dtype)
self._size = size
self._num_flatten_dims = num_flatten_dims
self._dtype = dtype
self._param_attr = param_attr
self._bias_attr = bias_attr
self._act = act
self.__w = list()
def _build_once(self, input):
i = 0
for inp, param in self._helper.iter_inputs_and_params(input,
self._param_attr):
input_shape = inp.shape
param_shape = [
reduce(lambda a, b: a * b, input_shape[self._num_flatten_dims:],
1)
] + [self._size]
self.__w.append(
self.add_parameter(
'_w%d' % i,
self.create_parameter(
attr=param,
shape=param_shape,
dtype=self._dtype,
is_bias=False)))
i += 1
size = list([self._size])
self._b = self.create_parameter(
attr=self._bias_attr, shape=size, dtype=self._dtype, is_bias=True)
# TODO(songyouwei): We should remove _w property
@property
def _w(self, i=0):
return self.__w[i]
@_w.setter
def _w(self, value, i=0):
assert isinstance(self.__w[i], Variable)
self.__w[i].set_value(value)
@property
def weight(self):
if len(self.__w) > 1:
return self.__w
else:
return self.__w[0]
@weight.setter
def weight(self, value):
if len(self.__w) == 1:
self.__w[0] = value
@property
def bias(self):
return self._b
@bias.setter
def bias(self, value):
self._b = value
def forward(self, input):
mul_results = list()
i = 0
for inp, param in self._helper.iter_inputs_and_params(input,
self._param_attr):
tmp = self._helper.create_variable_for_type_inference(self._dtype)
self._helper.append_op(
type="mul",
inputs={"X": inp,
"Y": self.__w[i]},
outputs={"Out": tmp},
attrs={
"x_num_col_dims": self._num_flatten_dims,
"y_num_col_dims": 1
})
i += 1
mul_results.append(tmp)
if len(mul_results) == 1:
pre_bias = mul_results[0]
else:
pre_bias = self._helper.create_variable_for_type_inference(
self._dtype)
self._helper.append_op(
type="sum",
inputs={"X": mul_results},
outputs={"Out": pre_bias},
attrs={"use_mkldnn": False})
if self._b is not None:
pre_activation = self._helper.create_variable_for_type_inference(
dtype=self._dtype)
self._helper.append_op(
type='elementwise_add',
inputs={'X': [pre_bias],
'Y': [self._b]},
outputs={'Out': [pre_activation]},
attrs={'axis': self._num_flatten_dims})
else:
pre_activation = pre_bias
# Currently, we don't support inplace in dygraph mode
return self._helper.append_activation(pre_activation, act=self._act)
class HingeLoss(object):
"""
    Hinge Loss Calculate class
"""
def __init__(self, conf_dict):
"""
initialize
"""
self.margin = conf_dict["loss"]["margin"]
def compute(self, pos, neg):
"""
compute loss
"""
elementwise_max = ElementwiseMaxLayer()
elementwise_add = ElementwiseAddLayer()
elementwise_sub = ElementwiseSubLayer()
constant = ConstantLayer()
reduce_mean = ReduceMeanLayer()
loss = reduce_mean.ops(
elementwise_max.ops(
constant.ops(neg, neg.shape, "float32", 0.0),
elementwise_add.ops(
elementwise_sub.ops(neg, pos),
constant.ops(neg, neg.shape, "float32", self.margin))))
return loss
class BOW(paddle.nn.Layer):
"""
BOW
"""
def __init__(self, conf_dict):
"""
initialize
"""
super(BOW, self).__init__()
self.dict_size = conf_dict["dict_size"]
self.task_mode = conf_dict["task_mode"]
self.emb_dim = conf_dict["net"]["emb_dim"]
self.bow_dim = conf_dict["net"]["bow_dim"]
self.seq_len = conf_dict["seq_len"]
self.emb_layer = EmbeddingLayer(self.dict_size, self.emb_dim,
"emb").ops()
self.bow_layer = paddle.nn.Linear(
in_features=self.bow_dim, out_features=self.bow_dim)
self.bow_layer_po = FCLayer(self.bow_dim, None, "fc").ops()
self.softmax_layer = FCLayer(2, "softmax", "cos_sim").ops()
@paddle.jit.to_static
def forward(self, left, right):
"""
Forward network
"""
# embedding layer
left_emb = self.emb_layer(left)
right_emb = self.emb_layer(right)
left_emb = paddle.reshape(
left_emb, shape=[-1, self.seq_len, self.bow_dim])
right_emb = paddle.reshape(
right_emb, shape=[-1, self.seq_len, self.bow_dim])
bow_left = paddle.fluid.layers.reduce_sum(left_emb, dim=1)
bow_right = paddle.fluid.layers.reduce_sum(right_emb, dim=1)
softsign_layer = SoftsignLayer()
left_soft = softsign_layer.ops(bow_left)
right_soft = softsign_layer.ops(bow_right)
# matching layer
if self.task_mode == "pairwise":
left_bow = self.bow_layer(left_soft)
right_bow = self.bow_layer(right_soft)
cos_sim_layer = CosSimLayer()
pred = cos_sim_layer.ops(left_bow, right_bow)
return left_bow, pred
else:
concat_layer = ConcatLayer(1)
concat = concat_layer.ops([left_soft, right_soft])
concat_fc = self.bow_layer_po(concat)
pred = self.softmax_layer(concat_fc)
return left_soft, pred
| apache-2.0 | -226,043,994,514,461,600 | 29.283401 | 106 | 0.536832 | false |
hjanime/VisTrails | vistrails/core/vistrail/module_control_param.py | 1 | 5714 | ###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
from vistrails.db.domain import DBControlParameter
import unittest
import copy
class ModuleControlParam(DBControlParameter):
# Valid control parameters should be put here
LOOP_KEY = 'loop_type' # How input lists are combined
WHILE_COND_KEY = 'while_cond' # Run module in a while loop
WHILE_INPUT_KEY = 'while_input' # input port for forwarded value
WHILE_OUTPUT_KEY = 'while_output' # output port for forwarded value
WHILE_MAX_KEY = 'while_max' # Max iterations
WHILE_DELAY_KEY = 'while_delay' # delay between iterations
CACHE_KEY = 'cache' # Turn caching on/off for this module (not implemented)
JOB_CACHE_KEY = 'job_cache' # Always persist output values to disk
##########################################################################
# Constructors and copy
def __init__(self, *args, **kwargs):
DBControlParameter.__init__(self, *args, **kwargs)
if self.id is None:
self.id = -1
def __copy__(self):
return ModuleControlParam.do_copy(self)
def do_copy(self, new_ids=False, id_scope=None, id_remap=None):
cp = DBControlParameter.do_copy(self, new_ids, id_scope, id_remap)
cp.__class__ = ModuleControlParam
return cp
@staticmethod
def convert(_control_parameter):
_control_parameter.__class__ = ModuleControlParam
##########################################################################
# Properties
id = DBControlParameter.db_id
name = DBControlParameter.db_name
value = DBControlParameter.db_value
##########################################################################
# Operators
def __str__(self):
"""__str__() -> str - Returns a string representation of an ModuleControlParam
object.
"""
rep = "<controlParameter id=%s name=%s value=%s</controlParameter>"
return rep % (str(self.id), str(self.name), str(self.value))
def __eq__(self, other):
""" __eq__(other: ModuleControlParam) -> boolean
Returns True if self and other have the same attributes. Used by ==
operator.
"""
if type(self) != type(other):
return False
if self.name != other.name:
return False
if self.value != other.value:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
################################################################################
# Unit tests
class TestModuleControlParam(unittest.TestCase):
def create_control_parameter(self, id_scope=None):
from vistrails.db.domain import IdScope
if id_scope is None:
id_scope = IdScope()
control_parameter = ModuleControlParam(id=id_scope.getNewId(ModuleControlParam.vtType),
name='name %s',
value='some value %s')
return control_parameter
def test_copy(self):
from vistrails.db.domain import IdScope
id_scope = IdScope()
a1 = self.create_control_parameter(id_scope)
a2 = copy.copy(a1)
self.assertEquals(a1, a2)
self.assertEquals(a1.id, a2.id)
a3 = a1.do_copy(True, id_scope, {})
self.assertEquals(a1, a3)
self.assertNotEquals(a1.id, a3.id)
def test_serialization(self):
import vistrails.core.db.io
a1 = self.create_control_parameter()
xml_str = vistrails.core.db.io.serialize(a1)
a2 = vistrails.core.db.io.unserialize(xml_str, ModuleControlParam)
self.assertEquals(a1, a2)
self.assertEquals(a1.id, a2.id)
def test_str(self):
a1 = self.create_control_parameter()
str(a1)
| bsd-3-clause | 6,569,205,747,210,331,000 | 37.608108 | 95 | 0.60763 | false |
Cosiroc/bleau-database | FlaskWebApplication/Statistics.py | 2 | 6608 | ####################################################################################################
#
# Bleau Database - A database of the bouldering area of Fontainebleau
# Copyright (C) 2015 Fabrice Salvaire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
####################################################################################################
####################################################################################################
import hashlib
from io import StringIO
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
## WSGI Hang
## print(bokeh ...)
## # from bokeh.plotting import figure
## from bokeh.embed import components
## from bokeh.charts import Bar
## from bokeh.charts.attributes import CatAttr
## print(bokeh done)
####################################################################################################
from BleauDataBase.Statistics import CircuitStatistics
####################################################################################################
class BokehPlot:
##############################################
def __init__(self, *args, **kwargs):
script, div = components(*args, **kwargs)
self.script = script
self.div = div
####################################################################################################
class SvgPlot:
##############################################
def __init__(self, svg):
self.script = ''
self.div = svg
####################################################################################################
class CircuitStatisticsData:
##############################################
def __init__(self, circuits):
self._circuit_statistics = CircuitStatistics(circuits)
self._circuit_grade_barchart = self._make_barchart(self._circuit_statistics.circuit_grade_histogram,
'Cotation des Circuits') # Circuit Grade
self._global_boulder_grade_barchart = self._make_barchart(self._circuit_statistics.global_boulder_grade_histogram,
'Cotations des Blocs') # Boulder Grade
self._boulder_grade_barchart_map = {grade:self._make_barchart(self._circuit_statistics.boulder_grade_histogram(grade),
'Cotations des Blocs pour les circuits {}'.format(grade))
for grade in self._circuit_statistics.circuit_grades}
##############################################
def _make_barchart(self, histogram, title):
grade_counters = histogram.domain()
if grade_counters:
data = {
'labels': [str(grade_counter) for grade_counter in grade_counters],
'counts': [grade_counter.count for grade_counter in grade_counters],
}
# engine = self._make_bokeh_barchart
engine = self._make_svg_barchart
return engine(data, title)
else:
return None
##############################################
def _make_bokeh_barchart(self, data, title):
# Workaround to don't sort labels
label = CatAttr(df=data, columns='labels', sort=False)
bar = Bar(data,
values='counts', label=label,
title=title,
xlabel='',
ylabel='',
plot_width=300,
plot_height=200,
responsive=True,
tools='',
toolbar_location=None,
)
return BokehPlot(bar)
##############################################
def _make_svg_barchart(self, data, title):
dpi = 100
figure_width = 1000 / dpi
aspect_ratio = 16 / 9
figure_height = figure_width / aspect_ratio
figure = Figure(figsize=(figure_width, figure_height), dpi=dpi, facecolor='white')
axes = figure.add_subplot(1, 1, 1)
y = data['counts']
x = np.arange(len(y))
width = .5
bar_chart = axes.bar(x, y, width=width, color='r', edgecolor='white')
axes.set_ylabel('')
axes.set_title(title, fontsize=20)
axes.set_xticks(x + width/2)
axes.xaxis.set_tick_params(width=0)
axes.set_xticklabels(data['labels'], rotation=45, fontsize=15)
axes.grid(axis='y')
canvas = FigureCanvas(figure)
image_data = StringIO()
canvas.print_figure(image_data, format='svg')
svg = image_data.getvalue()
svg = svg[svg.find('<svg'):]
return SvgPlot(svg)
##############################################
@property
def circuit_statistics(self):
return self._circuit_statistics
@property
def circuit_grade_barchart(self):
return self._circuit_grade_barchart
@property
def global_boulder_grade_barchart(self):
return self._global_boulder_grade_barchart
@property
def circuit_grades(self):
return list(self._boulder_grade_barchart_map.keys())
def boulder_grade_barchart(self, grade):
return self._boulder_grade_barchart_map[grade]
####################################################################################################
class CircuitStatisticsCache:
##############################################
def __init__(self):
self._cache = {}
##############################################
def __getitem__(self, circuits):
ids = [id(circuit) for circuit in circuits]
id_string = ''.join([str(x) for x in sorted(ids)])
key = hashlib.sha256(id_string.encode('ascii'))
if key not in self._cache:
self._cache[key] = CircuitStatisticsData(circuits)
return self._cache[key]
| agpl-3.0 | -5,466,395,071,295,208,000 | 33.596859 | 127 | 0.491525 | false |
jobscore/sync-engine | migrations/versions/141_remote_remote_contacts.py | 9 | 1035 | """Remove notion of 'remote' contact and drop contact 'source' column
Revision ID: 3ab34bc85c8d
Revises: 3f01a3f1b4cc
Create Date: 2015-02-16 16:03:45.288539
"""
# revision identifiers, used by Alembic.
revision = '3ab34bc85c8d'
down_revision = '3f01a3f1b4cc'
from alembic import op
from sqlalchemy.ext.declarative import declarative_base
def upgrade():
from inbox.models.session import session_scope
from inbox.ignition import main_engine
engine = main_engine(pool_size=1, max_overflow=0)
Base = declarative_base()
Base.metadata.reflect(engine)
class Contact_Old(Base):
__table__ = Base.metadata.tables['contact']
# Delete the "remote" contacts. This is just a server cache for comparing
# any changes, now handled by the previous "local" contacts
with session_scope() as db_session:
db_session.query(Contact_Old).filter_by(source='remote').delete()
op.drop_column('contact', 'source')
def downgrade():
raise Exception("Can't roll back. Migration removed data.")
| agpl-3.0 | -5,701,361,687,046,856,000 | 27.75 | 77 | 0.718841 | false |
D4wN/brickv | src/build_data/windows/OpenGL/GL/NV/pixel_data_range.py | 4 | 3489 | '''OpenGL extension NV.pixel_data_range
This module customises the behaviour of the
OpenGL.raw.GL.NV.pixel_data_range to provide a more
Python-friendly API
Overview (from the spec)
The vertex array range extension is intended to improve the
efficiency of OpenGL vertex arrays. OpenGL vertex arrays' coherency
model and ability to access memory from arbitrary locations in memory
prevented implementations from using DMA (Direct Memory Access)
operations.
Many image-intensive applications, such as those that use dynamically
generated textures, face similar problems. These applications would
like to be able to sustain throughputs of hundreds of millions of
pixels per second through DrawPixels and hundreds of millions of
texels per second through TexSubImage.
However, the same restrictions that limited vertex throughput also
limit pixel throughput.
By the time that any pixel operation that reads data from user memory
returns, OpenGL requires that it must be safe for the application to
start using that memory for a different purpose. This coherency
model prevents asynchronous DMA transfers directly out of the user's
buffer.
There are also no restrictions on the pointer provided to pixel
operations or on the size of the data. To facilitate DMA
implementations, the driver needs to know in advance what region of
the address space to lock down.
Vertex arrays faced both of these restrictions already, but pixel
operations have one additional complicating factor -- they are
	bidirectional.  Vertex array data is always being transferred from the
application to the driver and the HW, whereas pixel operations
sometimes transfer data to the application from the driver and HW.
Note that the types of memory that are suitable for DMA for reading
and writing purposes are often different. For example, on many PC
platforms, DMA pulling is best accomplished with write-combined
(uncached) AGP memory, while pushing data should use cached memory so
that the application can read the data efficiently once it has been
read back over the AGP bus.
This extension defines an API where an application can specify two
pixel data ranges, which are analogous to vertex array ranges, except
that one is for operations where the application is reading data
(e.g. glReadPixels) and one is for operations where the application
is writing data (e.g. glDrawPixels, glTexSubImage2D, etc.). Each
pixel data range has a pointer to its start and a length in bytes.
When the pixel data range is enabled, and if the pointer specified
as the argument to a pixel operation is inside the corresponding
pixel data range, the implementation may choose to asynchronously
pull data from the pixel data range or push data to the pixel data
range. Data pulled from outside the pixel data range is undefined,
while pushing data to outside the pixel data range produces undefined
results.
The application may synchronize with the hardware in one of two ways:
by flushing the pixel data range (or causing an implicit flush) or by
using the NV_fence extension to insert fences in the command stream.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/pixel_data_range.txt
'''
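# Sketch of the intended usage, based on the spec text above (the entry points
# and enums are assumed to be exposed by this wrapper as glPixelDataRangeNV /
# glFlushPixelDataRangeNV and GL_WRITE_PIXEL_DATA_RANGE_NV /
# GL_READ_PIXEL_DATA_RANGE_NV):
#   1. allocate a buffer suitable for DMA transfers,
#   2. declare it once with glPixelDataRangeNV(target, length, pointer),
#   3. issue DrawPixels/TexSubImage (write range) or ReadPixels (read range)
#      calls whose pointers fall inside that range, and
#   4. call glFlushPixelDataRangeNV(target) (or use NV_fence) before reusing
#      the memory for another purpose.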
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.NV.pixel_data_range import *
### END AUTOGENERATED SECTION | gpl-2.0 | -2,130,507,304,883,985,700 | 46.162162 | 70 | 0.799083 | false |
mbox/django | django/db/backends/sqlite3/schema.py | 2 | 8410 | from django.utils import six
from django.apps.registry import Apps
from django.db.backends.schema import BaseDatabaseSchemaEditor
from django.db.models.fields.related import ManyToManyField
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
sql_delete_table = "DROP TABLE %(table)s"
sql_create_inline_fk = "REFERENCES %(to_table)s (%(to_column)s)"
def quote_value(self, value):
# Inner import to allow nice failure for backend if not present
import _sqlite3
try:
value = _sqlite3.adapt(value)
except _sqlite3.ProgrammingError:
pass
# Manual emulation of SQLite parameter quoting
if isinstance(value, type(True)):
return str(int(value))
elif isinstance(value, six.integer_types):
return str(value)
elif isinstance(value, six.string_types):
return '"%s"' % six.text_type(value)
elif value is None:
return "NULL"
else:
raise ValueError("Cannot quote parameter value %r" % value)
def _remake_table(self, model, create_fields=[], delete_fields=[], alter_fields=[], rename_fields=[], override_uniques=None):
"""
        Shortcut to transform a model from old_model into new_model. SQLite
        cannot alter existing columns, so this creates a new table with the new
        schema, copies the data across, and swaps it in for the old table.
"""
# Work out the new fields dict / mapping
body = dict((f.name, f) for f in model._meta.local_fields)
mapping = dict((f.column, f.column) for f in model._meta.local_fields)
# If any of the new or altered fields is introducing a new PK,
# remove the old one
restore_pk_field = None
if any(f.primary_key for f in create_fields) or any(n.primary_key for o, n in alter_fields):
for name, field in list(body.items()):
if field.primary_key:
field.primary_key = False
restore_pk_field = field
if field.auto_created:
del body[name]
del mapping[field.column]
# Add in any created fields
for field in create_fields:
body[field.name] = field
# If there's a default, insert it into the copy map
if field.has_default():
mapping[field.column] = self.quote_value(
field.get_default()
)
# Add in any altered fields
for (old_field, new_field) in alter_fields:
del body[old_field.name]
del mapping[old_field.column]
body[new_field.name] = new_field
mapping[new_field.column] = old_field.column
# Remove any deleted fields
for field in delete_fields:
del body[field.name]
del mapping[field.column]
# Remove any implicit M2M tables
if isinstance(field, ManyToManyField) and field.rel.through._meta.auto_created:
return self.delete_model(field.rel.through)
# Work inside a new app registry
apps = Apps()
# Construct a new model for the new state
meta_contents = {
'app_label': model._meta.app_label,
'db_table': model._meta.db_table + "__new",
'unique_together': model._meta.unique_together if override_uniques is None else override_uniques,
'apps': apps,
}
meta = type("Meta", tuple(), meta_contents)
body['Meta'] = meta
body['__module__'] = model.__module__
temp_model = type(model._meta.object_name, model.__bases__, body)
# Create a new table with that format
self.create_model(temp_model)
# Copy data from the old table
field_maps = list(mapping.items())
self.execute("INSERT INTO %s (%s) SELECT %s FROM %s" % (
self.quote_name(temp_model._meta.db_table),
', '.join(self.quote_name(x) for x, y in field_maps),
', '.join(self.quote_name(y) for x, y in field_maps),
self.quote_name(model._meta.db_table),
))
# Delete the old table (not using self.delete_model to avoid deleting
# all implicit M2M tables)
self.execute(self.sql_delete_table % {
"table": self.quote_name(model._meta.db_table),
})
# Rename the new to the old
self.alter_db_table(model, temp_model._meta.db_table, model._meta.db_table)
# Run deferred SQL on correct table
for sql in self.deferred_sql:
self.execute(sql.replace(temp_model._meta.db_table, model._meta.db_table))
self.deferred_sql = []
# Fix any PK-removed field
if restore_pk_field:
restore_pk_field.primary_key = True
def add_field(self, model, field):
"""
Creates a field on a model.
Usually involves adding a column, but may involve adding a
table instead (for M2M fields)
"""
# Special-case implicit M2M tables
if isinstance(field, ManyToManyField) and field.rel.through._meta.auto_created:
return self.create_model(field.rel.through)
self._remake_table(model, create_fields=[field])
def remove_field(self, model, field):
"""
Removes a field from a model. Usually involves deleting a column,
but for M2Ms may involve deleting a table.
"""
# M2M fields are a special case
if isinstance(field, ManyToManyField):
# For implicit M2M tables, delete the auto-created table
if field.rel.through._meta.auto_created:
self.delete_model(field.rel.through)
# For explicit "through" M2M fields, do nothing
# For everything else, remake.
else:
self._remake_table(model, delete_fields=[field])
def alter_field(self, model, old_field, new_field, strict=False):
"""
Allows a field's type, uniqueness, nullability, default, column,
constraints etc. to be modified.
Requires a copy of the old field as well so we can only perform
changes that are required.
If strict is true, raises errors if the old column does not match old_field precisely.
"""
old_db_params = old_field.db_parameters(connection=self.connection)
old_type = old_db_params['type']
new_db_params = new_field.db_parameters(connection=self.connection)
new_type = new_db_params['type']
if old_type is None and new_type is None and (old_field.rel.through and new_field.rel.through and old_field.rel.through._meta.auto_created and new_field.rel.through._meta.auto_created):
return self._alter_many_to_many(model, old_field, new_field, strict)
elif old_type is None or new_type is None:
raise ValueError("Cannot alter field %s into %s - they are not compatible types (probably means only one is an M2M with implicit through model)" % (
old_field,
new_field,
))
# Alter by remaking table
self._remake_table(model, alter_fields=[(old_field, new_field)])
def alter_unique_together(self, model, old_unique_together, new_unique_together):
"""
Deals with a model changing its unique_together.
Note: The input unique_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
"""
self._remake_table(model, override_uniques=new_unique_together)
def _alter_many_to_many(self, model, old_field, new_field, strict):
"""
Alters M2Ms to repoint their to= endpoints.
"""
if old_field.rel.through._meta.db_table == new_field.rel.through._meta.db_table:
return
# Make a new through table
self.create_model(new_field.rel.through)
# Copy the data across
self.execute("INSERT INTO %s (%s) SELECT %s FROM %s" % (
self.quote_name(new_field.rel.through._meta.db_table),
', '.join([
"id",
new_field.m2m_column_name(),
new_field.m2m_reverse_name(),
]),
', '.join([
"id",
old_field.m2m_column_name(),
old_field.m2m_reverse_name(),
]),
self.quote_name(old_field.rel.through._meta.db_table),
))
# Delete the old through table
self.delete_model(old_field.rel.through)
| bsd-3-clause | -3,977,547,470,791,322,000 | 43.497354 | 193 | 0.592271 | false |
kr41/ggrc-core | src/ggrc/models/person.py | 4 | 5073 | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
import re
from sqlalchemy.orm import validates
from ggrc import db
from ggrc import settings
from ggrc.models.computed_property import computed_property
from ggrc.models.context import HasOwnContext
from ggrc.models.exceptions import ValidationError
from ggrc.models.deferred import deferred
from ggrc.models.mixins import Base, CustomAttributable
from ggrc.models.custom_attribute_definition import CustomAttributeMapable
from ggrc.models.reflection import PublishOnly
from ggrc.models.relationship import Relatable
from ggrc.models.utils import validate_option
class Person(CustomAttributable, CustomAttributeMapable, HasOwnContext,
Relatable, Base, db.Model):
__tablename__ = 'people'
email = deferred(db.Column(db.String, nullable=False), 'Person')
name = deferred(db.Column(db.String), 'Person')
language_id = deferred(db.Column(db.Integer), 'Person')
company = deferred(db.Column(db.String), 'Person')
object_people = db.relationship(
'ObjectPerson', backref='person', cascade='all, delete-orphan')
object_owners = db.relationship(
'ObjectOwner', backref='person', cascade='all, delete-orphan')
language = db.relationship(
'Option',
primaryjoin='and_(foreign(Person.language_id) == Option.id, '
'Option.role == "person_language")',
uselist=False,
)
@staticmethod
def _extra_table_args(cls):
return (
db.Index('ix_people_name_email', 'name', 'email'),
db.Index('uq_people_email', 'email', unique=True),
)
_fulltext_attrs = [
'company',
'email',
'name',
]
_publish_attrs = [
'company',
'email',
'language',
'name',
PublishOnly('object_people'),
PublishOnly('system_wide_role'),
]
_sanitize_html = [
'company',
'name',
]
_include_links = []
_aliases = {
"name": "Name",
"email": {
"display_name": "Email",
"unique": True,
},
"company": "Company",
"user_role": {
"display_name": "Role",
"type": "user_role",
"filter_by": "_filter_by_user_role",
},
}
@classmethod
def _filter_by_user_role(cls, predicate):
from ggrc_basic_permissions.models import Role, UserRole
return UserRole.query.join(Role).filter(
(UserRole.person_id == cls.id) &
(UserRole.context_id == None) & # noqa
predicate(Role.name)
).exists()
# Methods required by Flask-Login
# pylint: disable=no-self-use
def is_authenticated(self):
return self.system_wide_role != 'No Access'
def is_active(self):
# pylint: disable=no-self-use
return True # self.active
def is_anonymous(self):
# pylint: disable=no-self-use
return False
def get_id(self):
return unicode(self.id) # noqa
@validates('language')
def validate_person_options(self, key, option):
return validate_option(self.__class__.__name__, key, option,
'person_language')
@validates('email')
def validate_email(self, key, email):
if not Person.is_valid_email(email):
message = "Must provide a valid email address"
raise ValidationError(message)
return email
@staticmethod
def is_valid_email(val):
# Borrowed from Django
# literal form, ipv4 address (SMTP 4.1.3)
email_re = re.compile(
'^[-!#$%&\'*+\\.\/0-9=?A-Z^_`{|}~]+@([-0-9A-Z]+\.)+([0-9A-Z]){2,4}$',
re.IGNORECASE)
return email_re.match(val) if val else False
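    # Examples (illustrative): is_valid_email("[email protected]") returns a
    # truthy match object, while is_valid_email("not-an-email") and
    # is_valid_email("") are both falsy.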
@classmethod
def eager_query(cls):
from sqlalchemy import orm
# query = super(Person, cls).eager_query()
# Completely overriding eager_query to avoid eager loading of the
# modified_by relationship
return super(Person, cls).eager_query().options(
orm.joinedload('language'),
orm.subqueryload('object_people'),
)
def _display_name(self):
return self.email
@computed_property
def system_wide_role(self):
"""For choosing the role string to show to the user; of all the roles in
the system-wide context, it shows the highest ranked one (if there are
multiple) or "No Access" if there are none.
"""
# FIXME: This method should be in `ggrc_basic_permissions`, since it
# depends on `Role` and `UserRole` objects
if self.email in getattr(settings, "BOOTSTRAP_ADMIN_USERS", []):
return u"Superuser"
role_hierarchy = {
u'Administrator': 0,
u'Editor': 1,
u'Reader': 2,
u'Creator': 3,
}
unique_roles = set([
user_role.role.name
for user_role in self.user_roles
if user_role.role.name in role_hierarchy
])
if len(unique_roles) == 0:
return u"No Access"
else:
# -1 as default to make items not in this list appear on top
# and thus shown to the user
sorted_roles = sorted(unique_roles,
key=lambda x: role_hierarchy.get(x, -1))
return sorted_roles[0]
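    # For example (illustrative): a user holding both "Editor" and "Reader"
    # system-wide roles is reported as "Editor", the higher-ranked of the two.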
| apache-2.0 | -1,039,642,043,098,073,700 | 28.666667 | 78 | 0.634339 | false |
paboldin/rally | tests/unit/plugins/openstack/context/test_fuel.py | 6 | 4160 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally import exceptions
from rally.plugins.openstack.context import fuel
from tests.unit import test
BASE = "rally.plugins.openstack.context.fuel"
class FuelEnvGeneratorTestCase(test.TestCase):
@mock.patch(BASE + ".FuelEnvGenerator._create_envs",
return_value=["env1"])
@mock.patch(BASE + ".fuel_utils.FuelScenario")
def test_setup(self, mock_fuel_scenario, mock__create_envs):
context = {}
context["config"] = {"fuel_environments": {"environments": 1}}
context["task"] = {"uuid": "some_uuid"}
context["admin"] = {"endpoint": "some_endpoint"}
env_ctx = fuel.FuelEnvGenerator(context)
env_ctx.setup()
self.assertIn("fuel", env_ctx.context)
self.assertIn("environments", env_ctx.context["fuel"])
mock__create_envs.assert_called_once_with()
mock_fuel_scenario.assert_called_once_with(context)
@mock.patch(BASE + ".FuelEnvGenerator._create_envs",
return_value=["env1"])
@mock.patch(BASE + ".fuel_utils.FuelScenario")
def test_setup_error(self, mock_fuel_scenario, mock__create_envs):
context = {}
context["config"] = {"fuel_environments": {"environments": 5}}
context["task"] = {"uuid": "some_uuid"}
context["admin"] = {"endpoint": "some_endpoint"}
env_ctx = fuel.FuelEnvGenerator(context)
self.assertRaises(exceptions.ContextSetupFailure, env_ctx.setup)
def test__create_envs(self):
config = {"environments": 4,
"release_id": 42,
"network_provider": "provider",
"deployment_mode": "mode",
"net_segment_type": "type",
"resource_management_workers": 3}
context = {"task": {},
"config": {"fuel_environments": config}}
env_ctx = fuel.FuelEnvGenerator(context)
env_ctx.fscenario = mock.Mock()
env_ctx.fscenario.return_value._create_environment.return_value = "id"
self.assertEqual(config["environments"], len(env_ctx._create_envs()))
enves = config.pop("environments")
config.pop("resource_management_workers")
exp_calls = [mock.call(**config) for i in range(enves)]
self.assertEqual(
exp_calls,
env_ctx.fscenario._create_environment.mock_calls)
def test__delete_envs(self):
config = {"release_id": 42,
"network_provider": "provider",
"deployment_mode": "mode",
"net_segment_type": "type",
"resource_management_workers": 3}
context = {"task": {},
"config": {"fuel_environments": config},
"fuel": {"environments": ["id", "id", "id"]}}
env_ctx = fuel.FuelEnvGenerator(context)
env_ctx.fscenario = mock.Mock()
env_ctx._delete_envs()
self.assertEqual({}, context["fuel"])
def test_cleanup(self):
config = {"release_id": 42,
"network_provider": "provider",
"deployment_mode": "mode",
"net_segment_type": "type",
"resource_management_workers": 3}
context = {"task": {"uuid": "some_id"},
"config": {"fuel_environments": config},
"fuel": {"environments": ["id", "id", "id"]}}
env_ctx = fuel.FuelEnvGenerator(context)
env_ctx._delete_envs = mock.Mock()
env_ctx.cleanup()
env_ctx._delete_envs.assert_called_once_with()
| apache-2.0 | 6,241,995,310,826,314,000 | 37.518519 | 78 | 0.589183 | false |
h4wkmoon/shinken | test/test_missing_cariarereturn.py | 1 | 1610 | #!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, [email protected]
# Gerhard Lausser, [email protected]
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from shinken_test import *
class TestConfig(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_missing_cariarereturn.cfg')
def test_dummy(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
print "Get the hosts and services"
now = time.time()
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "TEST")
self.assert_(svc is not None)
self.assert_(len(svc.checks_in_progress) >= 1)
print svc.checks_in_progress[0].command
self.assert_(svc.checks_in_progress[0].command == 'plugins/nothing BLABLA')
if __name__ == '__main__':
unittest.main()
| agpl-3.0 | -3,284,252,839,235,817,500 | 32.541667 | 86 | 0.690683 | false |
cajt/sandbox-loa | sets/nexys2_spw/src_py/spw.py | 1 | 1846 |
#!/usr/bin/env python
class spw(object):
def __init__(self, fpga, baseaddr):
self.fpga = fpga
self.baseaddr = baseaddr
def sendEOP(self):
        self.fpga.write(self.baseaddr, 0x100 | 0x00)
    def sendEEP(self):
        self.fpga.write(self.baseaddr, 0x100 | 0x01)
    def send(self, s):
        self.fpga.write(self.baseaddr, ord(s) & 0xff)
    def read(self):
        return self.fpga.read(self.baseaddr)
def setControl(self, txClkDiv=4, linkStart=True, linkDis=False, autostart=True ):
val = (int(txClkDiv)<<8) & 0xff00
if linkStart:
val += 0x2
if linkDis:
val += 0x4
if autostart:
val+= 0x1
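        # For example, the defaults (txClkDiv=4, linkStart=True, linkDis=False,
        # autostart=True) give val == 0x0403: the clock divider occupies bits
        # [15:8], while autostart/linkStart/linkDis map to bits 0/1/2.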
        self.fpga.write(self.baseaddr+2, val)
def getStatus(self):
        return self.fpga.read(self.baseaddr+1)
if __name__ == "__main__":
import time
import sys
import hdlc_busmaster
if len(sys.argv) == 1:
port = "/dev/ttyUSB0"
else:
port = sys.argv[1]
fpga = hdlc_busmaster.hdlc_busmaster(port)
#sw = fpga.read(0x0000)
#print hex(sw)
spw0 = spw(fpga, 0x0010)
spw1 = spw(fpga, 0x0020)
print("Actual State")
#print("SpW Link 0 status: 0x%4.0x" % spw0.getStatus())
#print("SpW Link 1 status: 0x%4.0x" % spw1.getStatus())
spw0.setControl(linkStart=False, linkDis=True, autostart=False)
spw1.setControl(linkStart=False, linkDis=True, autostart=False)
spw1.send("A")
spw1.sendEOP()
print spw0.read()
print("Link was disabled")
#print("SpW Link 0 status: 0x%4.0x" % spw0.getStatus())
#print("SpW Link 1 status: 0x%4.0x" % spw1.getStatus())
print "gogogo"
spw0.setControl()
spw1.setControl()
print("Link was enabled")
print("SpW Link 0 status: 0x%4.0x" % spw0.getStatus())
#print("SpW Link 1 status: 0x%4.0x" % spw1.getStatus())
| bsd-2-clause | -3,531,568,334,557,404,000 | 22.974026 | 85 | 0.588299 | false |
elsigh/browserscope | third_party/appengine_tools/devappserver2/http_runtime_constants.py | 8 | 1679 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Constants used for communicating with the Python devappserver2 runtime."""
SERVER_SOFTWARE = 'Development/2.0'
INTERNAL_HEADER_PREFIX = 'X-Appengine-Internal-'
INTERNAL_ENVIRON_PREFIX = 'HTTP_X_APPENGINE_INTERNAL_'
REQUEST_ID_HEADER = 'X-Appengine-Internal-Request-Id'
REQUEST_ID_ENVIRON = 'HTTP_X_APPENGINE_INTERNAL_REQUEST_ID'
ENVIRONS_TO_PROPAGATE = set([
'BACKEND_ID',
'DEFAULT_VERSION_HOSTNAME',
'USER_ID',
'USER_IS_ADMIN',
'USER_EMAIL',
'USER_NICKNAME',
'USER_ORGANIZATION',
'REMOTE_ADDR',
'REQUEST_ID_HASH',
'REQUEST_LOG_ID',
'SERVER_NAME',
'SERVER_PORT',
'SERVER_PROTOCOL',
])
SCRIPT_HEADER = INTERNAL_ENVIRON_PREFIX + 'SCRIPT'
# A request header where the value is a string containing the request type, e.g.
# background.
REQUEST_TYPE_HEADER = INTERNAL_ENVIRON_PREFIX + 'REQUEST_TYPE'
# A response header used by the runtime to indicate that an uncaught error has
# occurred and that a user-specified error handler should be used if available.
ERROR_CODE_HEADER = '%sError-Code' % INTERNAL_HEADER_PREFIX
| apache-2.0 | -6,128,353,990,506,186,000 | 31.921569 | 80 | 0.726027 | false |
jakirkham/lazyflow | lazyflow/request/request.py | 1 | 54502 | ###############################################################################
# lazyflow: data flow based lazy parallel computation framework
#
# Copyright (C) 2011-2014, the ilastik developers
# <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the Lesser GNU General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# See the files LICENSE.lgpl2 and LICENSE.lgpl3 for full text of the
# GNU Lesser General Public License version 2.1 and 3 respectively.
# This information is also available on the ilastik web site at:
# http://ilastik.org/license/
###############################################################################
# Built-in
import sys
import heapq
import functools
import itertools
import threading
import multiprocessing
import platform
import traceback
import StringIO
import logging
logger = logging.getLogger(__name__)
# Third-party
import greenlet
# lazyflow
import threadPool
# This module's code needs to be sanitized if you're not using CPython.
# In particular, check that set operations like remove() are still atomic.
assert platform.python_implementation() == "CPython"
class RequestGreenlet(greenlet.greenlet):
def __init__(self, owning_request, fn):
super(RequestGreenlet, self).__init__(fn, greenlet.getcurrent())
self.owning_requests = [owning_request]
class SimpleSignal(object):
"""
Simple callback mechanism. Not synchronized. No unsubscribe function.
"""
def __init__(self):
self.callbacks = []
self._cleaned = False
def subscribe(self, fn):
self.callbacks.append(fn)
def __call__(self, *args, **kwargs):
"""Emit the signal."""
assert not self._cleaned, "Can't emit a signal after it's already been cleaned!"
for f in self.callbacks:
f(*args, **kwargs)
def clean(self):
self._cleaned = True
self.callbacks = []
def log_exception( logger, msg=None, exc_info=None, level=logging.ERROR ):
"""
Log the current exception to the given logger, and also log the given error message.
If exc_info is provided, log that exception instead of the current exception provided by sys.exc_info.
It is better to log exceptions this way instead of merely printing them to the console,
so that other logger outputs (such as log files) show the exception, too.
"""
sio = StringIO.StringIO()
if exc_info:
traceback.print_exception( exc_info[0], exc_info[1], exc_info[2], file=sio )
else:
traceback.print_exc( file=sio )
logger.log(level, sio.getvalue() )
if msg:
logger.log(level, msg )
class Request( object ):
# One thread pool shared by all requests.
# See initialization after this class definition (below)
global_thread_pool = None
@classmethod
def reset_thread_pool( cls, num_workers = multiprocessing.cpu_count() ):
"""
Change the number of threads allocated to the request system.
As a special case, you may set ``num_workers`` to 0.
In that case, the normal thread pool is not used at all.
Instead, all requests will execute synchronously, from within the submitting thread.
Utilities like ``RequestLock``, ``SimpleRequestCondition`` will use alternate
implementations based on equivalent classes in the builtin ``threading`` module.
.. note:: It is only valid to call this function during startup.
Any existing requests will be dropped from the pool!
"""
if cls.global_thread_pool is not None:
cls.global_thread_pool.stop()
cls.global_thread_pool = threadPool.ThreadPool( num_workers )
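        # Example (illustrative):
        #   Request.reset_thread_pool(0)   # debug mode: all requests run synchronously
        #   Request.reset_thread_pool()    # one worker per CPU core (the default)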
class CancellationException(Exception):
"""
This is raised when the whole request has been cancelled.
If you catch this exception from within a request, clean up and return immediately.
If you have nothing to clean up, you are not required to handle this exception.
Implementation details:
This exception is raised when the cancel flag is checked in the wait() function:
- immediately before the request is suspended OR
- immediately after the request is woken up from suspension
"""
pass
class InvalidRequestException(Exception):
"""
This is raised when calling wait on a request that has already been cancelled,
which can only happen if the request you're waiting for was spawned elsewhere
(i.e. you are waiting for someone else's request to avoid duplicate work).
When this occurs, you will typically want to restart the request yourself.
"""
pass
class CircularWaitException(Exception):
"""
This exception is raised if a request calls wait() on itself.
Currently, this only catches the most basic case.
No attempt is made to detect indirect cycles
(e.g. if req.wait() is called from within a req's own child.),
so don't rely on it to catch tricky deadlocks due to indirect self-waiting.
"""
pass
class TimeoutException(Exception):
"""
This is raised if a call to wait() times out in the context of a foreign thread.
See ``Request.wait()`` for details.
"""
pass
class InternalError(Exception):
"""
This is raised if an error is detected in the Request framework itself.
If this exception is raised, it implies a bug in this file (request.py).
"""
pass
_root_request_counter = itertools.count()
def __init__(self, fn):
"""
Constructor.
Postconditions: The request has the same cancelled status as its parent (the request that is creating this one).
"""
# Workload
self.fn = fn
#: After this request finishes execution, this attribute holds the return value from the workload function.
self._result = None
# State
self.started = False
self.cancelled = False
self.uncancellable = False
self.finished = False
self.execution_complete = False
self.finished_event = threading.Event()
self.exception = None
self.exception_info = (None, None, None)
self._cleaned = False
# Execution
self.greenlet = None # Not created until assignment to a worker
self._assigned_worker = None
# Request relationships
self.pending_requests = set() # Requests that are waiting for this one
self.blocking_requests = set() # Requests that this one is waiting for (currently one at most since wait() can only be called on one request at a time)
self.child_requests = set() # Requests that were created from within this request (NOT the same as pending_requests)
self._current_foreign_thread = None
current_request = Request._current_request()
self.parent_request = current_request
self._max_child_priority = 0
if current_request is None:
self._priority = [ Request._root_request_counter.next() ]
else:
with current_request._lock:
current_request.child_requests.add(self)
# We must ensure that we get the same cancelled status as our parent.
self.cancelled = current_request.cancelled
# We acquire the same priority as our parent, plus our own sub-priority
current_request._max_child_priority += 1
self._priority = current_request._priority + [ current_request._max_child_priority ]
self._lock = threading.Lock() # NOT an RLock, since requests may share threads
self._sig_finished = SimpleSignal()
self._sig_cancelled = SimpleSignal()
self._sig_failed = SimpleSignal()
self._sig_execution_complete = SimpleSignal()
def __lt__(self, other):
"""
Request comparison is by priority.
This allows us to store them in a heap.
"""
if other is None:
# In RequestLock, we sometimes compare Request objects with None,
# which is permitted. None is considered less (higher priority)
return False
return self._priority < other._priority
def __str__(self):
return "fn={}, assigned_worker={}, started={}, execution_complete={}, exception={}, "\
"greenlet={}, current_foreign_thread={}, uncancellable={}"\
.format( self.fn, self.assigned_worker, self.started, self.execution_complete, \
self.exception, self.greenlet, self._current_foreign_thread, self.uncancellable )
def clean(self, _fullClean=True):
"""
Delete all state from the request, for cleanup purposes.
Removes references to callbacks, children, and the result.
:param _fullClean: Internal use only. If False, only clean internal bookkeeping members.
Otherwise, delete everything, including the result.
"""
self._sig_cancelled.clean()
self._sig_finished.clean()
self._sig_failed.clean()
with self._lock:
for child in self.child_requests:
child.parent_request = None
self.child_requests.clear()
parent_req = self.parent_request
if parent_req is not None:
with parent_req._lock:
parent_req.child_requests.discard(self)
if _fullClean:
self._cleaned = True
self._result = None
@property
def assigned_worker(self):
"""
This member is accessed by the ThreadPool to determine which Worker thread this request belongs to.
"""
return self._assigned_worker
@assigned_worker.setter
def assigned_worker(self, worker):
"""
Assign this request to the given worker thread. (A request cannot switch between threads.)
Must be called from the worker thread.
"""
assert self._assigned_worker is None
self._assigned_worker = worker
# Create our greenlet now (so the greenlet has the correct parent, i.e. the worker)
self.greenlet = RequestGreenlet(self, self._execute)
@property
def result(self):
assert not self._cleaned, "Can't get this result. The request has already been cleaned!"
assert self.execution_complete, "Can't access the result until the request is complete."
assert not self.cancelled, "Can't access the result of a cancelled request."
assert self.exception is None, "Can't access this result. The request failed."
return self._result
def _execute(self):
"""
Do the real work of this request.
"""
# Did someone cancel us before we even started?
if not self.cancelled:
try:
# Do the actual work
self._result = self.fn()
except Request.CancellationException:
# Don't propagate cancellations back to the worker thread,
# even if the user didn't catch them.
pass
except Exception as ex:
# The workload raised an exception.
# Save it so we can raise it in any requests that are waiting for us.
self.exception = ex
self.exception_info = sys.exc_info() # Documentation warns of circular references here,
# but that should be okay for us.
self._post_execute()
def _post_execute(self):
# Guarantee that self.finished doesn't change while wait() owns self._lock
with self._lock:
self.finished = True
try:
# Notify callbacks (one or the other, not both)
if self.cancelled:
self._sig_cancelled()
elif self.exception is not None:
self._sig_failed( self.exception, self.exception_info )
else:
self._sig_finished(self._result)
except Exception as ex:
# If we're here, then our completion handler function (e.g. sig_finished or sig_failed)
# raised an exception.
failed_during_failure_handler = (self.exception is not None)
# Save the exception so we can re-raise it in any requests that are waiting for us.
# Otherwise, the threadpool just dies.
self.exception = ex
self.exception_info = sys.exc_info() # Documentation warns of circular references here,
# but that should be okay for us.
# If we already fired sig_failed(), then there's no point in firing it again.
# That's the function that caused this problem in the first place!
if not failed_during_failure_handler:
self._sig_failed( self.exception, self.exception_info )
else:
# Now that we're complete, the signals have fired and any requests we needed to wait for have completed.
# To free memory (and child requests), we can clean up everything but the result.
self.clean( _fullClean=False )
finally:
# Unconditionally signal (internal use only)
with self._lock:
self.execution_complete = True
self._sig_execution_complete()
self._sig_execution_complete.clean()
# Notify non-request-based threads
self.finished_event.set()
# Clean-up
if self.greenlet is not None:
popped = self.greenlet.owning_requests.pop()
assert popped == self
self.greenlet = None
def submit(self):
"""
If this request isn't started yet, schedule it to be started.
"""
if Request.global_thread_pool.num_workers > 0:
with self._lock:
if not self.started:
self.started = True
self._wake_up()
else:
# For debug purposes, we support a worker count of zero.
# In that case, ALL REQUESTS ARE synchronous.
# This can have unintended consequences. Use with care.
if not self.started:
self.started = True
self._execute()
# TODO: Exactly how to handle cancellation in this debug mode is not quite clear...
# The _execute() function normally intercepts exceptions to hide them from the worker threads.
# In this debug mode, we want to re-raise the exception.
if self.exception is not None:
raise self.exception_info[0], self.exception_info[1], self.exception_info[2]
def _wake_up(self):
"""
Resume this request's execution (put it back on the worker's job queue).
"""
Request.global_thread_pool.wake_up(self)
def _switch_to(self):
"""
Switch to this request's greenlet
"""
try:
self.greenlet.switch()
except greenlet.error:
# This is a serious error.
# If we are handling an exception here, it means there's a bug in the request framework,
# not the client's code.
msg = "Current thread ({}) could not start/resume task: {}"\
.format( threading.current_thread().name, self )
log_exception( logger, msg, level=logging.CRITICAL )
# We still run the post-execute code, so that all requests waiting on this
# one will be notified of the error and produce their own tracebacks.
# Hopefully that will help us reproduce/debug the issue.
self.exception = Request.InternalError( "A serious error was detected while waiting for another request. "
"Check the log for other exceptions." )
self.exception_info = ( type(self.exception),
self.exception,
sys.exc_info()[2] )
self._post_execute()
# And now we simply return instead of letting this worker die.
#def __call__(self):
# """
# Resume (or start) the request execution.
# This is implemented in __call__ so that it can be used with the ThreadPool, which is designed for general callable tasks.
#
# .. note:: DO NOT use ``Request.__call__`` explicitly from your code. It is called internally or from the ThreadPool.
# """
# self._switch_to()
# Implement __call__ with a direct assignment instead of the
    # above implementation to avoid an unnecessary function call.
__call__ = _switch_to
def _suspend(self):
"""
Suspend this request so another one can be woken up by the worker.
"""
# Switch back to the worker that we're currently running in.
try:
self.greenlet.parent.switch()
except greenlet.error:
logger.critical( "Current thread ({}) could not suspend task: {}. (parent greenlet={})"
.format( threading.current_thread().name, self, self.greenlet.parent ) )
raise
def wait(self, timeout=None):
"""
Start this request if necessary, then wait for it to complete. Return the request's result.
:param timeout: If running within a request, this parameter must be None.
If running within the context of a foreign (non-request) thread,
a timeout may be specified in seconds (floating-point).
If the request does not complete within the timeout period,
then a Request.TimeoutException is raised.
"""
assert not self._cleaned, "Can't wait() for a request that has already been cleaned."
return self._wait(timeout)
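        # Typical usage (illustrative sketch; `compute` and `roi` are placeholders):
        #   req = Request( functools.partial(compute, roi) )
        #   result = req.wait()            # suspends if called from a request, blocks otherwise
        #   result = req.wait(timeout=5.0) # foreign threads only; may raise TimeoutException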
def block(self, timeout=None):
"""
Like wait, but does not return a result. Can be used even if the request has already been cleaned.
"""
self._wait(timeout) # No return value. Use wait()
def _wait(self, timeout=None):
# Quick shortcut:
# If there's no need to wait, just return immediately.
# This avoids some function calls and locks.
# (If we didn't do this, the code below would still do the right thing.)
# Note that this is only possible because self.execution_complete is set to True
# AFTER self.cancelled and self.exception have their final values. See _execute().
if self.execution_complete and not self.cancelled and self.exception is None:
return self._result
# Identify the request that is waiting for us (the current context)
current_request = Request._current_request()
if current_request is None:
# 'None' means that this thread is not one of the request worker threads.
self._wait_within_foreign_thread( timeout )
else:
assert timeout is None, "The timeout parameter may only be used when wait() is called from a foreign thread."
self._wait_within_request( current_request )
assert self.finished
return self._result
def _wait_within_foreign_thread(self, timeout):
"""
This is the implementation of wait() when executed from a foreign (non-worker) thread.
Here, we rely on an ordinary threading.Event primitive: ``self.finished_event``
"""
# Don't allow this request to be cancelled, since a real thread is waiting for it.
self.uncancellable = True
with self._lock:
direct_execute_needed = not self.started and (timeout is None)
if direct_execute_needed:
# This request hasn't been started yet
# We can execute it directly in the current thread instead of submitting it to the request thread pool (big optimization).
# Mark it as 'started' so that no other greenlet can claim it
self.started = True
if self._current_foreign_thread is not None and self._current_foreign_thread == threading.current_thread():
# It's usually nonsense for a request to wait for itself,
# but we allow it if the request is already "finished"
# (which can happen if the request is calling wait() from within a notify_finished callback)
if self.finished:
if self.exception is not None:
raise self.exception_info[0], self.exception_info[1], self.exception_info[2]
else:
return
else:
raise Request.CircularWaitException()
if direct_execute_needed:
self._current_foreign_thread = threading.current_thread()
self._execute()
else:
self.submit()
# This is a non-worker thread, so just block the old-fashioned way
completed = self.finished_event.wait(timeout)
if not completed:
raise Request.TimeoutException()
if self.cancelled:
# It turns out this request was already cancelled.
raise Request.InvalidRequestException()
if self.exception is not None:
raise self.exception_info[0], self.exception_info[1], self.exception_info[2]
def _wait_within_request(self, current_request):
"""
This is the implementation of wait() when executed from another request.
If we have to wait, suspend the current request instead of blocking the whole worker thread.
"""
# Before we suspend the current request, check to see if it's been cancelled since it last blocked
if current_request.cancelled:
raise Request.CancellationException()
if current_request == self:
# It's usually nonsense for a request to wait for itself,
# but we allow it if the request is already "finished"
# (which can happen if the request is calling wait() from within a notify_finished callback)
if self.finished:
return
else:
raise Request.CircularWaitException()
with self._lock:
# If the current request isn't cancelled but we are,
# then the current request is trying to wait for a request (i.e. self) that was spawned elsewhere and already cancelled.
# If they really want it, they'll have to spawn it themselves.
if self.cancelled:
raise Request.InvalidRequestException()
if self.exception is not None:
# This request was already started and already failed.
# Simply raise the exception back to the current request.
raise self.exception_info[0], self.exception_info[1], self.exception_info[2]
direct_execute_needed = not self.started
suspend_needed = self.started and not self.execution_complete
if direct_execute_needed or suspend_needed:
current_request.blocking_requests.add(self)
self.pending_requests.add(current_request)
if direct_execute_needed:
# This request hasn't been started yet
# We can execute it directly in the current greenlet instead of creating a new greenlet (big optimization)
# Mark it as 'started' so that no other greenlet can claim it
self.started = True
elif suspend_needed:
# This request is already started in some other greenlet.
# We must suspend the current greenlet while we wait for this request to complete.
# Here, we set up a callback so we'll wake up once this request is complete.
self._sig_execution_complete.subscribe( functools.partial(current_request._handle_finished_request, self) )
if suspend_needed:
current_request._suspend()
elif direct_execute_needed:
# Optimization: Don't start a new greenlet. Directly run this request in the current greenlet.
self.greenlet = current_request.greenlet
self.greenlet.owning_requests.append(self)
self._assigned_worker = current_request._assigned_worker
self._execute()
self.greenlet = None
current_request.blocking_requests.remove(self)
if suspend_needed or direct_execute_needed:
# No need to lock here because set.remove is atomic in CPython.
#with self._lock:
self.pending_requests.remove( current_request )
# Now we're back (no longer suspended)
# Was the current request cancelled while it was waiting for us?
if current_request.cancelled:
raise Request.CancellationException()
# Are we back because we failed?
if self.exception is not None:
raise self.exception_info[0], self.exception_info[1], self.exception_info[2]
def _handle_finished_request(self, request, *args):
"""
Called when a request that we were waiting for has completed.
Wake ourselves up so we can resume execution.
"""
with self._lock:
# We're not waiting for this one any more
self.blocking_requests.remove(request)
if len(self.blocking_requests) == 0:
self._wake_up()
def notify_finished(self, fn):
"""
Register a callback function to be called when this request is finished.
If we're already finished, call it now.
:param fn: The callback to be notified. Signature: fn(result)
"""
assert not self._cleaned, "This request has been cleaned() already."
with self._lock:
finished = self.finished
if not finished:
# Call when we eventually finish
self._sig_finished.subscribe(fn)
if finished:
# Call immediately
fn(self._result)
def notify_cancelled(self, fn):
"""
Register a callback function to be called when this request is finished due to cancellation.
If we're already finished and cancelled, call it now.
:param fn: The callback to call if the request is cancelled. Signature: fn()
"""
assert not self._cleaned, "This request has been cleaned() already."
with self._lock:
finished = self.finished
cancelled = self.cancelled
if not finished:
# Call when we eventually finish
self._sig_cancelled.subscribe(fn)
if finished and cancelled:
# Call immediately
fn()
def notify_failed(self, fn):
"""
Register a callback function to be called when this request is finished due to failure (an exception was raised).
If we're already failed, call it now.
:param fn: The callback to call if the request fails. Signature: ``fn(exception, exception_info)``
exception_info is a tuple of (type, value, traceback). See Python documentation on
``sys.exc_info()`` for more documentation.
"""
assert not self._cleaned, "This request has been cleaned() already."
with self._lock:
finished = self.finished
failed = self.exception is not None
if not finished:
# Call when we eventually finish
self._sig_failed.subscribe(fn)
if finished and failed:
# Call immediately
fn(self.exception, self.exception_info)
def cancel(self):
"""
Attempt to cancel this request and all requests that it spawned.
No request will be cancelled if other non-cancelled requests are waiting for its results.
"""
# We can only be cancelled if:
# (1) There are no foreign threads blocking for us (flagged via self.uncancellable) AND
# (2) our parent request (if any) is already cancelled AND
# (3) all requests that are pending for this one are already cancelled
with self._lock:
cancelled = not self.uncancellable
cancelled &= (self.parent_request is None or self.parent_request.cancelled)
for r in self.pending_requests:
cancelled &= r.cancelled
self.cancelled = cancelled
if cancelled:
# Any children added after this point will receive our same cancelled status
child_requests = self.child_requests
self.child_requests = set()
if self.cancelled:
# Cancel all requests that were spawned from this one.
for child in child_requests:
child.cancel()
@classmethod
def _current_request(cls):
"""
Inspect the current greenlet/thread and return the request object associated with it, if any.
"""
current_greenlet = greenlet.getcurrent()
        # Greenlets in worker threads have a monkey-patched 'owning_requests' member
if hasattr(current_greenlet, 'owning_requests'):
return current_greenlet.owning_requests[-1]
else:
# There is no request associated with this greenlet.
# It must be a regular (foreign) thread.
return None
@classmethod
def current_request_is_cancelled(cls):
"""
Return True if called from within the context of a cancelled request.
"""
current_request = Request._current_request()
return current_request and current_request.cancelled
@classmethod
def raise_if_cancelled(cls):
"""
If called from the context of a cancelled request, raise a CancellationException immediately.
"""
if Request.current_request_is_cancelled():
raise Request.CancellationException()
##########################################
#### Backwards-compatible API support ####
##########################################
class _PartialWithAppendedArgs(object):
"""
Like functools.partial, but any kwargs provided are given last when calling the target.
"""
def __init__(self, fn, *args, **kwargs):
self.func = fn
self.args = args
self.kwargs = kwargs
def __call__(self, *args):
totalargs = args + self.args
return self.func( *totalargs, **self.kwargs)
def writeInto(self, destination):
self.fn = Request._PartialWithAppendedArgs( self.fn, destination=destination )
return self
def getResult(self):
return self.result
Request.reset_thread_pool()
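# --- Illustrative sketch (editor addition, not part of the original API) ---
# A minimal example of the callback / wait interface defined above, assuming a
# non-empty global thread pool. The helper names (_example_compute,
# _example_on_finished, _example_on_failed) are hypothetical.
def _example_request_usage():
    def _example_compute():
        Request.raise_if_cancelled()  # cooperatively honor cancellation
        return sum(range(100))

    def _example_on_finished(result):
        print("request finished with: {}".format(result))

    def _example_on_failed(exception, exception_info):
        print("request failed with: {!r}".format(exception))

    req = Request(_example_compute)
    req.notify_finished(_example_on_finished)  # called with the result when the request completes
    req.notify_failed(_example_on_failed)      # called only if the request raises an exception
    req.submit()                               # hand the request to the worker thread pool
    req.wait()                                 # block (or suspend) until the request is finished
    return req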
class RequestLock(object):
"""
Request-aware lock. Implements the same interface as threading.Lock.
    If acquire() is called from a normal thread, the lock blocks the thread as usual.
If acquire() is called from a Request, then the request is suspended so that another Request can be resumed on the thread.
Requests and normal threads can *share* access to a RequestLock.
That is, they compete equally for access to the lock.
Implementation detail: Depends on the ability to call two *private* Request methods: _suspend() and _wake_up().
"""
class DEBUG_RequestLockQueue(object):
def __init__(self):
self._l = []
def __len__(self):
return len(self._l)
def push(self, item):
self._l.append(item)
def pop(self):
item = self._l[0]
self._l = self._l[1:]
return item
def popNone(self):
self._l.remove(None)
class RequestLockQueue(object):
"""
This is a pseudo-priority queue.
        All items pushed consecutively (with no pop()s in between) are prioritized.
But as soon as one call to pop() is made, newly pushed items will
NOT be included in the current set until it is exhausted.
This way, if a high-priority item is popped() and then immediately
re-pushed, it is not simply replaced at the head of the queue.
(It has to wait until the next "batch" of pops.)
"""
def __init__(self):
self._pushing_queue = []
self._popping_queue = []
def push(self, item):
heapq.heappush(self._pushing_queue, item)
def pop(self):
if not self._popping_queue:
self._pushing_queue, self._popping_queue = self._popping_queue, self._pushing_queue
return heapq.heappop(self._popping_queue)
def popNone(self):
if self._popping_queue and self._popping_queue[0] is None:
self._popping_queue = self._popping_queue[1:]
else:
assert self._pushing_queue[0] is None
self._pushing_queue = self._pushing_queue[1:]
def __len__(self):
return len(self._pushing_queue) + len(self._popping_queue)
logger = logging.getLogger(__name__ + ".RequestLock")
def __init__(self):
if Request.global_thread_pool.num_workers == 0:
self._debug_mode_init()
else:
# This member holds the state of this RequestLock
self._modelLock = threading.Lock()
# This member protects the _pendingRequests set from corruption
self._selfProtectLock = threading.Lock()
# This is a list of requests that are currently waiting for the lock.
# Other waiting threads (i.e. non-request "foreign" threads) are each listed as a single "None" item.
self._pendingRequests = RequestLock.RequestLockQueue()
def _debug_mode_init(self):
"""
For debug purposes, the user can use an empty threadpool.
In that case, all requests are executing synchronously.
(See Request.submit().)
In this debug mode, this class is simply a stand-in for an
RLock object from the builtin threading module.
"""
# Special debugging scenario:
# If there is no threadpool, just pretend to be an RLock
self._debug_lock = threading.RLock()
self.acquire = self._debug_lock.acquire
self.release = self._debug_lock.release
self.__enter__ = self._debug_lock.__enter__
self.__exit__ = self._debug_lock.__exit__
self.locked = lambda: self._debug_lock._RLock__owner is not None
def locked(self):
"""
Return True if lock is currently held by some thread or request.
"""
return self._modelLock.locked()
def acquire(self, blocking=True):
"""
Acquire the lock. If `blocking` is True, block until the lock is available.
If `blocking` is False, don't wait and return False if the lock couldn't be acquired immediately.
:param blocking: Same as in threading.Lock
"""
current_request = Request._current_request()
if current_request is None:
return self._acquire_from_within_thread(blocking)
else:
return self._acquire_from_within_request(current_request, blocking)
def _acquire_from_within_request(self, current_request, blocking):
with self._selfProtectLock:
# Try to get it immediately.
got_it = self._modelLock.acquire(False)
if not blocking:
return got_it
if not got_it:
# We have to wait. Add ourselves to the list of waiters.
self._pendingRequests.push(current_request)
if not got_it:
# Suspend the current request.
# When it is woken, it owns the _modelLock.
current_request._suspend()
# Now we're back (no longer suspended)
# Was the current request cancelled while it was waiting for the lock?
if current_request.cancelled:
raise Request.CancellationException()
# Guaranteed to own _modelLock now (see release()).
return True
def _acquire_from_within_thread(self, blocking):
if not blocking:
return self._modelLock.acquire(blocking)
with self._selfProtectLock:
# Append "None" to indicate that a real thread is waiting (not a request)
self._pendingRequests.push(None)
# Wait for the internal lock to become free
got_it = self._modelLock.acquire(blocking)
with self._selfProtectLock:
# Search for a "None" to pull off the list of pendingRequests.
# Don't take real requests from the queue
self._pendingRequests.popNone()
return got_it
def release(self):
"""
Release the lock so that another request or thread can acquire it.
"""
assert self._modelLock.locked(), "Can't release a RequestLock that isn't already acquired!"
with self._selfProtectLock:
if len(self._pendingRequests) == 0:
# There were no waiting requests or threads, so the lock is free to be acquired again.
self._modelLock.release()
else:
# Instead of releasing the modelLock, just wake up a request that was waiting for it.
# He assumes that the lock is his when he wakes up.
r = self._pendingRequests.pop()
if r is not None:
r._wake_up()
else:
# The pending "request" is a real thread.
# Release the lock to wake it up (he'll remove the _pendingRequest entry)
self._pendingRequests.push(None)
self._modelLock.release()
def __enter__(self):
self.acquire()
return self
def __exit__(self, *args):
self.release()
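# --- Illustrative sketch (editor addition, not part of the original API) ---
# A minimal example of sharing a RequestLock between several requests via the
# context-manager protocol implemented above. The names _example_locked_increment
# and the 'totals' dict are hypothetical.
def _example_requestlock_usage():
    lock = RequestLock()
    totals = {"value": 0}  # shared mutable state protected by the lock

    def _example_locked_increment():
        # Acquiring from within a request suspends the request instead of
        # blocking the whole worker thread; plain threads block as usual.
        with lock:
            totals["value"] += 1

    reqs = [Request(_example_locked_increment) for _ in range(10)]
    for req in reqs:
        req.submit()
    for req in reqs:
        req.wait()
    return totals["value"]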
class SimpleRequestCondition(object):
"""
A ``Request``-compatible condition variable that supports a limited
subset of the features implemented by the standard ``threading.Condition``.
**Limitations:**
- Only one request may call :py:meth:`wait()` at a time.
- Likewise, :py:meth:`notify()` doesn't accept the ``n`` arg.
- Likewise, there is no ``notify_all()`` method.
- :py:meth:`wait()` doesn't support the ``timeout`` arg.
.. note:: It would be nice if we could simply use ``threading.Condition( RequestLock() )`` instead of rolling
our own custom condition variable class, but that doesn't quite work in cases where we need to call
``wait()`` from a worker thread (a non-foreign thread).
(``threading.Condition`` uses ``threading.Lock()`` as its 'waiter' lock, which blocks the entire worker.)
**Example:**
.. code-block:: python
cond = SimpleRequestCondition()
def process_all_data():
with cond:
while not all_finished:
while not is_data_chunk_ready():
cond.wait()
all_finished = process_available_data()
def retrieve_some_data():
get_some_data()
with cond:
cond.notify()
req1 = Request( retrieve_some_data )
req2 = Request( retrieve_some_data )
req3 = Request( retrieve_some_data )
req1.submit()
req2.submit()
req3.submit()
# Wait for them all to finish...
process_all_data()
"""
logger = logging.getLogger(__name__ + ".SimpleRequestCondition")
def __init__(self):
if Request.global_thread_pool.num_workers == 0:
# Special debug mode.
self._debug_mode_init()
else:
self._ownership_lock = RequestLock()
self._waiter_lock = RequestLock() # Only one "waiter".
# Used to block the current request while we wait to be notify()ed.
# Export the acquire/release methods of the ownership lock
self.acquire = self._ownership_lock.acquire
self.release = self._ownership_lock.release
def _debug_mode_init(self):
"""
For debug purposes, the user can use an empty threadpool.
In that case, all requests are executing synchronously.
(See Request.submit().)
In this debug mode, this class is simply a stand-in for a 'real'
condition variable from the builtin threading module.
"""
# Special debug mode initialization:
# Just use a normal condition variable.
condition_lock = threading.RLock()
self._debug_condition = threading.Condition(condition_lock)
self._debug_condition.locked = lambda: condition_lock._RLock__count > 0
self.acquire = self._debug_condition.acquire
self.release = self._debug_condition.release
self.wait = self._debug_condition.wait
self.notify = self._debug_condition.notify
self._ownership_lock = self._debug_condition
#self.__enter__ = self._debug_condition.__enter__
#self.__exit__ = self._debug_condition.__exit__
def __enter__(self):
return self._ownership_lock.__enter__()
def __exit__(self, *args):
self._ownership_lock.__exit__(*args)
def wait(self):
"""
        Wait for another request to call :py:meth:`notify()`.
The caller **must** own (acquire) the condition before calling this method.
The condition is automatically ``released()`` while this method waits for
``notify()`` to be called, and automatically ``acquired()`` again before returning.
.. note:: Unlike ``threading.Condition``, it is **NOT** valid to call ``wait()``
from multiple requests in parallel. That is, this class supports only
one 'consumer' thread.
.. note:: Unlike ``threading.Condition``, no ``timeout`` parameter is accepted here.
"""
# Should start out in the non-waiting state
assert not self._waiter_lock.locked()
self._waiter_lock.acquire()
# Temporarily release the ownership lock while we wait for someone to release the waiter.
assert self._ownership_lock.locked(), "Forbidden to call SimpleRequestCondition.wait() unless you own the condition."
self._ownership_lock.release()
# Try to acquire the lock AGAIN.
# This isn't possible until someone releases it via notify()
# (Note that RequestLock does NOT have RLock semantics.)
self._waiter_lock.acquire()
# Re-acquire
self._ownership_lock.acquire()
# Reset for next wait()
# Must check release status here in case someone called notify() in between the previous two lines
if self._waiter_lock.locked():
self._waiter_lock.release()
def notify(self):
"""
Notify the condition that it can stop ``wait()``-ing.
        The caller **must** own (acquire) the condition before calling this method.
Also, the waiting request cannot return from ``wait()`` until the condition is released,
so the caller should generally release the condition shortly after calling this method.
.. note:: It is okay to call this from more than one request in parallel.
"""
assert self._ownership_lock.locked(), "Forbidden to call SimpleRequestCondition.notify() unless you own the condition."
# Release the waiter for anyone currently waiting
if self._waiter_lock.locked():
self._waiter_lock.release()
class RequestPool(object):
"""
Convenience class for submitting a batch of requests and waiting until they are all complete.
Requests can not be added to the pool after it has already started.
Not threadsafe:
- don't call add() from more than one thread
- don't call wait() or submit() from more than one thread
"""
class RequestPoolError(Exception):
"""
Raised if you attempt to use the Pool in a manner that it isn't designed for.
"""
pass
def __init__(self):
self._started = False
self._failed = False
self._requests = set()
self._finishing_requests = set()
self._set_lock = threading.Lock()
self._request_completed_condition = SimpleRequestCondition()
def __len__(self):
return len(self._requests)
def add(self, req):
"""
Add a request to the pool. The pool must not be submitted yet. Otherwise, an exception is raised.
"""
assert not req.started, "Can't submit an already-submitted request."
if self._started:
# For now, we forbid this because it would allow some corner cases that we aren't unit-testing yet.
# If this exception blocks a desirable use case, then change this behavior and provide a unit test.
raise RequestPool.RequestPoolError("Attempted to add a request to a pool that was already started!")
with self._set_lock:
self._requests.add(req)
req.notify_finished( functools.partial(self._transfer_request_to_finishing_queue, req, 'finished' ) )
req.notify_failed( functools.partial(self._transfer_request_to_finishing_queue, req, 'failed' ) )
req.notify_cancelled( functools.partial(self._transfer_request_to_finishing_queue, req, 'cancelled' ) )
def wait(self):
"""
If the pool hasn't been submitted yet, submit it. Then wait for all requests in the pool to complete.
To be efficient with memory, we attempt to discard requests quickly after they complete.
To achieve this, we keep requests in two sets:
_requests: All requests that are still executing or 'finishing'
_finishing_requests: Requests whose main work has completed, but may still be executing callbacks
(e.g. handlers for notify_finished)
Requests are transferred in batches from the first set to the second as they complete.
We block() for 'finishing' requests first, so they can be discarded quickly.
(If we didn't block for 'finishing' requests at all, we'd be violating the Request 'Callback Timing Guarantee',
which must hold for *both* Requests and RequestPools. See Request docs for details.)
"""
try:
if not self._started:
self.submit()
while self._requests:
with self._request_completed_condition:
if self._requests:
self._request_completed_condition.wait()
assert self._request_completed_condition._ownership_lock.locked()
self._clear_finishing_requests()
# Clear one last time, in case any finished right
# at the end of the last time through the loop.
self._clear_finishing_requests()
except:
self._failed = True
self.clean()
raise
def cancel(self):
"""
Cancel all requests in the pool.
"""
for req in self._requests:
req.cancel()
self.clean()
def submit(self):
"""
Submit all the requests in the pool. The pool must not be submitted yet.
Otherwise, an exception is raised.
Since wait() automatically calls submit(), there is usually no advantage to calling submit() yourself.
"""
        if self._started:
            raise RequestPool.RequestPoolError("Can't re-start a RequestPool that was already started.")
        self._started = True  # Mark the pool as started so add() will refuse any further requests.
try:
# Use copy here because requests may remove themselves from self._requests as they complete.
requests = self._requests.copy()
while requests:
requests.pop().submit()
self._clear_finishing_requests()
except:
self._failed = True
self.clean()
raise
def _transfer_request_to_finishing_queue(self, req, reason, *args):
"""
Called (via a callback) when a request is finished executing,
but not yet done with its callbacks. We mark the state change by
removing it from _requests and adding it to _finishing_requests.
See docstrings in wait() and _clear_finishing_requests() for details.
"""
with self._request_completed_condition:
with self._set_lock:
if not self._failed:
self._requests.remove(req)
self._finishing_requests.add(req)
self._request_completed_condition.notify()
def _clear_finishing_requests(self):
"""
Requests execute in two stages:
(1) the main workload, and
(2) the completion callbacks (i.e. the notify_finished handlers)
Once a request in the pool has completed stage 1, it is added to the
set of 'finishing_requests', which may still be in the middle of stage 2.
In this function, we block() for all requests that have completed stage 1, and then finally discard them.
This way, any RAM consumed by their results is immediately discarded (if possible).
We must call block() on every request in the Pool, for two reasons:
(1) RequestPool.wait() should not return until all requests are
complete (unless some failed), INCLUDING the requests' notify_finished callbacks.
(See the 'Callback Timing Guarantee' in the Request docs.)
(2) If any requests failed, we want their exception to be raised in our own context.
The proper way to do that is to call Request.block() on the failed request.
Since we call Request.block() on all of our requests, we'll definitely see the
exception if there was a failed request.
"""
while self._finishing_requests:
try:
with self._set_lock:
req = self._finishing_requests.pop()
except KeyError:
break
else:
req.block()
def request(self, func):
"""
**Deprecated method**.
Convenience function to construct a request for the given callable and add it to the pool.
"""
self.add( Request(func) )
def clean(self):
"""
Release our handles to all requests in the pool, for cleanup purposes.
There is no need to call this yourself.
"""
with self._set_lock:
self._requests = set()
self._finishing_requests = set()
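# --- Illustrative sketch (editor addition, not part of the original API) ---
# A minimal example of batching work with the RequestPool defined above.
# The helper name _example_work_item and the 'results' list are hypothetical;
# list.append is atomic in CPython, so no extra locking is used here.
def _example_requestpool_usage():
    results = []

    def _example_work_item(i):
        results.append(i * i)

    pool = RequestPool()
    for i in range(5):
        # Bind the loop variable for each request with functools.partial.
        pool.add(Request(functools.partial(_example_work_item, i)))
    pool.wait()  # submits the pool (if necessary) and blocks until every request completes
    return results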
class RequestPool_SIMPLE(object):
# This simplified version doesn't attempt to be efficient with RAM like the standard version (above).
# It is provided here as a simple reference implementation for comparison and testing purposes.
"""
Convenience class for submitting a batch of requests and waiting until they are all complete.
Requests can not be added to the pool after it has already started.
Not threadsafe (don't add requests from more than one thread).
"""
logger = logging.getLogger(__name__ + ".RequestPool")
def __init__(self):
self._requests = set()
self._started = False
def __len__(self):
return len(self._requests)
def add(self, req):
"""
Add a request to the pool. The pool must not be submitted yet. Otherwise, an exception is raised.
"""
if self._started:
# For now, we forbid this because it would allow some corner cases that we aren't unit-testing yet.
# If this exception blocks a desirable use case, then change this behavior and provide a unit test.
raise RequestPool.RequestPoolError("Attempted to add a request to a pool that was already started!")
self._requests.add(req)
def submit(self):
"""
Submit all the requests in the pool. The pool must not be submitted yet. Otherwise, an exception is raised.
"""
        if self._started:
            raise RequestPool.RequestPoolError("Can't re-start a RequestPool that was already started.")
        self._started = True  # Mark the pool as started so add() will refuse any further requests.
for req in self._requests:
req.submit()
def wait(self):
"""
If the pool hasn't been submitted yet, submit it.
Then wait for all requests in the pool to complete in the simplest way possible.
"""
if not self._started:
self.submit()
for req in self._requests:
req.block()
self._requests = set()
def cancel(self):
"""
Cancel all requests in the pool.
"""
for req in self._requests:
req.cancel()
def request(self, func):
"""
**Deprecated method**. Convenience function to construct a request for the given callable and add it to the pool.
"""
self.add( Request(func) )
def clean(self):
"""
Release our handles to all requests in the pool, for cleanup purposes.
"""
self._requests = set()
| lgpl-3.0 | 858,078,878,068,856,300 | 40.636364 | 159 | 0.600015 | false |
sharmaeklavya2/zulip | zerver/management/commands/realm_filters.py | 10 | 2955 | from __future__ import absolute_import
from __future__ import print_function
from typing import Any
from argparse import ArgumentParser
from optparse import make_option
from django.core.management.base import BaseCommand
from zerver.models import RealmFilter, all_realm_filters, get_realm
from zerver.lib.actions import do_add_realm_filter, do_remove_realm_filter
import sys
class Command(BaseCommand):
help = """Create a link filter rule for the specified domain.
NOTE: Regexes must be simple enough that they can be easily translated to JavaScript
RegExp syntax. In addition to JS-compatible syntax, the following features are available:
* Named groups will be converted to numbered groups automatically
* Inline-regex flags will be stripped, and where possible translated to RegExp-wide flags
Example: ./manage.py realm_filters --realm=zulip --op=add '#(?P<id>[0-9]{2,8})' 'https://trac.humbughq.com/ticket/%(id)s'
Example: ./manage.py realm_filters --realm=zulip --op=remove '#(?P<id>[0-9]{2,8})'
Example: ./manage.py realm_filters --realm=zulip --op=show
"""
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
parser.add_argument('-r', '--realm',
dest='string_id',
type=str,
required=True,
help='The subdomain or string_id of the realm to adjust filters for.')
parser.add_argument('--op',
dest='op',
type=str,
default="show",
help='What operation to do (add, show, remove).')
parser.add_argument('pattern', metavar='<pattern>', type=str, nargs='?', default=None,
help="regular expression to match")
parser.add_argument('url_format_string', metavar='<url pattern>', type=str, nargs='?',
help="format string to substitute")
def handle(self, *args, **options):
# type: (*Any, **str) -> None
realm = get_realm(options["string_id"])
if options["op"] == "show":
print("%s: %s" % (realm.string_id, all_realm_filters().get(realm.id, [])))
sys.exit(0)
pattern = options['pattern']
if not pattern:
self.print_help("./manage.py", "realm_filters")
sys.exit(1)
if options["op"] == "add":
url_format_string = options['url_format_string']
if not url_format_string:
self.print_help("./manage.py", "realm_filters")
sys.exit(1)
do_add_realm_filter(realm, pattern, url_format_string)
sys.exit(0)
elif options["op"] == "remove":
do_remove_realm_filter(realm, pattern=pattern)
sys.exit(0)
else:
self.print_help("./manage.py", "realm_filters")
sys.exit(1)
| apache-2.0 | 3,601,913,119,088,627,700 | 41.826087 | 121 | 0.580034 | false |
googleapis/python-compute | google/cloud/compute_v1/__init__.py | 1 | 109327 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .services.accelerator_types import AcceleratorTypesClient
from .services.addresses import AddressesClient
from .services.autoscalers import AutoscalersClient
from .services.backend_buckets import BackendBucketsClient
from .services.backend_services import BackendServicesClient
from .services.disks import DisksClient
from .services.disk_types import DiskTypesClient
from .services.external_vpn_gateways import ExternalVpnGatewaysClient
from .services.firewall_policies import FirewallPoliciesClient
from .services.firewalls import FirewallsClient
from .services.forwarding_rules import ForwardingRulesClient
from .services.global_addresses import GlobalAddressesClient
from .services.global_forwarding_rules import GlobalForwardingRulesClient
from .services.global_network_endpoint_groups import GlobalNetworkEndpointGroupsClient
from .services.global_operations import GlobalOperationsClient
from .services.global_organization_operations import GlobalOrganizationOperationsClient
from .services.global_public_delegated_prefixes import (
GlobalPublicDelegatedPrefixesClient,
)
from .services.health_checks import HealthChecksClient
from .services.images import ImagesClient
from .services.instance_group_managers import InstanceGroupManagersClient
from .services.instance_groups import InstanceGroupsClient
from .services.instances import InstancesClient
from .services.instance_templates import InstanceTemplatesClient
from .services.interconnect_attachments import InterconnectAttachmentsClient
from .services.interconnect_locations import InterconnectLocationsClient
from .services.interconnects import InterconnectsClient
from .services.license_codes import LicenseCodesClient
from .services.licenses import LicensesClient
from .services.machine_types import MachineTypesClient
from .services.network_endpoint_groups import NetworkEndpointGroupsClient
from .services.networks import NetworksClient
from .services.node_groups import NodeGroupsClient
from .services.node_templates import NodeTemplatesClient
from .services.node_types import NodeTypesClient
from .services.packet_mirrorings import PacketMirroringsClient
from .services.projects import ProjectsClient
from .services.public_advertised_prefixes import PublicAdvertisedPrefixesClient
from .services.public_delegated_prefixes import PublicDelegatedPrefixesClient
from .services.region_autoscalers import RegionAutoscalersClient
from .services.region_backend_services import RegionBackendServicesClient
from .services.region_commitments import RegionCommitmentsClient
from .services.region_disks import RegionDisksClient
from .services.region_disk_types import RegionDiskTypesClient
from .services.region_health_checks import RegionHealthChecksClient
from .services.region_health_check_services import RegionHealthCheckServicesClient
from .services.region_instance_group_managers import RegionInstanceGroupManagersClient
from .services.region_instance_groups import RegionInstanceGroupsClient
from .services.region_instances import RegionInstancesClient
from .services.region_network_endpoint_groups import RegionNetworkEndpointGroupsClient
from .services.region_notification_endpoints import RegionNotificationEndpointsClient
from .services.region_operations import RegionOperationsClient
from .services.regions import RegionsClient
from .services.region_ssl_certificates import RegionSslCertificatesClient
from .services.region_target_http_proxies import RegionTargetHttpProxiesClient
from .services.region_target_https_proxies import RegionTargetHttpsProxiesClient
from .services.region_url_maps import RegionUrlMapsClient
from .services.reservations import ReservationsClient
from .services.resource_policies import ResourcePoliciesClient
from .services.routers import RoutersClient
from .services.routes import RoutesClient
from .services.security_policies import SecurityPoliciesClient
from .services.snapshots import SnapshotsClient
from .services.ssl_certificates import SslCertificatesClient
from .services.ssl_policies import SslPoliciesClient
from .services.subnetworks import SubnetworksClient
from .services.target_grpc_proxies import TargetGrpcProxiesClient
from .services.target_http_proxies import TargetHttpProxiesClient
from .services.target_https_proxies import TargetHttpsProxiesClient
from .services.target_instances import TargetInstancesClient
from .services.target_pools import TargetPoolsClient
from .services.target_ssl_proxies import TargetSslProxiesClient
from .services.target_tcp_proxies import TargetTcpProxiesClient
from .services.target_vpn_gateways import TargetVpnGatewaysClient
from .services.url_maps import UrlMapsClient
from .services.vpn_gateways import VpnGatewaysClient
from .services.vpn_tunnels import VpnTunnelsClient
from .services.zone_operations import ZoneOperationsClient
from .services.zones import ZonesClient
from .types.compute import AbandonInstancesInstanceGroupManagerRequest
from .types.compute import AbandonInstancesRegionInstanceGroupManagerRequest
from .types.compute import AcceleratorConfig
from .types.compute import Accelerators
from .types.compute import AcceleratorType
from .types.compute import AcceleratorTypeAggregatedList
from .types.compute import AcceleratorTypeList
from .types.compute import AcceleratorTypesScopedList
from .types.compute import AccessConfig
from .types.compute import AddAccessConfigInstanceRequest
from .types.compute import AddAssociationFirewallPolicyRequest
from .types.compute import AddHealthCheckTargetPoolRequest
from .types.compute import AddInstancesInstanceGroupRequest
from .types.compute import AddInstanceTargetPoolRequest
from .types.compute import AddNodesNodeGroupRequest
from .types.compute import AddPeeringNetworkRequest
from .types.compute import AddResourcePoliciesDiskRequest
from .types.compute import AddResourcePoliciesInstanceRequest
from .types.compute import AddResourcePoliciesRegionDiskRequest
from .types.compute import Address
from .types.compute import AddressAggregatedList
from .types.compute import AddressesScopedList
from .types.compute import AddressList
from .types.compute import AddRuleFirewallPolicyRequest
from .types.compute import AddRuleSecurityPolicyRequest
from .types.compute import AddSignedUrlKeyBackendBucketRequest
from .types.compute import AddSignedUrlKeyBackendServiceRequest
from .types.compute import AdvancedMachineFeatures
from .types.compute import AggregatedListAcceleratorTypesRequest
from .types.compute import AggregatedListAddressesRequest
from .types.compute import AggregatedListAutoscalersRequest
from .types.compute import AggregatedListBackendServicesRequest
from .types.compute import AggregatedListDisksRequest
from .types.compute import AggregatedListDiskTypesRequest
from .types.compute import AggregatedListForwardingRulesRequest
from .types.compute import AggregatedListGlobalOperationsRequest
from .types.compute import AggregatedListHealthChecksRequest
from .types.compute import AggregatedListInstanceGroupManagersRequest
from .types.compute import AggregatedListInstanceGroupsRequest
from .types.compute import AggregatedListInstancesRequest
from .types.compute import AggregatedListInterconnectAttachmentsRequest
from .types.compute import AggregatedListMachineTypesRequest
from .types.compute import AggregatedListNetworkEndpointGroupsRequest
from .types.compute import AggregatedListNodeGroupsRequest
from .types.compute import AggregatedListNodeTemplatesRequest
from .types.compute import AggregatedListNodeTypesRequest
from .types.compute import AggregatedListPacketMirroringsRequest
from .types.compute import AggregatedListPublicDelegatedPrefixesRequest
from .types.compute import AggregatedListRegionCommitmentsRequest
from .types.compute import AggregatedListReservationsRequest
from .types.compute import AggregatedListResourcePoliciesRequest
from .types.compute import AggregatedListRoutersRequest
from .types.compute import AggregatedListSslCertificatesRequest
from .types.compute import AggregatedListSubnetworksRequest
from .types.compute import AggregatedListTargetHttpProxiesRequest
from .types.compute import AggregatedListTargetHttpsProxiesRequest
from .types.compute import AggregatedListTargetInstancesRequest
from .types.compute import AggregatedListTargetPoolsRequest
from .types.compute import AggregatedListTargetVpnGatewaysRequest
from .types.compute import AggregatedListUrlMapsRequest
from .types.compute import AggregatedListVpnGatewaysRequest
from .types.compute import AggregatedListVpnTunnelsRequest
from .types.compute import AliasIpRange
from .types.compute import (
AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk,
)
from .types.compute import AllocationSpecificSKUAllocationReservedInstanceProperties
from .types.compute import AllocationSpecificSKUReservation
from .types.compute import Allowed
from .types.compute import ApplyUpdatesToInstancesInstanceGroupManagerRequest
from .types.compute import ApplyUpdatesToInstancesRegionInstanceGroupManagerRequest
from .types.compute import AttachDiskInstanceRequest
from .types.compute import AttachedDisk
from .types.compute import AttachedDiskInitializeParams
from .types.compute import AttachNetworkEndpointsGlobalNetworkEndpointGroupRequest
from .types.compute import AttachNetworkEndpointsNetworkEndpointGroupRequest
from .types.compute import AuditConfig
from .types.compute import AuditLogConfig
from .types.compute import AuthorizationLoggingOptions
from .types.compute import Autoscaler
from .types.compute import AutoscalerAggregatedList
from .types.compute import AutoscalerList
from .types.compute import AutoscalersScopedList
from .types.compute import AutoscalerStatusDetails
from .types.compute import AutoscalingPolicy
from .types.compute import AutoscalingPolicyCpuUtilization
from .types.compute import AutoscalingPolicyCustomMetricUtilization
from .types.compute import AutoscalingPolicyLoadBalancingUtilization
from .types.compute import AutoscalingPolicyScaleInControl
from .types.compute import AutoscalingPolicyScalingSchedule
from .types.compute import Backend
from .types.compute import BackendBucket
from .types.compute import BackendBucketCdnPolicy
from .types.compute import BackendBucketCdnPolicyBypassCacheOnRequestHeader
from .types.compute import BackendBucketCdnPolicyNegativeCachingPolicy
from .types.compute import BackendBucketList
from .types.compute import BackendService
from .types.compute import BackendServiceAggregatedList
from .types.compute import BackendServiceCdnPolicy
from .types.compute import BackendServiceCdnPolicyBypassCacheOnRequestHeader
from .types.compute import BackendServiceCdnPolicyNegativeCachingPolicy
from .types.compute import BackendServiceFailoverPolicy
from .types.compute import BackendServiceGroupHealth
from .types.compute import BackendServiceIAP
from .types.compute import BackendServiceList
from .types.compute import BackendServiceLogConfig
from .types.compute import BackendServiceReference
from .types.compute import BackendServicesScopedList
from .types.compute import Binding
from .types.compute import BulkInsertInstanceRequest
from .types.compute import BulkInsertInstanceResource
from .types.compute import BulkInsertInstanceResourcePerInstanceProperties
from .types.compute import BulkInsertRegionInstanceRequest
from .types.compute import CacheInvalidationRule
from .types.compute import CacheKeyPolicy
from .types.compute import CircuitBreakers
from .types.compute import CloneRulesFirewallPolicyRequest
from .types.compute import Commitment
from .types.compute import CommitmentAggregatedList
from .types.compute import CommitmentList
from .types.compute import CommitmentsScopedList
from .types.compute import Condition
from .types.compute import ConfidentialInstanceConfig
from .types.compute import ConnectionDraining
from .types.compute import ConsistentHashLoadBalancerSettings
from .types.compute import ConsistentHashLoadBalancerSettingsHttpCookie
from .types.compute import CorsPolicy
from .types.compute import CreateInstancesInstanceGroupManagerRequest
from .types.compute import CreateInstancesRegionInstanceGroupManagerRequest
from .types.compute import CreateSnapshotDiskRequest
from .types.compute import CreateSnapshotRegionDiskRequest
from .types.compute import CustomerEncryptionKey
from .types.compute import CustomerEncryptionKeyProtectedDisk
from .types.compute import Data
from .types.compute import DeleteAccessConfigInstanceRequest
from .types.compute import DeleteAddressRequest
from .types.compute import DeleteAutoscalerRequest
from .types.compute import DeleteBackendBucketRequest
from .types.compute import DeleteBackendServiceRequest
from .types.compute import DeleteDiskRequest
from .types.compute import DeleteExternalVpnGatewayRequest
from .types.compute import DeleteFirewallPolicyRequest
from .types.compute import DeleteFirewallRequest
from .types.compute import DeleteForwardingRuleRequest
from .types.compute import DeleteGlobalAddressRequest
from .types.compute import DeleteGlobalForwardingRuleRequest
from .types.compute import DeleteGlobalNetworkEndpointGroupRequest
from .types.compute import DeleteGlobalOperationRequest
from .types.compute import DeleteGlobalOperationResponse
from .types.compute import DeleteGlobalOrganizationOperationRequest
from .types.compute import DeleteGlobalOrganizationOperationResponse
from .types.compute import DeleteGlobalPublicDelegatedPrefixeRequest
from .types.compute import DeleteHealthCheckRequest
from .types.compute import DeleteImageRequest
from .types.compute import DeleteInstanceGroupManagerRequest
from .types.compute import DeleteInstanceGroupRequest
from .types.compute import DeleteInstanceRequest
from .types.compute import DeleteInstancesInstanceGroupManagerRequest
from .types.compute import DeleteInstancesRegionInstanceGroupManagerRequest
from .types.compute import DeleteInstanceTemplateRequest
from .types.compute import DeleteInterconnectAttachmentRequest
from .types.compute import DeleteInterconnectRequest
from .types.compute import DeleteLicenseRequest
from .types.compute import DeleteNetworkEndpointGroupRequest
from .types.compute import DeleteNetworkRequest
from .types.compute import DeleteNodeGroupRequest
from .types.compute import DeleteNodesNodeGroupRequest
from .types.compute import DeleteNodeTemplateRequest
from .types.compute import DeletePacketMirroringRequest
from .types.compute import DeletePerInstanceConfigsInstanceGroupManagerRequest
from .types.compute import DeletePerInstanceConfigsRegionInstanceGroupManagerRequest
from .types.compute import DeletePublicAdvertisedPrefixeRequest
from .types.compute import DeletePublicDelegatedPrefixeRequest
from .types.compute import DeleteRegionAutoscalerRequest
from .types.compute import DeleteRegionBackendServiceRequest
from .types.compute import DeleteRegionDiskRequest
from .types.compute import DeleteRegionHealthCheckRequest
from .types.compute import DeleteRegionHealthCheckServiceRequest
from .types.compute import DeleteRegionInstanceGroupManagerRequest
from .types.compute import DeleteRegionNetworkEndpointGroupRequest
from .types.compute import DeleteRegionNotificationEndpointRequest
from .types.compute import DeleteRegionOperationRequest
from .types.compute import DeleteRegionOperationResponse
from .types.compute import DeleteRegionSslCertificateRequest
from .types.compute import DeleteRegionTargetHttpProxyRequest
from .types.compute import DeleteRegionTargetHttpsProxyRequest
from .types.compute import DeleteRegionUrlMapRequest
from .types.compute import DeleteReservationRequest
from .types.compute import DeleteResourcePolicyRequest
from .types.compute import DeleteRouteRequest
from .types.compute import DeleteRouterRequest
from .types.compute import DeleteSecurityPolicyRequest
from .types.compute import DeleteSignedUrlKeyBackendBucketRequest
from .types.compute import DeleteSignedUrlKeyBackendServiceRequest
from .types.compute import DeleteSnapshotRequest
from .types.compute import DeleteSslCertificateRequest
from .types.compute import DeleteSslPolicyRequest
from .types.compute import DeleteSubnetworkRequest
from .types.compute import DeleteTargetGrpcProxyRequest
from .types.compute import DeleteTargetHttpProxyRequest
from .types.compute import DeleteTargetHttpsProxyRequest
from .types.compute import DeleteTargetInstanceRequest
from .types.compute import DeleteTargetPoolRequest
from .types.compute import DeleteTargetSslProxyRequest
from .types.compute import DeleteTargetTcpProxyRequest
from .types.compute import DeleteTargetVpnGatewayRequest
from .types.compute import DeleteUrlMapRequest
from .types.compute import DeleteVpnGatewayRequest
from .types.compute import DeleteVpnTunnelRequest
from .types.compute import DeleteZoneOperationRequest
from .types.compute import DeleteZoneOperationResponse
from .types.compute import Denied
from .types.compute import DeprecateImageRequest
from .types.compute import DeprecationStatus
from .types.compute import DetachDiskInstanceRequest
from .types.compute import DetachNetworkEndpointsGlobalNetworkEndpointGroupRequest
from .types.compute import DetachNetworkEndpointsNetworkEndpointGroupRequest
from .types.compute import DisableXpnHostProjectRequest
from .types.compute import DisableXpnResourceProjectRequest
from .types.compute import Disk
from .types.compute import DiskAggregatedList
from .types.compute import DiskInstantiationConfig
from .types.compute import DiskList
from .types.compute import DiskMoveRequest
from .types.compute import DisksAddResourcePoliciesRequest
from .types.compute import DisksRemoveResourcePoliciesRequest
from .types.compute import DisksResizeRequest
from .types.compute import DisksScopedList
from .types.compute import DiskType
from .types.compute import DiskTypeAggregatedList
from .types.compute import DiskTypeList
from .types.compute import DiskTypesScopedList
from .types.compute import DisplayDevice
from .types.compute import DistributionPolicy
from .types.compute import DistributionPolicyZoneConfiguration
from .types.compute import Duration
from .types.compute import EnableXpnHostProjectRequest
from .types.compute import EnableXpnResourceProjectRequest
from .types.compute import Error
from .types.compute import Errors
from .types.compute import ExchangedPeeringRoute
from .types.compute import ExchangedPeeringRoutesList
from .types.compute import ExpandIpCidrRangeSubnetworkRequest
from .types.compute import Expr
from .types.compute import ExternalVpnGateway
from .types.compute import ExternalVpnGatewayInterface
from .types.compute import ExternalVpnGatewayList
from .types.compute import FileContentBuffer
from .types.compute import Firewall
from .types.compute import FirewallList
from .types.compute import FirewallLogConfig
from .types.compute import FirewallPoliciesListAssociationsResponse
from .types.compute import FirewallPolicy
from .types.compute import FirewallPolicyAssociation
from .types.compute import FirewallPolicyList
from .types.compute import FirewallPolicyRule
from .types.compute import FirewallPolicyRuleMatcher
from .types.compute import FirewallPolicyRuleMatcherLayer4Config
from .types.compute import FixedOrPercent
from .types.compute import ForwardingRule
from .types.compute import ForwardingRuleAggregatedList
from .types.compute import ForwardingRuleList
from .types.compute import ForwardingRuleReference
from .types.compute import ForwardingRuleServiceDirectoryRegistration
from .types.compute import ForwardingRulesScopedList
from .types.compute import GetAcceleratorTypeRequest
from .types.compute import GetAddressRequest
from .types.compute import GetAssociationFirewallPolicyRequest
from .types.compute import GetAutoscalerRequest
from .types.compute import GetBackendBucketRequest
from .types.compute import GetBackendServiceRequest
from .types.compute import GetDiagnosticsInterconnectRequest
from .types.compute import GetDiskRequest
from .types.compute import GetDiskTypeRequest
from .types.compute import GetEffectiveFirewallsInstanceRequest
from .types.compute import GetEffectiveFirewallsNetworkRequest
from .types.compute import GetExternalVpnGatewayRequest
from .types.compute import GetFirewallPolicyRequest
from .types.compute import GetFirewallRequest
from .types.compute import GetForwardingRuleRequest
from .types.compute import GetFromFamilyImageRequest
from .types.compute import GetGlobalAddressRequest
from .types.compute import GetGlobalForwardingRuleRequest
from .types.compute import GetGlobalNetworkEndpointGroupRequest
from .types.compute import GetGlobalOperationRequest
from .types.compute import GetGlobalOrganizationOperationRequest
from .types.compute import GetGlobalPublicDelegatedPrefixeRequest
from .types.compute import GetGuestAttributesInstanceRequest
from .types.compute import GetHealthBackendServiceRequest
from .types.compute import GetHealthCheckRequest
from .types.compute import GetHealthRegionBackendServiceRequest
from .types.compute import GetHealthTargetPoolRequest
from .types.compute import GetIamPolicyDiskRequest
from .types.compute import GetIamPolicyFirewallPolicyRequest
from .types.compute import GetIamPolicyImageRequest
from .types.compute import GetIamPolicyInstanceRequest
from .types.compute import GetIamPolicyInstanceTemplateRequest
from .types.compute import GetIamPolicyLicenseRequest
from .types.compute import GetIamPolicyNodeGroupRequest
from .types.compute import GetIamPolicyNodeTemplateRequest
from .types.compute import GetIamPolicyRegionDiskRequest
from .types.compute import GetIamPolicyReservationRequest
from .types.compute import GetIamPolicyResourcePolicyRequest
from .types.compute import GetIamPolicySnapshotRequest
from .types.compute import GetIamPolicySubnetworkRequest
from .types.compute import GetImageRequest
from .types.compute import GetInstanceGroupManagerRequest
from .types.compute import GetInstanceGroupRequest
from .types.compute import GetInstanceRequest
from .types.compute import GetInstanceTemplateRequest
from .types.compute import GetInterconnectAttachmentRequest
from .types.compute import GetInterconnectLocationRequest
from .types.compute import GetInterconnectRequest
from .types.compute import GetLicenseCodeRequest
from .types.compute import GetLicenseRequest
from .types.compute import GetMachineTypeRequest
from .types.compute import GetNatMappingInfoRoutersRequest
from .types.compute import GetNetworkEndpointGroupRequest
from .types.compute import GetNetworkRequest
from .types.compute import GetNodeGroupRequest
from .types.compute import GetNodeTemplateRequest
from .types.compute import GetNodeTypeRequest
from .types.compute import GetPacketMirroringRequest
from .types.compute import GetProjectRequest
from .types.compute import GetPublicAdvertisedPrefixeRequest
from .types.compute import GetPublicDelegatedPrefixeRequest
from .types.compute import GetRegionAutoscalerRequest
from .types.compute import GetRegionBackendServiceRequest
from .types.compute import GetRegionCommitmentRequest
from .types.compute import GetRegionDiskRequest
from .types.compute import GetRegionDiskTypeRequest
from .types.compute import GetRegionHealthCheckRequest
from .types.compute import GetRegionHealthCheckServiceRequest
from .types.compute import GetRegionInstanceGroupManagerRequest
from .types.compute import GetRegionInstanceGroupRequest
from .types.compute import GetRegionNetworkEndpointGroupRequest
from .types.compute import GetRegionNotificationEndpointRequest
from .types.compute import GetRegionOperationRequest
from .types.compute import GetRegionRequest
from .types.compute import GetRegionSslCertificateRequest
from .types.compute import GetRegionTargetHttpProxyRequest
from .types.compute import GetRegionTargetHttpsProxyRequest
from .types.compute import GetRegionUrlMapRequest
from .types.compute import GetReservationRequest
from .types.compute import GetResourcePolicyRequest
from .types.compute import GetRouteRequest
from .types.compute import GetRouterRequest
from .types.compute import GetRouterStatusRouterRequest
from .types.compute import GetRuleFirewallPolicyRequest
from .types.compute import GetRuleSecurityPolicyRequest
from .types.compute import GetScreenshotInstanceRequest
from .types.compute import GetSecurityPolicyRequest
from .types.compute import GetSerialPortOutputInstanceRequest
from .types.compute import GetShieldedInstanceIdentityInstanceRequest
from .types.compute import GetSnapshotRequest
from .types.compute import GetSslCertificateRequest
from .types.compute import GetSslPolicyRequest
from .types.compute import GetStatusVpnGatewayRequest
from .types.compute import GetSubnetworkRequest
from .types.compute import GetTargetGrpcProxyRequest
from .types.compute import GetTargetHttpProxyRequest
from .types.compute import GetTargetHttpsProxyRequest
from .types.compute import GetTargetInstanceRequest
from .types.compute import GetTargetPoolRequest
from .types.compute import GetTargetSslProxyRequest
from .types.compute import GetTargetTcpProxyRequest
from .types.compute import GetTargetVpnGatewayRequest
from .types.compute import GetUrlMapRequest
from .types.compute import GetVpnGatewayRequest
from .types.compute import GetVpnTunnelRequest
from .types.compute import GetXpnHostProjectRequest
from .types.compute import GetXpnResourcesProjectsRequest
from .types.compute import GetZoneOperationRequest
from .types.compute import GetZoneRequest
from .types.compute import GlobalNetworkEndpointGroupsAttachEndpointsRequest
from .types.compute import GlobalNetworkEndpointGroupsDetachEndpointsRequest
from .types.compute import GlobalOrganizationSetPolicyRequest
from .types.compute import GlobalSetLabelsRequest
from .types.compute import GlobalSetPolicyRequest
from .types.compute import GRPCHealthCheck
from .types.compute import GuestAttributes
from .types.compute import GuestAttributesEntry
from .types.compute import GuestAttributesValue
from .types.compute import GuestOsFeature
from .types.compute import HealthCheck
from .types.compute import HealthCheckList
from .types.compute import HealthCheckLogConfig
from .types.compute import HealthCheckReference
from .types.compute import HealthChecksAggregatedList
from .types.compute import HealthCheckService
from .types.compute import HealthCheckServiceReference
from .types.compute import HealthCheckServicesList
from .types.compute import HealthChecksScopedList
from .types.compute import HealthStatus
from .types.compute import HealthStatusForNetworkEndpoint
from .types.compute import HostRule
from .types.compute import HTTP2HealthCheck
from .types.compute import HttpFaultAbort
from .types.compute import HttpFaultDelay
from .types.compute import HttpFaultInjection
from .types.compute import HttpHeaderAction
from .types.compute import HttpHeaderMatch
from .types.compute import HttpHeaderOption
from .types.compute import HTTPHealthCheck
from .types.compute import HttpQueryParameterMatch
from .types.compute import HttpRedirectAction
from .types.compute import HttpRetryPolicy
from .types.compute import HttpRouteAction
from .types.compute import HttpRouteRule
from .types.compute import HttpRouteRuleMatch
from .types.compute import HTTPSHealthCheck
from .types.compute import Image
from .types.compute import ImageList
from .types.compute import InitialStateConfig
from .types.compute import InsertAddressRequest
from .types.compute import InsertAutoscalerRequest
from .types.compute import InsertBackendBucketRequest
from .types.compute import InsertBackendServiceRequest
from .types.compute import InsertDiskRequest
from .types.compute import InsertExternalVpnGatewayRequest
from .types.compute import InsertFirewallPolicyRequest
from .types.compute import InsertFirewallRequest
from .types.compute import InsertForwardingRuleRequest
from .types.compute import InsertGlobalAddressRequest
from .types.compute import InsertGlobalForwardingRuleRequest
from .types.compute import InsertGlobalNetworkEndpointGroupRequest
from .types.compute import InsertGlobalPublicDelegatedPrefixeRequest
from .types.compute import InsertHealthCheckRequest
from .types.compute import InsertImageRequest
from .types.compute import InsertInstanceGroupManagerRequest
from .types.compute import InsertInstanceGroupRequest
from .types.compute import InsertInstanceRequest
from .types.compute import InsertInstanceTemplateRequest
from .types.compute import InsertInterconnectAttachmentRequest
from .types.compute import InsertInterconnectRequest
from .types.compute import InsertLicenseRequest
from .types.compute import InsertNetworkEndpointGroupRequest
from .types.compute import InsertNetworkRequest
from .types.compute import InsertNodeGroupRequest
from .types.compute import InsertNodeTemplateRequest
from .types.compute import InsertPacketMirroringRequest
from .types.compute import InsertPublicAdvertisedPrefixeRequest
from .types.compute import InsertPublicDelegatedPrefixeRequest
from .types.compute import InsertRegionAutoscalerRequest
from .types.compute import InsertRegionBackendServiceRequest
from .types.compute import InsertRegionCommitmentRequest
from .types.compute import InsertRegionDiskRequest
from .types.compute import InsertRegionHealthCheckRequest
from .types.compute import InsertRegionHealthCheckServiceRequest
from .types.compute import InsertRegionInstanceGroupManagerRequest
from .types.compute import InsertRegionNetworkEndpointGroupRequest
from .types.compute import InsertRegionNotificationEndpointRequest
from .types.compute import InsertRegionSslCertificateRequest
from .types.compute import InsertRegionTargetHttpProxyRequest
from .types.compute import InsertRegionTargetHttpsProxyRequest
from .types.compute import InsertRegionUrlMapRequest
from .types.compute import InsertReservationRequest
from .types.compute import InsertResourcePolicyRequest
from .types.compute import InsertRouteRequest
from .types.compute import InsertRouterRequest
from .types.compute import InsertSecurityPolicyRequest
from .types.compute import InsertSslCertificateRequest
from .types.compute import InsertSslPolicyRequest
from .types.compute import InsertSubnetworkRequest
from .types.compute import InsertTargetGrpcProxyRequest
from .types.compute import InsertTargetHttpProxyRequest
from .types.compute import InsertTargetHttpsProxyRequest
from .types.compute import InsertTargetInstanceRequest
from .types.compute import InsertTargetPoolRequest
from .types.compute import InsertTargetSslProxyRequest
from .types.compute import InsertTargetTcpProxyRequest
from .types.compute import InsertTargetVpnGatewayRequest
from .types.compute import InsertUrlMapRequest
from .types.compute import InsertVpnGatewayRequest
from .types.compute import InsertVpnTunnelRequest
from .types.compute import Instance
from .types.compute import InstanceAggregatedList
from .types.compute import InstanceGroup
from .types.compute import InstanceGroupAggregatedList
from .types.compute import InstanceGroupList
from .types.compute import InstanceGroupManager
from .types.compute import InstanceGroupManagerActionsSummary
from .types.compute import InstanceGroupManagerAggregatedList
from .types.compute import InstanceGroupManagerAutoHealingPolicy
from .types.compute import InstanceGroupManagerList
from .types.compute import InstanceGroupManagersAbandonInstancesRequest
from .types.compute import InstanceGroupManagersApplyUpdatesRequest
from .types.compute import InstanceGroupManagersCreateInstancesRequest
from .types.compute import InstanceGroupManagersDeleteInstancesRequest
from .types.compute import InstanceGroupManagersDeletePerInstanceConfigsReq
from .types.compute import InstanceGroupManagersListErrorsResponse
from .types.compute import InstanceGroupManagersListManagedInstancesResponse
from .types.compute import InstanceGroupManagersListPerInstanceConfigsResp
from .types.compute import InstanceGroupManagersPatchPerInstanceConfigsReq
from .types.compute import InstanceGroupManagersRecreateInstancesRequest
from .types.compute import InstanceGroupManagersScopedList
from .types.compute import InstanceGroupManagersSetInstanceTemplateRequest
from .types.compute import InstanceGroupManagersSetTargetPoolsRequest
from .types.compute import InstanceGroupManagerStatus
from .types.compute import InstanceGroupManagerStatusStateful
from .types.compute import InstanceGroupManagerStatusStatefulPerInstanceConfigs
from .types.compute import InstanceGroupManagerStatusVersionTarget
from .types.compute import InstanceGroupManagersUpdatePerInstanceConfigsReq
from .types.compute import InstanceGroupManagerUpdatePolicy
from .types.compute import InstanceGroupManagerVersion
from .types.compute import InstanceGroupsAddInstancesRequest
from .types.compute import InstanceGroupsListInstances
from .types.compute import InstanceGroupsListInstancesRequest
from .types.compute import InstanceGroupsRemoveInstancesRequest
from .types.compute import InstanceGroupsScopedList
from .types.compute import InstanceGroupsSetNamedPortsRequest
from .types.compute import InstanceList
from .types.compute import InstanceListReferrers
from .types.compute import InstanceManagedByIgmError
from .types.compute import InstanceManagedByIgmErrorInstanceActionDetails
from .types.compute import InstanceManagedByIgmErrorManagedInstanceError
from .types.compute import InstanceMoveRequest
from .types.compute import InstanceProperties
from .types.compute import InstanceReference
from .types.compute import InstancesAddResourcePoliciesRequest
from .types.compute import InstancesGetEffectiveFirewallsResponse
from .types.compute import InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy
from .types.compute import InstancesRemoveResourcePoliciesRequest
from .types.compute import InstancesScopedList
from .types.compute import InstancesSetLabelsRequest
from .types.compute import InstancesSetMachineResourcesRequest
from .types.compute import InstancesSetMachineTypeRequest
from .types.compute import InstancesSetMinCpuPlatformRequest
from .types.compute import InstancesSetServiceAccountRequest
from .types.compute import InstancesStartWithEncryptionKeyRequest
from .types.compute import InstanceTemplate
from .types.compute import InstanceTemplateList
from .types.compute import InstanceWithNamedPorts
from .types.compute import Int64RangeMatch
from .types.compute import Interconnect
from .types.compute import InterconnectAttachment
from .types.compute import InterconnectAttachmentAggregatedList
from .types.compute import InterconnectAttachmentList
from .types.compute import InterconnectAttachmentPartnerMetadata
from .types.compute import InterconnectAttachmentPrivateInfo
from .types.compute import InterconnectAttachmentsScopedList
from .types.compute import InterconnectCircuitInfo
from .types.compute import InterconnectDiagnostics
from .types.compute import InterconnectDiagnosticsARPEntry
from .types.compute import InterconnectDiagnosticsLinkLACPStatus
from .types.compute import InterconnectDiagnosticsLinkOpticalPower
from .types.compute import InterconnectDiagnosticsLinkStatus
from .types.compute import InterconnectList
from .types.compute import InterconnectLocation
from .types.compute import InterconnectLocationList
from .types.compute import InterconnectLocationRegionInfo
from .types.compute import InterconnectOutageNotification
from .types.compute import InterconnectsGetDiagnosticsResponse
from .types.compute import InvalidateCacheUrlMapRequest
from .types.compute import Items
from .types.compute import License
from .types.compute import LicenseCode
from .types.compute import LicenseCodeLicenseAlias
from .types.compute import LicenseResourceCommitment
from .types.compute import LicenseResourceRequirements
from .types.compute import LicensesListResponse
from .types.compute import ListAcceleratorTypesRequest
from .types.compute import ListAddressesRequest
from .types.compute import ListAssociationsFirewallPolicyRequest
from .types.compute import ListAutoscalersRequest
from .types.compute import ListAvailableFeaturesSslPoliciesRequest
from .types.compute import ListBackendBucketsRequest
from .types.compute import ListBackendServicesRequest
from .types.compute import ListDisksRequest
from .types.compute import ListDiskTypesRequest
from .types.compute import ListErrorsInstanceGroupManagersRequest
from .types.compute import ListErrorsRegionInstanceGroupManagersRequest
from .types.compute import ListExternalVpnGatewaysRequest
from .types.compute import ListFirewallPoliciesRequest
from .types.compute import ListFirewallsRequest
from .types.compute import ListForwardingRulesRequest
from .types.compute import ListGlobalAddressesRequest
from .types.compute import ListGlobalForwardingRulesRequest
from .types.compute import ListGlobalNetworkEndpointGroupsRequest
from .types.compute import ListGlobalOperationsRequest
from .types.compute import ListGlobalOrganizationOperationsRequest
from .types.compute import ListGlobalPublicDelegatedPrefixesRequest
from .types.compute import ListHealthChecksRequest
from .types.compute import ListImagesRequest
from .types.compute import ListInstanceGroupManagersRequest
from .types.compute import ListInstanceGroupsRequest
from .types.compute import ListInstancesInstanceGroupsRequest
from .types.compute import ListInstancesRegionInstanceGroupsRequest
from .types.compute import ListInstancesRequest
from .types.compute import ListInstanceTemplatesRequest
from .types.compute import ListInterconnectAttachmentsRequest
from .types.compute import ListInterconnectLocationsRequest
from .types.compute import ListInterconnectsRequest
from .types.compute import ListLicensesRequest
from .types.compute import ListMachineTypesRequest
from .types.compute import ListManagedInstancesInstanceGroupManagersRequest
from .types.compute import ListManagedInstancesRegionInstanceGroupManagersRequest
from .types.compute import ListNetworkEndpointGroupsRequest
from .types.compute import ListNetworkEndpointsGlobalNetworkEndpointGroupsRequest
from .types.compute import ListNetworkEndpointsNetworkEndpointGroupsRequest
from .types.compute import ListNetworksRequest
from .types.compute import ListNodeGroupsRequest
from .types.compute import ListNodesNodeGroupsRequest
from .types.compute import ListNodeTemplatesRequest
from .types.compute import ListNodeTypesRequest
from .types.compute import ListPacketMirroringsRequest
from .types.compute import ListPeeringRoutesNetworksRequest
from .types.compute import ListPerInstanceConfigsInstanceGroupManagersRequest
from .types.compute import ListPerInstanceConfigsRegionInstanceGroupManagersRequest
from .types.compute import ListPreconfiguredExpressionSetsSecurityPoliciesRequest
from .types.compute import ListPublicAdvertisedPrefixesRequest
from .types.compute import ListPublicDelegatedPrefixesRequest
from .types.compute import ListReferrersInstancesRequest
from .types.compute import ListRegionAutoscalersRequest
from .types.compute import ListRegionBackendServicesRequest
from .types.compute import ListRegionCommitmentsRequest
from .types.compute import ListRegionDisksRequest
from .types.compute import ListRegionDiskTypesRequest
from .types.compute import ListRegionHealthCheckServicesRequest
from .types.compute import ListRegionHealthChecksRequest
from .types.compute import ListRegionInstanceGroupManagersRequest
from .types.compute import ListRegionInstanceGroupsRequest
from .types.compute import ListRegionNetworkEndpointGroupsRequest
from .types.compute import ListRegionNotificationEndpointsRequest
from .types.compute import ListRegionOperationsRequest
from .types.compute import ListRegionsRequest
from .types.compute import ListRegionSslCertificatesRequest
from .types.compute import ListRegionTargetHttpProxiesRequest
from .types.compute import ListRegionTargetHttpsProxiesRequest
from .types.compute import ListRegionUrlMapsRequest
from .types.compute import ListReservationsRequest
from .types.compute import ListResourcePoliciesRequest
from .types.compute import ListRoutersRequest
from .types.compute import ListRoutesRequest
from .types.compute import ListSecurityPoliciesRequest
from .types.compute import ListSnapshotsRequest
from .types.compute import ListSslCertificatesRequest
from .types.compute import ListSslPoliciesRequest
from .types.compute import ListSubnetworksRequest
from .types.compute import ListTargetGrpcProxiesRequest
from .types.compute import ListTargetHttpProxiesRequest
from .types.compute import ListTargetHttpsProxiesRequest
from .types.compute import ListTargetInstancesRequest
from .types.compute import ListTargetPoolsRequest
from .types.compute import ListTargetSslProxiesRequest
from .types.compute import ListTargetTcpProxiesRequest
from .types.compute import ListTargetVpnGatewaysRequest
from .types.compute import ListUrlMapsRequest
from .types.compute import ListUsableSubnetworksRequest
from .types.compute import ListVpnGatewaysRequest
from .types.compute import ListVpnTunnelsRequest
from .types.compute import ListXpnHostsProjectsRequest
from .types.compute import ListZoneOperationsRequest
from .types.compute import ListZonesRequest
from .types.compute import LocalDisk
from .types.compute import LocationPolicy
from .types.compute import LocationPolicyLocation
from .types.compute import LogConfig
from .types.compute import LogConfigCloudAuditOptions
from .types.compute import LogConfigCounterOptions
from .types.compute import LogConfigCounterOptionsCustomField
from .types.compute import LogConfigDataAccessOptions
from .types.compute import MachineType
from .types.compute import MachineTypeAggregatedList
from .types.compute import MachineTypeList
from .types.compute import MachineTypesScopedList
from .types.compute import ManagedInstance
from .types.compute import ManagedInstanceInstanceHealth
from .types.compute import ManagedInstanceLastAttempt
from .types.compute import ManagedInstanceVersion
from .types.compute import Metadata
from .types.compute import MetadataFilter
from .types.compute import MetadataFilterLabelMatch
from .types.compute import MoveDiskProjectRequest
from .types.compute import MoveFirewallPolicyRequest
from .types.compute import MoveInstanceProjectRequest
from .types.compute import NamedPort
from .types.compute import Network
from .types.compute import NetworkEndpoint
from .types.compute import NetworkEndpointGroup
from .types.compute import NetworkEndpointGroupAggregatedList
from .types.compute import NetworkEndpointGroupAppEngine
from .types.compute import NetworkEndpointGroupCloudFunction
from .types.compute import NetworkEndpointGroupCloudRun
from .types.compute import NetworkEndpointGroupList
from .types.compute import NetworkEndpointGroupsAttachEndpointsRequest
from .types.compute import NetworkEndpointGroupsDetachEndpointsRequest
from .types.compute import NetworkEndpointGroupsListEndpointsRequest
from .types.compute import NetworkEndpointGroupsListNetworkEndpoints
from .types.compute import NetworkEndpointGroupsScopedList
from .types.compute import NetworkEndpointWithHealthStatus
from .types.compute import NetworkInterface
from .types.compute import NetworkList
from .types.compute import NetworkPeering
from .types.compute import NetworkRoutingConfig
from .types.compute import NetworksAddPeeringRequest
from .types.compute import NetworksGetEffectiveFirewallsResponse
from .types.compute import NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy
from .types.compute import NetworksRemovePeeringRequest
from .types.compute import NetworksUpdatePeeringRequest
from .types.compute import NodeGroup
from .types.compute import NodeGroupAggregatedList
from .types.compute import NodeGroupAutoscalingPolicy
from .types.compute import NodeGroupList
from .types.compute import NodeGroupMaintenanceWindow
from .types.compute import NodeGroupNode
from .types.compute import NodeGroupsAddNodesRequest
from .types.compute import NodeGroupsDeleteNodesRequest
from .types.compute import NodeGroupsListNodes
from .types.compute import NodeGroupsScopedList
from .types.compute import NodeGroupsSetNodeTemplateRequest
from .types.compute import NodeTemplate
from .types.compute import NodeTemplateAggregatedList
from .types.compute import NodeTemplateList
from .types.compute import NodeTemplateNodeTypeFlexibility
from .types.compute import NodeTemplatesScopedList
from .types.compute import NodeType
from .types.compute import NodeTypeAggregatedList
from .types.compute import NodeTypeList
from .types.compute import NodeTypesScopedList
from .types.compute import NotificationEndpoint
from .types.compute import NotificationEndpointGrpcSettings
from .types.compute import NotificationEndpointList
from .types.compute import Operation
from .types.compute import OperationAggregatedList
from .types.compute import OperationList
from .types.compute import OperationsScopedList
from .types.compute import OutlierDetection
from .types.compute import PacketMirroring
from .types.compute import PacketMirroringAggregatedList
from .types.compute import PacketMirroringFilter
from .types.compute import PacketMirroringForwardingRuleInfo
from .types.compute import PacketMirroringList
from .types.compute import PacketMirroringMirroredResourceInfo
from .types.compute import PacketMirroringMirroredResourceInfoInstanceInfo
from .types.compute import PacketMirroringMirroredResourceInfoSubnetInfo
from .types.compute import PacketMirroringNetworkInfo
from .types.compute import PacketMirroringsScopedList
from .types.compute import PatchAutoscalerRequest
from .types.compute import PatchBackendBucketRequest
from .types.compute import PatchBackendServiceRequest
from .types.compute import PatchFirewallPolicyRequest
from .types.compute import PatchFirewallRequest
from .types.compute import PatchForwardingRuleRequest
from .types.compute import PatchGlobalForwardingRuleRequest
from .types.compute import PatchGlobalPublicDelegatedPrefixeRequest
from .types.compute import PatchHealthCheckRequest
from .types.compute import PatchImageRequest
from .types.compute import PatchInstanceGroupManagerRequest
from .types.compute import PatchInterconnectAttachmentRequest
from .types.compute import PatchInterconnectRequest
from .types.compute import PatchNetworkRequest
from .types.compute import PatchNodeGroupRequest
from .types.compute import PatchPacketMirroringRequest
from .types.compute import PatchPerInstanceConfigsInstanceGroupManagerRequest
from .types.compute import PatchPerInstanceConfigsRegionInstanceGroupManagerRequest
from .types.compute import PatchPublicAdvertisedPrefixeRequest
from .types.compute import PatchPublicDelegatedPrefixeRequest
from .types.compute import PatchRegionAutoscalerRequest
from .types.compute import PatchRegionBackendServiceRequest
from .types.compute import PatchRegionHealthCheckRequest
from .types.compute import PatchRegionHealthCheckServiceRequest
from .types.compute import PatchRegionInstanceGroupManagerRequest
from .types.compute import PatchRegionUrlMapRequest
from .types.compute import PatchRouterRequest
from .types.compute import PatchRuleFirewallPolicyRequest
from .types.compute import PatchRuleSecurityPolicyRequest
from .types.compute import PatchSecurityPolicyRequest
from .types.compute import PatchSslPolicyRequest
from .types.compute import PatchSubnetworkRequest
from .types.compute import PatchTargetGrpcProxyRequest
from .types.compute import PatchTargetHttpProxyRequest
from .types.compute import PatchTargetHttpsProxyRequest
from .types.compute import PatchUrlMapRequest
from .types.compute import PathMatcher
from .types.compute import PathRule
from .types.compute import PerInstanceConfig
from .types.compute import Policy
from .types.compute import PreconfiguredWafSet
from .types.compute import PreservedState
from .types.compute import PreservedStatePreservedDisk
from .types.compute import PreviewRouterRequest
from .types.compute import Project
from .types.compute import ProjectsDisableXpnResourceRequest
from .types.compute import ProjectsEnableXpnResourceRequest
from .types.compute import ProjectsGetXpnResources
from .types.compute import ProjectsListXpnHostsRequest
from .types.compute import ProjectsSetDefaultNetworkTierRequest
from .types.compute import PublicAdvertisedPrefix
from .types.compute import PublicAdvertisedPrefixList
from .types.compute import PublicAdvertisedPrefixPublicDelegatedPrefix
from .types.compute import PublicDelegatedPrefix
from .types.compute import PublicDelegatedPrefixAggregatedList
from .types.compute import PublicDelegatedPrefixesScopedList
from .types.compute import PublicDelegatedPrefixList
from .types.compute import PublicDelegatedPrefixPublicDelegatedSubPrefix
from .types.compute import Quota
from .types.compute import RawDisk
from .types.compute import RecreateInstancesInstanceGroupManagerRequest
from .types.compute import RecreateInstancesRegionInstanceGroupManagerRequest
from .types.compute import Reference
from .types.compute import Region
from .types.compute import RegionAutoscalerList
from .types.compute import RegionDisksAddResourcePoliciesRequest
from .types.compute import RegionDisksRemoveResourcePoliciesRequest
from .types.compute import RegionDisksResizeRequest
from .types.compute import RegionDiskTypeList
from .types.compute import RegionInstanceGroupList
from .types.compute import RegionInstanceGroupManagerDeleteInstanceConfigReq
from .types.compute import RegionInstanceGroupManagerList
from .types.compute import RegionInstanceGroupManagerPatchInstanceConfigReq
from .types.compute import RegionInstanceGroupManagersAbandonInstancesRequest
from .types.compute import RegionInstanceGroupManagersApplyUpdatesRequest
from .types.compute import RegionInstanceGroupManagersCreateInstancesRequest
from .types.compute import RegionInstanceGroupManagersDeleteInstancesRequest
from .types.compute import RegionInstanceGroupManagersListErrorsResponse
from .types.compute import RegionInstanceGroupManagersListInstanceConfigsResp
from .types.compute import RegionInstanceGroupManagersListInstancesResponse
from .types.compute import RegionInstanceGroupManagersRecreateRequest
from .types.compute import RegionInstanceGroupManagersSetTargetPoolsRequest
from .types.compute import RegionInstanceGroupManagersSetTemplateRequest
from .types.compute import RegionInstanceGroupManagerUpdateInstanceConfigReq
from .types.compute import RegionInstanceGroupsListInstances
from .types.compute import RegionInstanceGroupsListInstancesRequest
from .types.compute import RegionInstanceGroupsSetNamedPortsRequest
from .types.compute import RegionList
from .types.compute import RegionSetLabelsRequest
from .types.compute import RegionSetPolicyRequest
from .types.compute import RegionTargetHttpsProxiesSetSslCertificatesRequest
from .types.compute import RegionUrlMapsValidateRequest
from .types.compute import RemoveAssociationFirewallPolicyRequest
from .types.compute import RemoveHealthCheckTargetPoolRequest
from .types.compute import RemoveInstancesInstanceGroupRequest
from .types.compute import RemoveInstanceTargetPoolRequest
from .types.compute import RemovePeeringNetworkRequest
from .types.compute import RemoveResourcePoliciesDiskRequest
from .types.compute import RemoveResourcePoliciesInstanceRequest
from .types.compute import RemoveResourcePoliciesRegionDiskRequest
from .types.compute import RemoveRuleFirewallPolicyRequest
from .types.compute import RemoveRuleSecurityPolicyRequest
from .types.compute import RequestMirrorPolicy
from .types.compute import Reservation
from .types.compute import ReservationAffinity
from .types.compute import ReservationAggregatedList
from .types.compute import ReservationList
from .types.compute import ReservationsResizeRequest
from .types.compute import ReservationsScopedList
from .types.compute import ResetInstanceRequest
from .types.compute import ResizeDiskRequest
from .types.compute import ResizeInstanceGroupManagerRequest
from .types.compute import ResizeRegionDiskRequest
from .types.compute import ResizeRegionInstanceGroupManagerRequest
from .types.compute import ResizeReservationRequest
from .types.compute import ResourceCommitment
from .types.compute import ResourceGroupReference
from .types.compute import ResourcePoliciesScopedList
from .types.compute import ResourcePolicy
from .types.compute import ResourcePolicyAggregatedList
from .types.compute import ResourcePolicyDailyCycle
from .types.compute import ResourcePolicyGroupPlacementPolicy
from .types.compute import ResourcePolicyHourlyCycle
from .types.compute import ResourcePolicyInstanceSchedulePolicy
from .types.compute import ResourcePolicyInstanceSchedulePolicySchedule
from .types.compute import ResourcePolicyList
from .types.compute import ResourcePolicyResourceStatus
from .types.compute import ResourcePolicyResourceStatusInstanceSchedulePolicyStatus
from .types.compute import ResourcePolicySnapshotSchedulePolicy
from .types.compute import ResourcePolicySnapshotSchedulePolicyRetentionPolicy
from .types.compute import ResourcePolicySnapshotSchedulePolicySchedule
from .types.compute import ResourcePolicySnapshotSchedulePolicySnapshotProperties
from .types.compute import ResourcePolicyWeeklyCycle
from .types.compute import ResourcePolicyWeeklyCycleDayOfWeek
from .types.compute import Route
from .types.compute import RouteList
from .types.compute import Router
from .types.compute import RouterAdvertisedIpRange
from .types.compute import RouterAggregatedList
from .types.compute import RouterBgp
from .types.compute import RouterBgpPeer
from .types.compute import RouterInterface
from .types.compute import RouterList
from .types.compute import RouterNat
from .types.compute import RouterNatLogConfig
from .types.compute import RouterNatSubnetworkToNat
from .types.compute import RoutersPreviewResponse
from .types.compute import RoutersScopedList
from .types.compute import RouterStatus
from .types.compute import RouterStatusBgpPeerStatus
from .types.compute import RouterStatusNatStatus
from .types.compute import RouterStatusResponse
from .types.compute import Rule
from .types.compute import ScalingScheduleStatus
from .types.compute import Scheduling
from .types.compute import SchedulingNodeAffinity
from .types.compute import ScratchDisks
from .types.compute import Screenshot
from .types.compute import SecurityPoliciesListPreconfiguredExpressionSetsResponse
from .types.compute import SecurityPoliciesWafConfig
from .types.compute import SecurityPolicy
from .types.compute import SecurityPolicyList
from .types.compute import SecurityPolicyReference
from .types.compute import SecurityPolicyRule
from .types.compute import SecurityPolicyRuleMatcher
from .types.compute import SecurityPolicyRuleMatcherConfig
from .types.compute import SecuritySettings
from .types.compute import SerialPortOutput
from .types.compute import ServerBinding
from .types.compute import ServiceAccount
from .types.compute import SetBackendServiceTargetSslProxyRequest
from .types.compute import SetBackendServiceTargetTcpProxyRequest
from .types.compute import SetBackupTargetPoolRequest
from .types.compute import SetCommonInstanceMetadataProjectRequest
from .types.compute import SetDefaultNetworkTierProjectRequest
from .types.compute import SetDeletionProtectionInstanceRequest
from .types.compute import SetDiskAutoDeleteInstanceRequest
from .types.compute import SetIamPolicyDiskRequest
from .types.compute import SetIamPolicyFirewallPolicyRequest
from .types.compute import SetIamPolicyImageRequest
from .types.compute import SetIamPolicyInstanceRequest
from .types.compute import SetIamPolicyInstanceTemplateRequest
from .types.compute import SetIamPolicyLicenseRequest
from .types.compute import SetIamPolicyNodeGroupRequest
from .types.compute import SetIamPolicyNodeTemplateRequest
from .types.compute import SetIamPolicyRegionDiskRequest
from .types.compute import SetIamPolicyReservationRequest
from .types.compute import SetIamPolicyResourcePolicyRequest
from .types.compute import SetIamPolicySnapshotRequest
from .types.compute import SetIamPolicySubnetworkRequest
from .types.compute import SetInstanceTemplateInstanceGroupManagerRequest
from .types.compute import SetInstanceTemplateRegionInstanceGroupManagerRequest
from .types.compute import SetLabelsDiskRequest
from .types.compute import SetLabelsExternalVpnGatewayRequest
from .types.compute import SetLabelsForwardingRuleRequest
from .types.compute import SetLabelsGlobalForwardingRuleRequest
from .types.compute import SetLabelsImageRequest
from .types.compute import SetLabelsInstanceRequest
from .types.compute import SetLabelsRegionDiskRequest
from .types.compute import SetLabelsSnapshotRequest
from .types.compute import SetLabelsVpnGatewayRequest
from .types.compute import SetMachineResourcesInstanceRequest
from .types.compute import SetMachineTypeInstanceRequest
from .types.compute import SetMetadataInstanceRequest
from .types.compute import SetMinCpuPlatformInstanceRequest
from .types.compute import SetNamedPortsInstanceGroupRequest
from .types.compute import SetNamedPortsRegionInstanceGroupRequest
from .types.compute import SetNodeTemplateNodeGroupRequest
from .types.compute import SetPrivateIpGoogleAccessSubnetworkRequest
from .types.compute import SetProxyHeaderTargetSslProxyRequest
from .types.compute import SetProxyHeaderTargetTcpProxyRequest
from .types.compute import SetQuicOverrideTargetHttpsProxyRequest
from .types.compute import SetSchedulingInstanceRequest
from .types.compute import SetSecurityPolicyBackendServiceRequest
from .types.compute import SetServiceAccountInstanceRequest
from .types.compute import SetShieldedInstanceIntegrityPolicyInstanceRequest
from .types.compute import SetSslCertificatesRegionTargetHttpsProxyRequest
from .types.compute import SetSslCertificatesTargetHttpsProxyRequest
from .types.compute import SetSslCertificatesTargetSslProxyRequest
from .types.compute import SetSslPolicyTargetHttpsProxyRequest
from .types.compute import SetSslPolicyTargetSslProxyRequest
from .types.compute import SetTagsInstanceRequest
from .types.compute import SetTargetForwardingRuleRequest
from .types.compute import SetTargetGlobalForwardingRuleRequest
from .types.compute import SetTargetPoolsInstanceGroupManagerRequest
from .types.compute import SetTargetPoolsRegionInstanceGroupManagerRequest
from .types.compute import SetUrlMapRegionTargetHttpProxyRequest
from .types.compute import SetUrlMapRegionTargetHttpsProxyRequest
from .types.compute import SetUrlMapTargetHttpProxyRequest
from .types.compute import SetUrlMapTargetHttpsProxyRequest
from .types.compute import SetUsageExportBucketProjectRequest
from .types.compute import ShieldedInstanceConfig
from .types.compute import ShieldedInstanceIdentity
from .types.compute import ShieldedInstanceIdentityEntry
from .types.compute import ShieldedInstanceIntegrityPolicy
from .types.compute import SignedUrlKey
from .types.compute import SimulateMaintenanceEventInstanceRequest
from .types.compute import Snapshot
from .types.compute import SnapshotList
from .types.compute import SourceInstanceParams
from .types.compute import SslCertificate
from .types.compute import SslCertificateAggregatedList
from .types.compute import SslCertificateList
from .types.compute import SslCertificateManagedSslCertificate
from .types.compute import SslCertificateSelfManagedSslCertificate
from .types.compute import SslCertificatesScopedList
from .types.compute import SSLHealthCheck
from .types.compute import SslPoliciesList
from .types.compute import SslPoliciesListAvailableFeaturesResponse
from .types.compute import SslPolicy
from .types.compute import SslPolicyReference
from .types.compute import StartInstanceRequest
from .types.compute import StartWithEncryptionKeyInstanceRequest
from .types.compute import StatefulPolicy
from .types.compute import StatefulPolicyPreservedState
from .types.compute import StatefulPolicyPreservedStateDiskDevice
from .types.compute import StopInstanceRequest
from .types.compute import Subnetwork
from .types.compute import SubnetworkAggregatedList
from .types.compute import SubnetworkList
from .types.compute import SubnetworkLogConfig
from .types.compute import SubnetworkSecondaryRange
from .types.compute import SubnetworksExpandIpCidrRangeRequest
from .types.compute import SubnetworksScopedList
from .types.compute import SubnetworksSetPrivateIpGoogleAccessRequest
from .types.compute import SwitchToCustomModeNetworkRequest
from .types.compute import Tags
from .types.compute import TargetGrpcProxy
from .types.compute import TargetGrpcProxyList
from .types.compute import TargetHttpProxiesScopedList
from .types.compute import TargetHttpProxy
from .types.compute import TargetHttpProxyAggregatedList
from .types.compute import TargetHttpProxyList
from .types.compute import TargetHttpsProxiesScopedList
from .types.compute import TargetHttpsProxiesSetQuicOverrideRequest
from .types.compute import TargetHttpsProxiesSetSslCertificatesRequest
from .types.compute import TargetHttpsProxy
from .types.compute import TargetHttpsProxyAggregatedList
from .types.compute import TargetHttpsProxyList
from .types.compute import TargetInstance
from .types.compute import TargetInstanceAggregatedList
from .types.compute import TargetInstanceList
from .types.compute import TargetInstancesScopedList
from .types.compute import TargetPool
from .types.compute import TargetPoolAggregatedList
from .types.compute import TargetPoolInstanceHealth
from .types.compute import TargetPoolList
from .types.compute import TargetPoolsAddHealthCheckRequest
from .types.compute import TargetPoolsAddInstanceRequest
from .types.compute import TargetPoolsRemoveHealthCheckRequest
from .types.compute import TargetPoolsRemoveInstanceRequest
from .types.compute import TargetPoolsScopedList
from .types.compute import TargetReference
from .types.compute import TargetSslProxiesSetBackendServiceRequest
from .types.compute import TargetSslProxiesSetProxyHeaderRequest
from .types.compute import TargetSslProxiesSetSslCertificatesRequest
from .types.compute import TargetSslProxy
from .types.compute import TargetSslProxyList
from .types.compute import TargetTcpProxiesSetBackendServiceRequest
from .types.compute import TargetTcpProxiesSetProxyHeaderRequest
from .types.compute import TargetTcpProxy
from .types.compute import TargetTcpProxyList
from .types.compute import TargetVpnGateway
from .types.compute import TargetVpnGatewayAggregatedList
from .types.compute import TargetVpnGatewayList
from .types.compute import TargetVpnGatewaysScopedList
from .types.compute import TCPHealthCheck
from .types.compute import TestFailure
from .types.compute import TestIamPermissionsDiskRequest
from .types.compute import TestIamPermissionsExternalVpnGatewayRequest
from .types.compute import TestIamPermissionsFirewallPolicyRequest
from .types.compute import TestIamPermissionsImageRequest
from .types.compute import TestIamPermissionsInstanceRequest
from .types.compute import TestIamPermissionsInstanceTemplateRequest
from .types.compute import TestIamPermissionsLicenseCodeRequest
from .types.compute import TestIamPermissionsLicenseRequest
from .types.compute import TestIamPermissionsNetworkEndpointGroupRequest
from .types.compute import TestIamPermissionsNodeGroupRequest
from .types.compute import TestIamPermissionsNodeTemplateRequest
from .types.compute import TestIamPermissionsPacketMirroringRequest
from .types.compute import TestIamPermissionsRegionDiskRequest
from .types.compute import TestIamPermissionsReservationRequest
from .types.compute import TestIamPermissionsResourcePolicyRequest
from .types.compute import TestIamPermissionsSnapshotRequest
from .types.compute import TestIamPermissionsSubnetworkRequest
from .types.compute import TestIamPermissionsVpnGatewayRequest
from .types.compute import TestPermissionsRequest
from .types.compute import TestPermissionsResponse
from .types.compute import UpdateAccessConfigInstanceRequest
from .types.compute import UpdateAutoscalerRequest
from .types.compute import UpdateBackendBucketRequest
from .types.compute import UpdateBackendServiceRequest
from .types.compute import UpdateDisplayDeviceInstanceRequest
from .types.compute import UpdateFirewallRequest
from .types.compute import UpdateHealthCheckRequest
from .types.compute import UpdateInstanceRequest
from .types.compute import UpdateNetworkInterfaceInstanceRequest
from .types.compute import UpdatePeeringNetworkRequest
from .types.compute import UpdatePerInstanceConfigsInstanceGroupManagerRequest
from .types.compute import UpdatePerInstanceConfigsRegionInstanceGroupManagerRequest
from .types.compute import UpdateRegionAutoscalerRequest
from .types.compute import UpdateRegionBackendServiceRequest
from .types.compute import UpdateRegionHealthCheckRequest
from .types.compute import UpdateRegionUrlMapRequest
from .types.compute import UpdateRouterRequest
from .types.compute import UpdateShieldedInstanceConfigInstanceRequest
from .types.compute import UpdateUrlMapRequest
from .types.compute import UrlMap
from .types.compute import UrlMapList
from .types.compute import UrlMapReference
from .types.compute import UrlMapsAggregatedList
from .types.compute import UrlMapsScopedList
from .types.compute import UrlMapsValidateRequest
from .types.compute import UrlMapsValidateResponse
from .types.compute import UrlMapTest
from .types.compute import UrlMapTestHeader
from .types.compute import UrlMapValidationResult
from .types.compute import UrlRewrite
from .types.compute import UsableSubnetwork
from .types.compute import UsableSubnetworksAggregatedList
from .types.compute import UsableSubnetworkSecondaryRange
from .types.compute import UsageExportLocation
from .types.compute import ValidateRegionUrlMapRequest
from .types.compute import ValidateUrlMapRequest
from .types.compute import VmEndpointNatMappings
from .types.compute import VmEndpointNatMappingsInterfaceNatMappings
from .types.compute import VmEndpointNatMappingsList
from .types.compute import VpnGateway
from .types.compute import VpnGatewayAggregatedList
from .types.compute import VpnGatewayList
from .types.compute import VpnGatewaysGetStatusResponse
from .types.compute import VpnGatewaysScopedList
from .types.compute import VpnGatewayStatus
from .types.compute import VpnGatewayStatusHighAvailabilityRequirementState
from .types.compute import VpnGatewayStatusTunnel
from .types.compute import VpnGatewayStatusVpnConnection
from .types.compute import VpnGatewayVpnGatewayInterface
from .types.compute import VpnTunnel
from .types.compute import VpnTunnelAggregatedList
from .types.compute import VpnTunnelList
from .types.compute import VpnTunnelsScopedList
from .types.compute import WafExpressionSet
from .types.compute import WafExpressionSetExpression
from .types.compute import WaitGlobalOperationRequest
from .types.compute import WaitRegionOperationRequest
from .types.compute import WaitZoneOperationRequest
from .types.compute import Warning
from .types.compute import Warnings
from .types.compute import WeightedBackendService
from .types.compute import XpnHostList
from .types.compute import XpnResourceId
from .types.compute import Zone
from .types.compute import ZoneList
from .types.compute import ZoneSetLabelsRequest
from .types.compute import ZoneSetPolicyRequest

__all__ = (
"AbandonInstancesInstanceGroupManagerRequest",
"AbandonInstancesRegionInstanceGroupManagerRequest",
"AcceleratorConfig",
"AcceleratorType",
"AcceleratorTypeAggregatedList",
"AcceleratorTypeList",
"AcceleratorTypesClient",
"AcceleratorTypesScopedList",
"Accelerators",
"AccessConfig",
"AddAccessConfigInstanceRequest",
"AddAssociationFirewallPolicyRequest",
"AddHealthCheckTargetPoolRequest",
"AddInstanceTargetPoolRequest",
"AddInstancesInstanceGroupRequest",
"AddNodesNodeGroupRequest",
"AddPeeringNetworkRequest",
"AddResourcePoliciesDiskRequest",
"AddResourcePoliciesInstanceRequest",
"AddResourcePoliciesRegionDiskRequest",
"AddRuleFirewallPolicyRequest",
"AddRuleSecurityPolicyRequest",
"AddSignedUrlKeyBackendBucketRequest",
"AddSignedUrlKeyBackendServiceRequest",
"Address",
"AddressAggregatedList",
"AddressList",
"AddressesClient",
"AddressesScopedList",
"AdvancedMachineFeatures",
"AggregatedListAcceleratorTypesRequest",
"AggregatedListAddressesRequest",
"AggregatedListAutoscalersRequest",
"AggregatedListBackendServicesRequest",
"AggregatedListDiskTypesRequest",
"AggregatedListDisksRequest",
"AggregatedListForwardingRulesRequest",
"AggregatedListGlobalOperationsRequest",
"AggregatedListHealthChecksRequest",
"AggregatedListInstanceGroupManagersRequest",
"AggregatedListInstanceGroupsRequest",
"AggregatedListInstancesRequest",
"AggregatedListInterconnectAttachmentsRequest",
"AggregatedListMachineTypesRequest",
"AggregatedListNetworkEndpointGroupsRequest",
"AggregatedListNodeGroupsRequest",
"AggregatedListNodeTemplatesRequest",
"AggregatedListNodeTypesRequest",
"AggregatedListPacketMirroringsRequest",
"AggregatedListPublicDelegatedPrefixesRequest",
"AggregatedListRegionCommitmentsRequest",
"AggregatedListReservationsRequest",
"AggregatedListResourcePoliciesRequest",
"AggregatedListRoutersRequest",
"AggregatedListSslCertificatesRequest",
"AggregatedListSubnetworksRequest",
"AggregatedListTargetHttpProxiesRequest",
"AggregatedListTargetHttpsProxiesRequest",
"AggregatedListTargetInstancesRequest",
"AggregatedListTargetPoolsRequest",
"AggregatedListTargetVpnGatewaysRequest",
"AggregatedListUrlMapsRequest",
"AggregatedListVpnGatewaysRequest",
"AggregatedListVpnTunnelsRequest",
"AliasIpRange",
"AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk",
"AllocationSpecificSKUAllocationReservedInstanceProperties",
"AllocationSpecificSKUReservation",
"Allowed",
"ApplyUpdatesToInstancesInstanceGroupManagerRequest",
"ApplyUpdatesToInstancesRegionInstanceGroupManagerRequest",
"AttachDiskInstanceRequest",
"AttachNetworkEndpointsGlobalNetworkEndpointGroupRequest",
"AttachNetworkEndpointsNetworkEndpointGroupRequest",
"AttachedDisk",
"AttachedDiskInitializeParams",
"AuditConfig",
"AuditLogConfig",
"AuthorizationLoggingOptions",
"Autoscaler",
"AutoscalerAggregatedList",
"AutoscalerList",
"AutoscalerStatusDetails",
"AutoscalersClient",
"AutoscalersScopedList",
"AutoscalingPolicy",
"AutoscalingPolicyCpuUtilization",
"AutoscalingPolicyCustomMetricUtilization",
"AutoscalingPolicyLoadBalancingUtilization",
"AutoscalingPolicyScaleInControl",
"AutoscalingPolicyScalingSchedule",
"Backend",
"BackendBucket",
"BackendBucketCdnPolicy",
"BackendBucketCdnPolicyBypassCacheOnRequestHeader",
"BackendBucketCdnPolicyNegativeCachingPolicy",
"BackendBucketList",
"BackendBucketsClient",
"BackendService",
"BackendServiceAggregatedList",
"BackendServiceCdnPolicy",
"BackendServiceCdnPolicyBypassCacheOnRequestHeader",
"BackendServiceCdnPolicyNegativeCachingPolicy",
"BackendServiceFailoverPolicy",
"BackendServiceGroupHealth",
"BackendServiceIAP",
"BackendServiceList",
"BackendServiceLogConfig",
"BackendServiceReference",
"BackendServicesClient",
"BackendServicesScopedList",
"Binding",
"BulkInsertInstanceRequest",
"BulkInsertInstanceResource",
"BulkInsertInstanceResourcePerInstanceProperties",
"BulkInsertRegionInstanceRequest",
"CacheInvalidationRule",
"CacheKeyPolicy",
"CircuitBreakers",
"CloneRulesFirewallPolicyRequest",
"Commitment",
"CommitmentAggregatedList",
"CommitmentList",
"CommitmentsScopedList",
"Condition",
"ConfidentialInstanceConfig",
"ConnectionDraining",
"ConsistentHashLoadBalancerSettings",
"ConsistentHashLoadBalancerSettingsHttpCookie",
"CorsPolicy",
"CreateInstancesInstanceGroupManagerRequest",
"CreateInstancesRegionInstanceGroupManagerRequest",
"CreateSnapshotDiskRequest",
"CreateSnapshotRegionDiskRequest",
"CustomerEncryptionKey",
"CustomerEncryptionKeyProtectedDisk",
"Data",
"DeleteAccessConfigInstanceRequest",
"DeleteAddressRequest",
"DeleteAutoscalerRequest",
"DeleteBackendBucketRequest",
"DeleteBackendServiceRequest",
"DeleteDiskRequest",
"DeleteExternalVpnGatewayRequest",
"DeleteFirewallPolicyRequest",
"DeleteFirewallRequest",
"DeleteForwardingRuleRequest",
"DeleteGlobalAddressRequest",
"DeleteGlobalForwardingRuleRequest",
"DeleteGlobalNetworkEndpointGroupRequest",
"DeleteGlobalOperationRequest",
"DeleteGlobalOperationResponse",
"DeleteGlobalOrganizationOperationRequest",
"DeleteGlobalOrganizationOperationResponse",
"DeleteGlobalPublicDelegatedPrefixeRequest",
"DeleteHealthCheckRequest",
"DeleteImageRequest",
"DeleteInstanceGroupManagerRequest",
"DeleteInstanceGroupRequest",
"DeleteInstanceRequest",
"DeleteInstanceTemplateRequest",
"DeleteInstancesInstanceGroupManagerRequest",
"DeleteInstancesRegionInstanceGroupManagerRequest",
"DeleteInterconnectAttachmentRequest",
"DeleteInterconnectRequest",
"DeleteLicenseRequest",
"DeleteNetworkEndpointGroupRequest",
"DeleteNetworkRequest",
"DeleteNodeGroupRequest",
"DeleteNodeTemplateRequest",
"DeleteNodesNodeGroupRequest",
"DeletePacketMirroringRequest",
"DeletePerInstanceConfigsInstanceGroupManagerRequest",
"DeletePerInstanceConfigsRegionInstanceGroupManagerRequest",
"DeletePublicAdvertisedPrefixeRequest",
"DeletePublicDelegatedPrefixeRequest",
"DeleteRegionAutoscalerRequest",
"DeleteRegionBackendServiceRequest",
"DeleteRegionDiskRequest",
"DeleteRegionHealthCheckRequest",
"DeleteRegionHealthCheckServiceRequest",
"DeleteRegionInstanceGroupManagerRequest",
"DeleteRegionNetworkEndpointGroupRequest",
"DeleteRegionNotificationEndpointRequest",
"DeleteRegionOperationRequest",
"DeleteRegionOperationResponse",
"DeleteRegionSslCertificateRequest",
"DeleteRegionTargetHttpProxyRequest",
"DeleteRegionTargetHttpsProxyRequest",
"DeleteRegionUrlMapRequest",
"DeleteReservationRequest",
"DeleteResourcePolicyRequest",
"DeleteRouteRequest",
"DeleteRouterRequest",
"DeleteSecurityPolicyRequest",
"DeleteSignedUrlKeyBackendBucketRequest",
"DeleteSignedUrlKeyBackendServiceRequest",
"DeleteSnapshotRequest",
"DeleteSslCertificateRequest",
"DeleteSslPolicyRequest",
"DeleteSubnetworkRequest",
"DeleteTargetGrpcProxyRequest",
"DeleteTargetHttpProxyRequest",
"DeleteTargetHttpsProxyRequest",
"DeleteTargetInstanceRequest",
"DeleteTargetPoolRequest",
"DeleteTargetSslProxyRequest",
"DeleteTargetTcpProxyRequest",
"DeleteTargetVpnGatewayRequest",
"DeleteUrlMapRequest",
"DeleteVpnGatewayRequest",
"DeleteVpnTunnelRequest",
"DeleteZoneOperationRequest",
"DeleteZoneOperationResponse",
"Denied",
"DeprecateImageRequest",
"DeprecationStatus",
"DetachDiskInstanceRequest",
"DetachNetworkEndpointsGlobalNetworkEndpointGroupRequest",
"DetachNetworkEndpointsNetworkEndpointGroupRequest",
"DisableXpnHostProjectRequest",
"DisableXpnResourceProjectRequest",
"Disk",
"DiskAggregatedList",
"DiskInstantiationConfig",
"DiskList",
"DiskMoveRequest",
"DiskType",
"DiskTypeAggregatedList",
"DiskTypeList",
"DiskTypesClient",
"DiskTypesScopedList",
"DisksAddResourcePoliciesRequest",
"DisksClient",
"DisksRemoveResourcePoliciesRequest",
"DisksResizeRequest",
"DisksScopedList",
"DisplayDevice",
"DistributionPolicy",
"DistributionPolicyZoneConfiguration",
"Duration",
"EnableXpnHostProjectRequest",
"EnableXpnResourceProjectRequest",
"Error",
"Errors",
"ExchangedPeeringRoute",
"ExchangedPeeringRoutesList",
"ExpandIpCidrRangeSubnetworkRequest",
"Expr",
"ExternalVpnGateway",
"ExternalVpnGatewayInterface",
"ExternalVpnGatewayList",
"ExternalVpnGatewaysClient",
"FileContentBuffer",
"Firewall",
"FirewallList",
"FirewallLogConfig",
"FirewallPoliciesClient",
"FirewallPoliciesListAssociationsResponse",
"FirewallPolicy",
"FirewallPolicyAssociation",
"FirewallPolicyList",
"FirewallPolicyRule",
"FirewallPolicyRuleMatcher",
"FirewallPolicyRuleMatcherLayer4Config",
"FirewallsClient",
"FixedOrPercent",
"ForwardingRule",
"ForwardingRuleAggregatedList",
"ForwardingRuleList",
"ForwardingRuleReference",
"ForwardingRuleServiceDirectoryRegistration",
"ForwardingRulesClient",
"ForwardingRulesScopedList",
"GRPCHealthCheck",
"GetAcceleratorTypeRequest",
"GetAddressRequest",
"GetAssociationFirewallPolicyRequest",
"GetAutoscalerRequest",
"GetBackendBucketRequest",
"GetBackendServiceRequest",
"GetDiagnosticsInterconnectRequest",
"GetDiskRequest",
"GetDiskTypeRequest",
"GetEffectiveFirewallsInstanceRequest",
"GetEffectiveFirewallsNetworkRequest",
"GetExternalVpnGatewayRequest",
"GetFirewallPolicyRequest",
"GetFirewallRequest",
"GetForwardingRuleRequest",
"GetFromFamilyImageRequest",
"GetGlobalAddressRequest",
"GetGlobalForwardingRuleRequest",
"GetGlobalNetworkEndpointGroupRequest",
"GetGlobalOperationRequest",
"GetGlobalOrganizationOperationRequest",
"GetGlobalPublicDelegatedPrefixeRequest",
"GetGuestAttributesInstanceRequest",
"GetHealthBackendServiceRequest",
"GetHealthCheckRequest",
"GetHealthRegionBackendServiceRequest",
"GetHealthTargetPoolRequest",
"GetIamPolicyDiskRequest",
"GetIamPolicyFirewallPolicyRequest",
"GetIamPolicyImageRequest",
"GetIamPolicyInstanceRequest",
"GetIamPolicyInstanceTemplateRequest",
"GetIamPolicyLicenseRequest",
"GetIamPolicyNodeGroupRequest",
"GetIamPolicyNodeTemplateRequest",
"GetIamPolicyRegionDiskRequest",
"GetIamPolicyReservationRequest",
"GetIamPolicyResourcePolicyRequest",
"GetIamPolicySnapshotRequest",
"GetIamPolicySubnetworkRequest",
"GetImageRequest",
"GetInstanceGroupManagerRequest",
"GetInstanceGroupRequest",
"GetInstanceRequest",
"GetInstanceTemplateRequest",
"GetInterconnectAttachmentRequest",
"GetInterconnectLocationRequest",
"GetInterconnectRequest",
"GetLicenseCodeRequest",
"GetLicenseRequest",
"GetMachineTypeRequest",
"GetNatMappingInfoRoutersRequest",
"GetNetworkEndpointGroupRequest",
"GetNetworkRequest",
"GetNodeGroupRequest",
"GetNodeTemplateRequest",
"GetNodeTypeRequest",
"GetPacketMirroringRequest",
"GetProjectRequest",
"GetPublicAdvertisedPrefixeRequest",
"GetPublicDelegatedPrefixeRequest",
"GetRegionAutoscalerRequest",
"GetRegionBackendServiceRequest",
"GetRegionCommitmentRequest",
"GetRegionDiskRequest",
"GetRegionDiskTypeRequest",
"GetRegionHealthCheckRequest",
"GetRegionHealthCheckServiceRequest",
"GetRegionInstanceGroupManagerRequest",
"GetRegionInstanceGroupRequest",
"GetRegionNetworkEndpointGroupRequest",
"GetRegionNotificationEndpointRequest",
"GetRegionOperationRequest",
"GetRegionRequest",
"GetRegionSslCertificateRequest",
"GetRegionTargetHttpProxyRequest",
"GetRegionTargetHttpsProxyRequest",
"GetRegionUrlMapRequest",
"GetReservationRequest",
"GetResourcePolicyRequest",
"GetRouteRequest",
"GetRouterRequest",
"GetRouterStatusRouterRequest",
"GetRuleFirewallPolicyRequest",
"GetRuleSecurityPolicyRequest",
"GetScreenshotInstanceRequest",
"GetSecurityPolicyRequest",
"GetSerialPortOutputInstanceRequest",
"GetShieldedInstanceIdentityInstanceRequest",
"GetSnapshotRequest",
"GetSslCertificateRequest",
"GetSslPolicyRequest",
"GetStatusVpnGatewayRequest",
"GetSubnetworkRequest",
"GetTargetGrpcProxyRequest",
"GetTargetHttpProxyRequest",
"GetTargetHttpsProxyRequest",
"GetTargetInstanceRequest",
"GetTargetPoolRequest",
"GetTargetSslProxyRequest",
"GetTargetTcpProxyRequest",
"GetTargetVpnGatewayRequest",
"GetUrlMapRequest",
"GetVpnGatewayRequest",
"GetVpnTunnelRequest",
"GetXpnHostProjectRequest",
"GetXpnResourcesProjectsRequest",
"GetZoneOperationRequest",
"GetZoneRequest",
"GlobalAddressesClient",
"GlobalForwardingRulesClient",
"GlobalNetworkEndpointGroupsAttachEndpointsRequest",
"GlobalNetworkEndpointGroupsClient",
"GlobalNetworkEndpointGroupsDetachEndpointsRequest",
"GlobalOperationsClient",
"GlobalOrganizationOperationsClient",
"GlobalOrganizationSetPolicyRequest",
"GlobalPublicDelegatedPrefixesClient",
"GlobalSetLabelsRequest",
"GlobalSetPolicyRequest",
"GuestAttributes",
"GuestAttributesEntry",
"GuestAttributesValue",
"GuestOsFeature",
"HTTP2HealthCheck",
"HTTPHealthCheck",
"HTTPSHealthCheck",
"HealthCheck",
"HealthCheckList",
"HealthCheckLogConfig",
"HealthCheckReference",
"HealthCheckService",
"HealthCheckServiceReference",
"HealthCheckServicesList",
"HealthChecksAggregatedList",
"HealthChecksClient",
"HealthChecksScopedList",
"HealthStatus",
"HealthStatusForNetworkEndpoint",
"HostRule",
"HttpFaultAbort",
"HttpFaultDelay",
"HttpFaultInjection",
"HttpHeaderAction",
"HttpHeaderMatch",
"HttpHeaderOption",
"HttpQueryParameterMatch",
"HttpRedirectAction",
"HttpRetryPolicy",
"HttpRouteAction",
"HttpRouteRule",
"HttpRouteRuleMatch",
"Image",
"ImageList",
"ImagesClient",
"InitialStateConfig",
"InsertAddressRequest",
"InsertAutoscalerRequest",
"InsertBackendBucketRequest",
"InsertBackendServiceRequest",
"InsertDiskRequest",
"InsertExternalVpnGatewayRequest",
"InsertFirewallPolicyRequest",
"InsertFirewallRequest",
"InsertForwardingRuleRequest",
"InsertGlobalAddressRequest",
"InsertGlobalForwardingRuleRequest",
"InsertGlobalNetworkEndpointGroupRequest",
"InsertGlobalPublicDelegatedPrefixeRequest",
"InsertHealthCheckRequest",
"InsertImageRequest",
"InsertInstanceGroupManagerRequest",
"InsertInstanceGroupRequest",
"InsertInstanceRequest",
"InsertInstanceTemplateRequest",
"InsertInterconnectAttachmentRequest",
"InsertInterconnectRequest",
"InsertLicenseRequest",
"InsertNetworkEndpointGroupRequest",
"InsertNetworkRequest",
"InsertNodeGroupRequest",
"InsertNodeTemplateRequest",
"InsertPacketMirroringRequest",
"InsertPublicAdvertisedPrefixeRequest",
"InsertPublicDelegatedPrefixeRequest",
"InsertRegionAutoscalerRequest",
"InsertRegionBackendServiceRequest",
"InsertRegionCommitmentRequest",
"InsertRegionDiskRequest",
"InsertRegionHealthCheckRequest",
"InsertRegionHealthCheckServiceRequest",
"InsertRegionInstanceGroupManagerRequest",
"InsertRegionNetworkEndpointGroupRequest",
"InsertRegionNotificationEndpointRequest",
"InsertRegionSslCertificateRequest",
"InsertRegionTargetHttpProxyRequest",
"InsertRegionTargetHttpsProxyRequest",
"InsertRegionUrlMapRequest",
"InsertReservationRequest",
"InsertResourcePolicyRequest",
"InsertRouteRequest",
"InsertRouterRequest",
"InsertSecurityPolicyRequest",
"InsertSslCertificateRequest",
"InsertSslPolicyRequest",
"InsertSubnetworkRequest",
"InsertTargetGrpcProxyRequest",
"InsertTargetHttpProxyRequest",
"InsertTargetHttpsProxyRequest",
"InsertTargetInstanceRequest",
"InsertTargetPoolRequest",
"InsertTargetSslProxyRequest",
"InsertTargetTcpProxyRequest",
"InsertTargetVpnGatewayRequest",
"InsertUrlMapRequest",
"InsertVpnGatewayRequest",
"InsertVpnTunnelRequest",
"Instance",
"InstanceAggregatedList",
"InstanceGroup",
"InstanceGroupAggregatedList",
"InstanceGroupList",
"InstanceGroupManager",
"InstanceGroupManagerActionsSummary",
"InstanceGroupManagerAggregatedList",
"InstanceGroupManagerAutoHealingPolicy",
"InstanceGroupManagerList",
"InstanceGroupManagerStatus",
"InstanceGroupManagerStatusStateful",
"InstanceGroupManagerStatusStatefulPerInstanceConfigs",
"InstanceGroupManagerStatusVersionTarget",
"InstanceGroupManagerUpdatePolicy",
"InstanceGroupManagerVersion",
"InstanceGroupManagersAbandonInstancesRequest",
"InstanceGroupManagersApplyUpdatesRequest",
"InstanceGroupManagersClient",
"InstanceGroupManagersCreateInstancesRequest",
"InstanceGroupManagersDeleteInstancesRequest",
"InstanceGroupManagersDeletePerInstanceConfigsReq",
"InstanceGroupManagersListErrorsResponse",
"InstanceGroupManagersListManagedInstancesResponse",
"InstanceGroupManagersListPerInstanceConfigsResp",
"InstanceGroupManagersPatchPerInstanceConfigsReq",
"InstanceGroupManagersRecreateInstancesRequest",
"InstanceGroupManagersScopedList",
"InstanceGroupManagersSetInstanceTemplateRequest",
"InstanceGroupManagersSetTargetPoolsRequest",
"InstanceGroupManagersUpdatePerInstanceConfigsReq",
"InstanceGroupsAddInstancesRequest",
"InstanceGroupsClient",
"InstanceGroupsListInstances",
"InstanceGroupsListInstancesRequest",
"InstanceGroupsRemoveInstancesRequest",
"InstanceGroupsScopedList",
"InstanceGroupsSetNamedPortsRequest",
"InstanceList",
"InstanceListReferrers",
"InstanceManagedByIgmError",
"InstanceManagedByIgmErrorInstanceActionDetails",
"InstanceManagedByIgmErrorManagedInstanceError",
"InstanceMoveRequest",
"InstanceProperties",
"InstanceReference",
"InstanceTemplate",
"InstanceTemplateList",
"InstanceTemplatesClient",
"InstanceWithNamedPorts",
"InstancesAddResourcePoliciesRequest",
"InstancesClient",
"InstancesGetEffectiveFirewallsResponse",
"InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy",
"InstancesRemoveResourcePoliciesRequest",
"InstancesScopedList",
"InstancesSetLabelsRequest",
"InstancesSetMachineResourcesRequest",
"InstancesSetMachineTypeRequest",
"InstancesSetMinCpuPlatformRequest",
"InstancesSetServiceAccountRequest",
"InstancesStartWithEncryptionKeyRequest",
"Int64RangeMatch",
"Interconnect",
"InterconnectAttachment",
"InterconnectAttachmentAggregatedList",
"InterconnectAttachmentList",
"InterconnectAttachmentPartnerMetadata",
"InterconnectAttachmentPrivateInfo",
"InterconnectAttachmentsClient",
"InterconnectAttachmentsScopedList",
"InterconnectCircuitInfo",
"InterconnectDiagnostics",
"InterconnectDiagnosticsARPEntry",
"InterconnectDiagnosticsLinkLACPStatus",
"InterconnectDiagnosticsLinkOpticalPower",
"InterconnectDiagnosticsLinkStatus",
"InterconnectList",
"InterconnectLocation",
"InterconnectLocationList",
"InterconnectLocationRegionInfo",
"InterconnectLocationsClient",
"InterconnectOutageNotification",
"InterconnectsClient",
"InterconnectsGetDiagnosticsResponse",
"InvalidateCacheUrlMapRequest",
"Items",
"License",
"LicenseCode",
"LicenseCodeLicenseAlias",
"LicenseCodesClient",
"LicenseResourceCommitment",
"LicenseResourceRequirements",
"LicensesClient",
"LicensesListResponse",
"ListAcceleratorTypesRequest",
"ListAddressesRequest",
"ListAssociationsFirewallPolicyRequest",
"ListAutoscalersRequest",
"ListAvailableFeaturesSslPoliciesRequest",
"ListBackendBucketsRequest",
"ListBackendServicesRequest",
"ListDiskTypesRequest",
"ListDisksRequest",
"ListErrorsInstanceGroupManagersRequest",
"ListErrorsRegionInstanceGroupManagersRequest",
"ListExternalVpnGatewaysRequest",
"ListFirewallPoliciesRequest",
"ListFirewallsRequest",
"ListForwardingRulesRequest",
"ListGlobalAddressesRequest",
"ListGlobalForwardingRulesRequest",
"ListGlobalNetworkEndpointGroupsRequest",
"ListGlobalOperationsRequest",
"ListGlobalOrganizationOperationsRequest",
"ListGlobalPublicDelegatedPrefixesRequest",
"ListHealthChecksRequest",
"ListImagesRequest",
"ListInstanceGroupManagersRequest",
"ListInstanceGroupsRequest",
"ListInstanceTemplatesRequest",
"ListInstancesInstanceGroupsRequest",
"ListInstancesRegionInstanceGroupsRequest",
"ListInstancesRequest",
"ListInterconnectAttachmentsRequest",
"ListInterconnectLocationsRequest",
"ListInterconnectsRequest",
"ListLicensesRequest",
"ListMachineTypesRequest",
"ListManagedInstancesInstanceGroupManagersRequest",
"ListManagedInstancesRegionInstanceGroupManagersRequest",
"ListNetworkEndpointGroupsRequest",
"ListNetworkEndpointsGlobalNetworkEndpointGroupsRequest",
"ListNetworkEndpointsNetworkEndpointGroupsRequest",
"ListNetworksRequest",
"ListNodeGroupsRequest",
"ListNodeTemplatesRequest",
"ListNodeTypesRequest",
"ListNodesNodeGroupsRequest",
"ListPacketMirroringsRequest",
"ListPeeringRoutesNetworksRequest",
"ListPerInstanceConfigsInstanceGroupManagersRequest",
"ListPerInstanceConfigsRegionInstanceGroupManagersRequest",
"ListPreconfiguredExpressionSetsSecurityPoliciesRequest",
"ListPublicAdvertisedPrefixesRequest",
"ListPublicDelegatedPrefixesRequest",
"ListReferrersInstancesRequest",
"ListRegionAutoscalersRequest",
"ListRegionBackendServicesRequest",
"ListRegionCommitmentsRequest",
"ListRegionDiskTypesRequest",
"ListRegionDisksRequest",
"ListRegionHealthCheckServicesRequest",
"ListRegionHealthChecksRequest",
"ListRegionInstanceGroupManagersRequest",
"ListRegionInstanceGroupsRequest",
"ListRegionNetworkEndpointGroupsRequest",
"ListRegionNotificationEndpointsRequest",
"ListRegionOperationsRequest",
"ListRegionSslCertificatesRequest",
"ListRegionTargetHttpProxiesRequest",
"ListRegionTargetHttpsProxiesRequest",
"ListRegionUrlMapsRequest",
"ListRegionsRequest",
"ListReservationsRequest",
"ListResourcePoliciesRequest",
"ListRoutersRequest",
"ListRoutesRequest",
"ListSecurityPoliciesRequest",
"ListSnapshotsRequest",
"ListSslCertificatesRequest",
"ListSslPoliciesRequest",
"ListSubnetworksRequest",
"ListTargetGrpcProxiesRequest",
"ListTargetHttpProxiesRequest",
"ListTargetHttpsProxiesRequest",
"ListTargetInstancesRequest",
"ListTargetPoolsRequest",
"ListTargetSslProxiesRequest",
"ListTargetTcpProxiesRequest",
"ListTargetVpnGatewaysRequest",
"ListUrlMapsRequest",
"ListUsableSubnetworksRequest",
"ListVpnGatewaysRequest",
"ListVpnTunnelsRequest",
"ListXpnHostsProjectsRequest",
"ListZoneOperationsRequest",
"ListZonesRequest",
"LocalDisk",
"LocationPolicy",
"LocationPolicyLocation",
"LogConfig",
"LogConfigCloudAuditOptions",
"LogConfigCounterOptions",
"LogConfigCounterOptionsCustomField",
"LogConfigDataAccessOptions",
"MachineType",
"MachineTypeAggregatedList",
"MachineTypeList",
"MachineTypesClient",
"MachineTypesScopedList",
"ManagedInstance",
"ManagedInstanceInstanceHealth",
"ManagedInstanceLastAttempt",
"ManagedInstanceVersion",
"Metadata",
"MetadataFilter",
"MetadataFilterLabelMatch",
"MoveDiskProjectRequest",
"MoveFirewallPolicyRequest",
"MoveInstanceProjectRequest",
"NamedPort",
"Network",
"NetworkEndpoint",
"NetworkEndpointGroup",
"NetworkEndpointGroupAggregatedList",
"NetworkEndpointGroupAppEngine",
"NetworkEndpointGroupCloudFunction",
"NetworkEndpointGroupCloudRun",
"NetworkEndpointGroupList",
"NetworkEndpointGroupsAttachEndpointsRequest",
"NetworkEndpointGroupsClient",
"NetworkEndpointGroupsDetachEndpointsRequest",
"NetworkEndpointGroupsListEndpointsRequest",
"NetworkEndpointGroupsListNetworkEndpoints",
"NetworkEndpointGroupsScopedList",
"NetworkEndpointWithHealthStatus",
"NetworkInterface",
"NetworkList",
"NetworkPeering",
"NetworkRoutingConfig",
"NetworksAddPeeringRequest",
"NetworksClient",
"NetworksGetEffectiveFirewallsResponse",
"NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy",
"NetworksRemovePeeringRequest",
"NetworksUpdatePeeringRequest",
"NodeGroup",
"NodeGroupAggregatedList",
"NodeGroupAutoscalingPolicy",
"NodeGroupList",
"NodeGroupMaintenanceWindow",
"NodeGroupNode",
"NodeGroupsAddNodesRequest",
"NodeGroupsClient",
"NodeGroupsDeleteNodesRequest",
"NodeGroupsListNodes",
"NodeGroupsScopedList",
"NodeGroupsSetNodeTemplateRequest",
"NodeTemplate",
"NodeTemplateAggregatedList",
"NodeTemplateList",
"NodeTemplateNodeTypeFlexibility",
"NodeTemplatesClient",
"NodeTemplatesScopedList",
"NodeType",
"NodeTypeAggregatedList",
"NodeTypeList",
"NodeTypesClient",
"NodeTypesScopedList",
"NotificationEndpoint",
"NotificationEndpointGrpcSettings",
"NotificationEndpointList",
"Operation",
"OperationAggregatedList",
"OperationList",
"OperationsScopedList",
"OutlierDetection",
"PacketMirroring",
"PacketMirroringAggregatedList",
"PacketMirroringFilter",
"PacketMirroringForwardingRuleInfo",
"PacketMirroringList",
"PacketMirroringMirroredResourceInfo",
"PacketMirroringMirroredResourceInfoInstanceInfo",
"PacketMirroringMirroredResourceInfoSubnetInfo",
"PacketMirroringNetworkInfo",
"PacketMirroringsClient",
"PacketMirroringsScopedList",
"PatchAutoscalerRequest",
"PatchBackendBucketRequest",
"PatchBackendServiceRequest",
"PatchFirewallPolicyRequest",
"PatchFirewallRequest",
"PatchForwardingRuleRequest",
"PatchGlobalForwardingRuleRequest",
"PatchGlobalPublicDelegatedPrefixeRequest",
"PatchHealthCheckRequest",
"PatchImageRequest",
"PatchInstanceGroupManagerRequest",
"PatchInterconnectAttachmentRequest",
"PatchInterconnectRequest",
"PatchNetworkRequest",
"PatchNodeGroupRequest",
"PatchPacketMirroringRequest",
"PatchPerInstanceConfigsInstanceGroupManagerRequest",
"PatchPerInstanceConfigsRegionInstanceGroupManagerRequest",
"PatchPublicAdvertisedPrefixeRequest",
"PatchPublicDelegatedPrefixeRequest",
"PatchRegionAutoscalerRequest",
"PatchRegionBackendServiceRequest",
"PatchRegionHealthCheckRequest",
"PatchRegionHealthCheckServiceRequest",
"PatchRegionInstanceGroupManagerRequest",
"PatchRegionUrlMapRequest",
"PatchRouterRequest",
"PatchRuleFirewallPolicyRequest",
"PatchRuleSecurityPolicyRequest",
"PatchSecurityPolicyRequest",
"PatchSslPolicyRequest",
"PatchSubnetworkRequest",
"PatchTargetGrpcProxyRequest",
"PatchTargetHttpProxyRequest",
"PatchTargetHttpsProxyRequest",
"PatchUrlMapRequest",
"PathMatcher",
"PathRule",
"PerInstanceConfig",
"Policy",
"PreconfiguredWafSet",
"PreservedState",
"PreservedStatePreservedDisk",
"PreviewRouterRequest",
"Project",
"ProjectsClient",
"ProjectsDisableXpnResourceRequest",
"ProjectsEnableXpnResourceRequest",
"ProjectsGetXpnResources",
"ProjectsListXpnHostsRequest",
"ProjectsSetDefaultNetworkTierRequest",
"PublicAdvertisedPrefix",
"PublicAdvertisedPrefixList",
"PublicAdvertisedPrefixPublicDelegatedPrefix",
"PublicAdvertisedPrefixesClient",
"PublicDelegatedPrefix",
"PublicDelegatedPrefixAggregatedList",
"PublicDelegatedPrefixList",
"PublicDelegatedPrefixPublicDelegatedSubPrefix",
"PublicDelegatedPrefixesClient",
"PublicDelegatedPrefixesScopedList",
"Quota",
"RawDisk",
"RecreateInstancesInstanceGroupManagerRequest",
"RecreateInstancesRegionInstanceGroupManagerRequest",
"Reference",
"Region",
"RegionAutoscalerList",
"RegionAutoscalersClient",
"RegionBackendServicesClient",
"RegionCommitmentsClient",
"RegionDiskTypeList",
"RegionDiskTypesClient",
"RegionDisksAddResourcePoliciesRequest",
"RegionDisksClient",
"RegionDisksRemoveResourcePoliciesRequest",
"RegionDisksResizeRequest",
"RegionHealthCheckServicesClient",
"RegionHealthChecksClient",
"RegionInstanceGroupList",
"RegionInstanceGroupManagerDeleteInstanceConfigReq",
"RegionInstanceGroupManagerList",
"RegionInstanceGroupManagerPatchInstanceConfigReq",
"RegionInstanceGroupManagerUpdateInstanceConfigReq",
"RegionInstanceGroupManagersAbandonInstancesRequest",
"RegionInstanceGroupManagersApplyUpdatesRequest",
"RegionInstanceGroupManagersClient",
"RegionInstanceGroupManagersCreateInstancesRequest",
"RegionInstanceGroupManagersDeleteInstancesRequest",
"RegionInstanceGroupManagersListErrorsResponse",
"RegionInstanceGroupManagersListInstanceConfigsResp",
"RegionInstanceGroupManagersListInstancesResponse",
"RegionInstanceGroupManagersRecreateRequest",
"RegionInstanceGroupManagersSetTargetPoolsRequest",
"RegionInstanceGroupManagersSetTemplateRequest",
"RegionInstanceGroupsClient",
"RegionInstanceGroupsListInstances",
"RegionInstanceGroupsListInstancesRequest",
"RegionInstanceGroupsSetNamedPortsRequest",
"RegionInstancesClient",
"RegionList",
"RegionNetworkEndpointGroupsClient",
"RegionNotificationEndpointsClient",
"RegionOperationsClient",
"RegionSetLabelsRequest",
"RegionSetPolicyRequest",
"RegionSslCertificatesClient",
"RegionTargetHttpProxiesClient",
"RegionTargetHttpsProxiesClient",
"RegionTargetHttpsProxiesSetSslCertificatesRequest",
"RegionUrlMapsClient",
"RegionUrlMapsValidateRequest",
"RegionsClient",
"RemoveAssociationFirewallPolicyRequest",
"RemoveHealthCheckTargetPoolRequest",
"RemoveInstanceTargetPoolRequest",
"RemoveInstancesInstanceGroupRequest",
"RemovePeeringNetworkRequest",
"RemoveResourcePoliciesDiskRequest",
"RemoveResourcePoliciesInstanceRequest",
"RemoveResourcePoliciesRegionDiskRequest",
"RemoveRuleFirewallPolicyRequest",
"RemoveRuleSecurityPolicyRequest",
"RequestMirrorPolicy",
"Reservation",
"ReservationAffinity",
"ReservationAggregatedList",
"ReservationList",
"ReservationsClient",
"ReservationsResizeRequest",
"ReservationsScopedList",
"ResetInstanceRequest",
"ResizeDiskRequest",
"ResizeInstanceGroupManagerRequest",
"ResizeRegionDiskRequest",
"ResizeRegionInstanceGroupManagerRequest",
"ResizeReservationRequest",
"ResourceCommitment",
"ResourceGroupReference",
"ResourcePoliciesClient",
"ResourcePoliciesScopedList",
"ResourcePolicy",
"ResourcePolicyAggregatedList",
"ResourcePolicyDailyCycle",
"ResourcePolicyGroupPlacementPolicy",
"ResourcePolicyHourlyCycle",
"ResourcePolicyInstanceSchedulePolicy",
"ResourcePolicyInstanceSchedulePolicySchedule",
"ResourcePolicyList",
"ResourcePolicyResourceStatus",
"ResourcePolicyResourceStatusInstanceSchedulePolicyStatus",
"ResourcePolicySnapshotSchedulePolicy",
"ResourcePolicySnapshotSchedulePolicyRetentionPolicy",
"ResourcePolicySnapshotSchedulePolicySchedule",
"ResourcePolicySnapshotSchedulePolicySnapshotProperties",
"ResourcePolicyWeeklyCycle",
"ResourcePolicyWeeklyCycleDayOfWeek",
"Route",
"RouteList",
"Router",
"RouterAdvertisedIpRange",
"RouterAggregatedList",
"RouterBgp",
"RouterBgpPeer",
"RouterInterface",
"RouterList",
"RouterNat",
"RouterNatLogConfig",
"RouterNatSubnetworkToNat",
"RouterStatus",
"RouterStatusBgpPeerStatus",
"RouterStatusNatStatus",
"RouterStatusResponse",
"RoutersClient",
"RoutersPreviewResponse",
"RoutersScopedList",
"RoutesClient",
"Rule",
"SSLHealthCheck",
"ScalingScheduleStatus",
"Scheduling",
"SchedulingNodeAffinity",
"ScratchDisks",
"Screenshot",
"SecurityPoliciesClient",
"SecurityPoliciesListPreconfiguredExpressionSetsResponse",
"SecurityPoliciesWafConfig",
"SecurityPolicy",
"SecurityPolicyList",
"SecurityPolicyReference",
"SecurityPolicyRule",
"SecurityPolicyRuleMatcher",
"SecurityPolicyRuleMatcherConfig",
"SecuritySettings",
"SerialPortOutput",
"ServerBinding",
"ServiceAccount",
"SetBackendServiceTargetSslProxyRequest",
"SetBackendServiceTargetTcpProxyRequest",
"SetBackupTargetPoolRequest",
"SetCommonInstanceMetadataProjectRequest",
"SetDefaultNetworkTierProjectRequest",
"SetDeletionProtectionInstanceRequest",
"SetDiskAutoDeleteInstanceRequest",
"SetIamPolicyDiskRequest",
"SetIamPolicyFirewallPolicyRequest",
"SetIamPolicyImageRequest",
"SetIamPolicyInstanceRequest",
"SetIamPolicyInstanceTemplateRequest",
"SetIamPolicyLicenseRequest",
"SetIamPolicyNodeGroupRequest",
"SetIamPolicyNodeTemplateRequest",
"SetIamPolicyRegionDiskRequest",
"SetIamPolicyReservationRequest",
"SetIamPolicyResourcePolicyRequest",
"SetIamPolicySnapshotRequest",
"SetIamPolicySubnetworkRequest",
"SetInstanceTemplateInstanceGroupManagerRequest",
"SetInstanceTemplateRegionInstanceGroupManagerRequest",
"SetLabelsDiskRequest",
"SetLabelsExternalVpnGatewayRequest",
"SetLabelsForwardingRuleRequest",
"SetLabelsGlobalForwardingRuleRequest",
"SetLabelsImageRequest",
"SetLabelsInstanceRequest",
"SetLabelsRegionDiskRequest",
"SetLabelsSnapshotRequest",
"SetLabelsVpnGatewayRequest",
"SetMachineResourcesInstanceRequest",
"SetMachineTypeInstanceRequest",
"SetMetadataInstanceRequest",
"SetMinCpuPlatformInstanceRequest",
"SetNamedPortsInstanceGroupRequest",
"SetNamedPortsRegionInstanceGroupRequest",
"SetNodeTemplateNodeGroupRequest",
"SetPrivateIpGoogleAccessSubnetworkRequest",
"SetProxyHeaderTargetSslProxyRequest",
"SetProxyHeaderTargetTcpProxyRequest",
"SetQuicOverrideTargetHttpsProxyRequest",
"SetSchedulingInstanceRequest",
"SetSecurityPolicyBackendServiceRequest",
"SetServiceAccountInstanceRequest",
"SetShieldedInstanceIntegrityPolicyInstanceRequest",
"SetSslCertificatesRegionTargetHttpsProxyRequest",
"SetSslCertificatesTargetHttpsProxyRequest",
"SetSslCertificatesTargetSslProxyRequest",
"SetSslPolicyTargetHttpsProxyRequest",
"SetSslPolicyTargetSslProxyRequest",
"SetTagsInstanceRequest",
"SetTargetForwardingRuleRequest",
"SetTargetGlobalForwardingRuleRequest",
"SetTargetPoolsInstanceGroupManagerRequest",
"SetTargetPoolsRegionInstanceGroupManagerRequest",
"SetUrlMapRegionTargetHttpProxyRequest",
"SetUrlMapRegionTargetHttpsProxyRequest",
"SetUrlMapTargetHttpProxyRequest",
"SetUrlMapTargetHttpsProxyRequest",
"SetUsageExportBucketProjectRequest",
"ShieldedInstanceConfig",
"ShieldedInstanceIdentity",
"ShieldedInstanceIdentityEntry",
"ShieldedInstanceIntegrityPolicy",
"SignedUrlKey",
"SimulateMaintenanceEventInstanceRequest",
"Snapshot",
"SnapshotList",
"SnapshotsClient",
"SourceInstanceParams",
"SslCertificate",
"SslCertificateAggregatedList",
"SslCertificateList",
"SslCertificateManagedSslCertificate",
"SslCertificateSelfManagedSslCertificate",
"SslCertificatesClient",
"SslCertificatesScopedList",
"SslPoliciesClient",
"SslPoliciesList",
"SslPoliciesListAvailableFeaturesResponse",
"SslPolicy",
"SslPolicyReference",
"StartInstanceRequest",
"StartWithEncryptionKeyInstanceRequest",
"StatefulPolicy",
"StatefulPolicyPreservedState",
"StatefulPolicyPreservedStateDiskDevice",
"StopInstanceRequest",
"Subnetwork",
"SubnetworkAggregatedList",
"SubnetworkList",
"SubnetworkLogConfig",
"SubnetworkSecondaryRange",
"SubnetworksClient",
"SubnetworksExpandIpCidrRangeRequest",
"SubnetworksScopedList",
"SubnetworksSetPrivateIpGoogleAccessRequest",
"SwitchToCustomModeNetworkRequest",
"TCPHealthCheck",
"Tags",
"TargetGrpcProxiesClient",
"TargetGrpcProxy",
"TargetGrpcProxyList",
"TargetHttpProxiesClient",
"TargetHttpProxiesScopedList",
"TargetHttpProxy",
"TargetHttpProxyAggregatedList",
"TargetHttpProxyList",
"TargetHttpsProxiesClient",
"TargetHttpsProxiesScopedList",
"TargetHttpsProxiesSetQuicOverrideRequest",
"TargetHttpsProxiesSetSslCertificatesRequest",
"TargetHttpsProxy",
"TargetHttpsProxyAggregatedList",
"TargetHttpsProxyList",
"TargetInstance",
"TargetInstanceAggregatedList",
"TargetInstanceList",
"TargetInstancesClient",
"TargetInstancesScopedList",
"TargetPool",
"TargetPoolAggregatedList",
"TargetPoolInstanceHealth",
"TargetPoolList",
"TargetPoolsAddHealthCheckRequest",
"TargetPoolsAddInstanceRequest",
"TargetPoolsClient",
"TargetPoolsRemoveHealthCheckRequest",
"TargetPoolsRemoveInstanceRequest",
"TargetPoolsScopedList",
"TargetReference",
"TargetSslProxiesClient",
"TargetSslProxiesSetBackendServiceRequest",
"TargetSslProxiesSetProxyHeaderRequest",
"TargetSslProxiesSetSslCertificatesRequest",
"TargetSslProxy",
"TargetSslProxyList",
"TargetTcpProxiesClient",
"TargetTcpProxiesSetBackendServiceRequest",
"TargetTcpProxiesSetProxyHeaderRequest",
"TargetTcpProxy",
"TargetTcpProxyList",
"TargetVpnGateway",
"TargetVpnGatewayAggregatedList",
"TargetVpnGatewayList",
"TargetVpnGatewaysClient",
"TargetVpnGatewaysScopedList",
"TestFailure",
"TestIamPermissionsDiskRequest",
"TestIamPermissionsExternalVpnGatewayRequest",
"TestIamPermissionsFirewallPolicyRequest",
"TestIamPermissionsImageRequest",
"TestIamPermissionsInstanceRequest",
"TestIamPermissionsInstanceTemplateRequest",
"TestIamPermissionsLicenseCodeRequest",
"TestIamPermissionsLicenseRequest",
"TestIamPermissionsNetworkEndpointGroupRequest",
"TestIamPermissionsNodeGroupRequest",
"TestIamPermissionsNodeTemplateRequest",
"TestIamPermissionsPacketMirroringRequest",
"TestIamPermissionsRegionDiskRequest",
"TestIamPermissionsReservationRequest",
"TestIamPermissionsResourcePolicyRequest",
"TestIamPermissionsSnapshotRequest",
"TestIamPermissionsSubnetworkRequest",
"TestIamPermissionsVpnGatewayRequest",
"TestPermissionsRequest",
"TestPermissionsResponse",
"UpdateAccessConfigInstanceRequest",
"UpdateAutoscalerRequest",
"UpdateBackendBucketRequest",
"UpdateBackendServiceRequest",
"UpdateDisplayDeviceInstanceRequest",
"UpdateFirewallRequest",
"UpdateHealthCheckRequest",
"UpdateInstanceRequest",
"UpdateNetworkInterfaceInstanceRequest",
"UpdatePeeringNetworkRequest",
"UpdatePerInstanceConfigsInstanceGroupManagerRequest",
"UpdatePerInstanceConfigsRegionInstanceGroupManagerRequest",
"UpdateRegionAutoscalerRequest",
"UpdateRegionBackendServiceRequest",
"UpdateRegionHealthCheckRequest",
"UpdateRegionUrlMapRequest",
"UpdateRouterRequest",
"UpdateShieldedInstanceConfigInstanceRequest",
"UpdateUrlMapRequest",
"UrlMap",
"UrlMapList",
"UrlMapReference",
"UrlMapTest",
"UrlMapTestHeader",
"UrlMapValidationResult",
"UrlMapsAggregatedList",
"UrlMapsClient",
"UrlMapsScopedList",
"UrlMapsValidateRequest",
"UrlMapsValidateResponse",
"UrlRewrite",
"UsableSubnetwork",
"UsableSubnetworkSecondaryRange",
"UsableSubnetworksAggregatedList",
"UsageExportLocation",
"ValidateRegionUrlMapRequest",
"ValidateUrlMapRequest",
"VmEndpointNatMappings",
"VmEndpointNatMappingsInterfaceNatMappings",
"VmEndpointNatMappingsList",
"VpnGateway",
"VpnGatewayAggregatedList",
"VpnGatewayList",
"VpnGatewayStatus",
"VpnGatewayStatusHighAvailabilityRequirementState",
"VpnGatewayStatusTunnel",
"VpnGatewayStatusVpnConnection",
"VpnGatewayVpnGatewayInterface",
"VpnGatewaysClient",
"VpnGatewaysGetStatusResponse",
"VpnGatewaysScopedList",
"VpnTunnel",
"VpnTunnelAggregatedList",
"VpnTunnelList",
"VpnTunnelsClient",
"VpnTunnelsScopedList",
"WafExpressionSet",
"WafExpressionSetExpression",
"WaitGlobalOperationRequest",
"WaitRegionOperationRequest",
"WaitZoneOperationRequest",
"Warning",
"Warnings",
"WeightedBackendService",
"XpnHostList",
"XpnResourceId",
"Zone",
"ZoneList",
"ZoneOperationsClient",
"ZoneSetLabelsRequest",
"ZoneSetPolicyRequest",
"ZonesClient",
)
| apache-2.0 | 5,650,959,683,691,489,000 | 43.769451 | 88 | 0.842628 | false |
jbalogh/zamboni | apps/translations/tests/test_helpers.py | 2 | 2481 | from django.conf import settings
from django.utils import translation
import jingo
from mock import Mock
from nose.tools import eq_
from translations import helpers
from translations.models import PurifiedTranslation
def setup():
jingo.load_helpers()
def test_locale_html():
"""Test HTML attributes for languages different than the site language"""
testfield = Mock()
# same language: no need for attributes
this_lang = translation.get_language()
testfield.locale = this_lang
s = helpers.locale_html(testfield)
assert not s, 'no special HTML attributes for site language'
# non-rtl language
testfield.locale = 'de'
s = helpers.locale_html(testfield)
eq_(s, ' lang="de" dir="ltr"')
# rtl language
for lang in settings.RTL_LANGUAGES:
testfield.locale = lang
s = helpers.locale_html(testfield)
eq_(s, ' lang="%s" dir="rtl"' % testfield.locale)
def test_locale_html_xss():
"""Test for nastiness-removal in the transfield's locale"""
testfield = Mock()
# a hostile locale value must come out escaped
testfield.locale = '<script>alert(1)</script>'
s = helpers.locale_html(testfield)
assert '<script>' not in s
assert '&lt;script&gt;alert(1)&lt;/script&gt;' in s
def test_empty_locale_html():
"""locale_html must still work if field is None."""
s = helpers.locale_html(None)
assert not s, 'locale_html on None must be empty.'
def test_truncate_purified_field():
s = '<i>one</i><i>two</i>'
t = PurifiedTranslation(localized_string=s)
actual = jingo.env.from_string('{{ s|truncate(6) }}').render(s=t)
eq_(actual, s)
def test_truncate_purified_field_xss():
"""Truncating should not introduce xss issues."""
s = 'safe <script>alert("omg")</script>'
t = PurifiedTranslation(localized_string=s)
actual = jingo.env.from_string('{{ s|truncate(100) }}').render(s=t)
eq_(actual, 'safe &lt;script&gt;alert("omg")&lt;/script&gt;')
actual = jingo.env.from_string('{{ s|truncate(5) }}').render(s=t)
eq_(actual, 'safe ...')
def test_clean():
# Links are not mangled, bad HTML is escaped, newlines are slimmed.
s = '<ul><li><a href="#woo">\n\nyeah</a></li>\n\n<li><script></li></ul>'
eq_(helpers.clean(s),
'<ul><li><a href="#woo">\n\nyeah</a></li><li>&lt;script&gt;</li></ul>')
def test_clean_in_template():
s = '<a href="#woo">yeah</a>'
eq_(jingo.env.from_string('{{ s|clean }}').render(s=s), s)
| bsd-3-clause | -8,254,684,108,146,613,000 | 29.62963 | 79 | 0.644901 | false |
petabi/sniffles | src/sniffles/feature.py | 1 | 19126 | import random
import sys
from sniffles.regex_generator import generate_regex
class AmbiguousNotation:
def __init__(self, notation=None):
self.notation = notation
def __str__(self):
return self.notation
class SetNotation(AmbiguousNotation):
# Set notation should be expressed as {x1,x2,x3...}
# __str__() will return a subset of x(i) with
# the length of subset should be at least 1
# requirement: length of set should be at least 1
# for example: {5,6,9}
# it can return [5,6] or [5,9] or [6,9]
def __init__(self, notation):
super().__init__(notation)
mystr = notation[1:-1]
self.values = mystr.split(",")
self.max_list_size = len(self.values)
def __str__(self):
num_elements = random.randint(1, self.max_list_size)
return '[' + ','.join(random.sample(self.values, num_elements)) + ']'
class RangeNotation(AmbiguousNotation):
# Range notation should be expressed as [x:y] where
# x is lower bound and y is upper bound.
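# For illustration only (hypothetical values): "[5:10]" may yield "[6:9]";
# the generated sub-range always keeps its lower value below its upper value.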
def __init__(self, notation):
super().__init__(notation)
self.prefix = notation[0:1]
self.suffix = notation[-1:]
myrange = notation[1:-1]
self.separator = ":"
bounds = myrange.split(self.separator)
self.lower_bound = int(bounds[0])
self.upper_bound = int(bounds[1])
if self.upper_bound - self.lower_bound < 1:
print("RangeNotation: Upper bound has to be greater than"
" the lower bound: " + str(self.upper_bound) + " > "
+ str(self.lower_bound))
sys.exit(0)
def __str__(self):
mylower = random.randint(self.lower_bound, self.upper_bound - 1)
myupper = random.randint(mylower + 1, self.upper_bound)
mystring = self.prefix + str(mylower) + self.separator + \
str(myupper) + self.suffix
return mystring
class ListNotation(AmbiguousNotation):
# list notation should be [x,y] where x is lower bound and
# y is upper bound.
# it will generate a random list of values falling between
# lower bound and upper bound
# for example: [5,10] can generate [5,7,9]
# for [20,20], it will be converted into [19,20]
def __init__(self, notation):
super().__init__(notation)
self.prefix = notation[0:1]
self.suffix = notation[-1:]
mylist = notation[1:-1]
self.separator = ","
bounds = mylist.split(self.separator)
self.lower_bound = int(bounds[0])
self.upper_bound = int(bounds[1])
self.max_list_size = 100
if self.upper_bound < 1:
self.upper_bound = 1
if self.lower_bound >= self.upper_bound:
self.lower_bound = self.upper_bound - 1
def __str__(self):
num_elements = random.randint(2, self.max_list_size)
num_elements = min(num_elements,
self.upper_bound - self.lower_bound + 1
)
sample_size = int((self.upper_bound - self.lower_bound)
/ num_elements) - 1
myelements = []
# if the width of range is not big enough
if sample_size <= 20:
for i in range(self.lower_bound, self.upper_bound + 1):
myelements.append(i)
random.shuffle(myelements)
myelements = myelements[0:num_elements]
myelements = sorted(myelements)
else:
boundarylist = []
lower = self.lower_bound
for i in range(1, num_elements):
upper = lower + sample_size
boundarylist.append([lower, upper])
lower = upper + 1
boundarylist.append([lower, self.upper_bound])
for bounds in boundarylist:
if bounds[1] - bounds[0] <= 1:
myelements.append(bounds[0])
else:
myelements.append(random.randint(bounds[0], bounds[1]))
mystring = self.prefix
while myelements:
myelement = myelements.pop(0)
mystring += str(myelement)
if len(myelements) > 0:
mystring += self.separator
mystring += self.suffix
return mystring
class Feature:
def __init__(self, name=None, lower_bound=0, upper_bound=0,
complexity_prob=0, ambiguity_list=None):
self.feature_name = name
self.lower_bound = lower_bound
self.upper_bound = upper_bound
self.complexity_prob = complexity_prob
self.ambiguity_list = ambiguity_list
if self.upper_bound < 1:
self.upper_bound = 1
if self.lower_bound > self.upper_bound:
self.lower_bound = self.upper_bound - 1
def __str__(self):
complex = False
mystring = self.feature_name + "="
if self.complexity_prob > 0 and self.ambiguity_list is \
not None and len(self.ambiguity_list) > 0:
pick = random.randint(0, 100)
if pick <= self.complexity_prob:
complex = True
if complex:
pick = random.randint(0, len(self.ambiguity_list) - 1)
mystring += str(self.ambiguity_list[pick])
else:
mystring += str(random.randint(self.lower_bound, self.upper_bound))
return mystring
def testValidFeature(self, line=0):
valid = True
if self.feature_name is None:
valid = False
print("Feature at line " + str(line) + " missing name parameter.")
if self.complexity_prob > 0 and self.ambiguity_list is None:
print("Feature at line " + str(line) + " having complexity")
print("probability greater than 0 but there is no ambiguity_list.")
valid = False
return valid
class ContentFeature(Feature):
def __init__(self, name="content", regex=True, complexity_prob=0, len=0,
min_regex_length=3):
super().__init__(name, complexity_prob=complexity_prob)
self.regex = regex
self.length = len
self.min_regex_length = min_regex_length
def __str__(self):
mystring = self.feature_name + "="
complex = False
if self.complexity_prob > 0:
pick = random.randint(0, 100)
if pick <= self.complexity_prob:
complex = True
if self.regex:
mystring += "/"
if complex:
mystring += generate_regex(self.length, 0,
[60, 30, 10],
None, None,
[20, 20, 40, 20],
50, 30, self.min_regex_length)
else:
mystring += generate_regex(self.length, 0,
[100, 0, 0],
[20, 35, 20, 20, 0],
None, None, 0, 0, self.min_regex_length)
if self.regex:
mystring += "/"
if complex:
pick = random.randint(0, 100)
if pick > 50:
mystring += "i"
if pick > 75:
mystring += "m"
if pick > 85:
mystring += "s"
return mystring
def testValidFeature(self, line=0):
valid = True
if self.feature_name is None:
valid = False
print("Feature at line " + str(line) + " missing name parameter.")
return valid
class ProtocolFeature(Feature):
def __init__(self, name="proto", proto_list=None, complexity_prob=0,
ambiguity_list=None):
super().__init__(name, complexity_prob=complexity_prob,
ambiguity_list=ambiguity_list)
self.proto_list = proto_list
def __str__(self):
complex = False
if self.complexity_prob > 0 and self.ambiguity_list is not None:
pick = random.randint(0, 100)
if pick <= self.complexity_prob:
complex = True
if complex:
myproto = str(random.choice(self.ambiguity_list))
else:
myproto = random.choice(self.proto_list)
mystring = self.feature_name + "=" + myproto
return mystring
def testValidFeature(self, line=0):
valid = True
if self.feature_name is None:
valid = False
print("Feature at line " + str(line) + " missing name parameter.")
if self.proto_list is None:
valid = False
print("Feature at line " + str(line) +
" missing proto_list parameter.")
if self.complexity_prob > 0 and self.ambiguity_list is None:
print("Feature at line " + str(line) + " having complexity")
print("probability greater than 0 but there is no ambiguity_list.")
valid = False
return valid
class IPFeature(Feature):
def __init__(self, name="ip", version=4, complexity_prob=0):
super().__init__(name, complexity_prob=complexity_prob)
self.version = version
def __str__(self):
mystring = self.feature_name + "="
myip = []
complex = False
if self.complexity_prob > 0:
pick = random.randint(0, 100)
if pick <= self.complexity_prob:
complex = True
if complex:
totalbytes = 4
if self.version == 6:
totalbytes = 16
mynetmask = random.randint(0, totalbytes * 8)
myprefixbytes = int(mynetmask / 8)
myremainder = mynetmask % 8
mask = ((2**myremainder) - 1) << (8 - myremainder)
index = 0
while index < myprefixbytes:
if self.version == 4:
myip.append(random.randint(0, 255))
else:
if (myprefixbytes - index) > 1:
myip.append(random.randint(0, 65535))
index += 1
else:
break
index += 1
mypartialbyte = (random.randint(0, 255) & mask)
last_bytes = totalbytes - myprefixbytes
if (myprefixbytes - index) == 1:
mypartialbyte += (random.randint(0, 255)) << 8
elif self.version == 6:
mypartialbyte = mypartialbyte << 8
if mypartialbyte > 0:
myip.append(mypartialbyte)
last_bytes -= 1
if self.version == 6:
remain = 8 - len(myip)
for _ in range(remain):
myip.append(0)
else:
while last_bytes > 0:
myip.append(0)
last_bytes -= 1
if self.version == 6:
last_bytes -= 1
if self.version == 4:
myipstring = '.'.join(['%d' % byte for byte in myip])
else:
myipstring = ':'.join(['%04x' % byte for byte in myip])
myipstring += "/" + str(mynetmask)
else:
if self.version == 4:
for _ in range(4):
myip.append(random.randint(0, 255))
elif self.version == 6:
myip.append(0x2001)
myip.append(random.randint(0x0000, 0x01F8) + 0x400)
for _ in range(0, 6):
myip.append(random.randint(0, 65535))
else:
print("Error, no IP version: ", self.version)
if self.version == 4:
myipstring = '.'.join(['%d' % byte for byte in myip])
else:
myipstring = ':'.join(['%04x' % byte for byte in myip])
mystring += myipstring
return mystring
def testValidFeature(self, line=0):
valid = True
if self.feature_name is None:
valid = False
print("Feature at line " + str(line) + " missing name parameter.")
if not (int(self.version) == 4 or int(self.version) == 6):
print("Feature at line " + str(line) + " has invalid version.")
valid = False
return valid
# Features are defined in a semi-colon separated list one feature per line
# type=feature; list of arguments in key=value pairs, lists using
# python formatting (i.e. [a, ..., z]
# types are:
# 1. Feature -- generic feature
# 2. Content -- Content Feature
# 3. IP -- IP Feature
# 4. Protocol -- Protocol Feature
#
# ambiguous features should be written as lists like [x:y]
# for a range, [x,y] for a list with maximum of 10
# or just * for a wildcard or similar single option.
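#
# A purely illustrative example of such a file (these feature names and values
# are made up for demonstration, not taken from any shipped configuration):
#
#   type=protocol; name=proto; proto_list=[tcp,udp,icmp]; complexity_prob=10; ambiguity_list=[*]
#   type=ip; name=sip; version=4; complexity_prob=25
#   type=feature; name=dport; lower_bound=1; upper_bound=65535; complexity_prob=20; ambiguity_list=[[1024:65535]]
#   type=content; name=content; regex=true; len=20; min_regex_length=5
#
# (See FeatureParser.parseLine below for the exact keys each type accepts.)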
class FeatureParser:
def __init__(self, filename=None):
self.features = []
self.parseFile(filename)
def parseFile(self, filename=None):
if filename is not None:
try:
fd = open(filename, encoding='utf-8')
except Exception as err:
print("Could not read feature file.")
print("FeatureParser-parseFile: " + str(err))
raise Exception("The program will stop.")
line = fd.readline()
lineNumber = 1
while line:
try:
self.parseLine(line, lineNumber)
except Exception as err:
print("FeatureParser-parseFile: " + str(err))
raise Exception("The program will stop.")
line = fd.readline()
lineNumber += 1
fd.close()
return True
return False
def getFeatures(self):
return self.features
def parseLine(self, line=None, lineNumber=0):
if line:
myelements = line.split(';')
mypairs = {}
while myelements:
element = myelements.pop(0).strip()
if element:
values = element.split('=')
mypairs[values[0].strip().lower()] = values[1].strip()
myfeature = None
name = None
lower_bound = 0
upper_bound = 0
complexity_prob = 0
ambiguity_list = None
regex = False
len = 0
proto_list = None
version = 4
min_regex_length = 3
if 'name' in mypairs:
name = mypairs['name']
if 'lower_bound' in mypairs:
lower_bound = int(mypairs['lower_bound'])
if 'upper_bound' in mypairs:
upper_bound = int(mypairs['upper_bound'])
if 'complexity_prob' in mypairs:
complexity_prob = int(mypairs['complexity_prob'])
if 'ambiguity_list' in mypairs:
ambiguity_list = self.buildAmbiguityList(
mypairs['ambiguity_list'])
if 'regex' in mypairs:
if mypairs['regex'].lower() == 'true':
regex = True
if 'len' in mypairs:
len = int(mypairs['len'])
if 'min_regex_length' in mypairs:
min_regex_length = int(mypairs['min_regex_length'])
if min_regex_length < 1:
min_regex_length = 1
if 'proto_list' in mypairs:
plist = mypairs['proto_list']
plist = plist[1:-1]
pvals = plist.split(",")
proto_list = []
for p in pvals:
proto_list.append(p)
if 'version' in mypairs:
version = int(mypairs['version'])
if 'type' not in mypairs:
raise Exception("Feature type Not specified:", line)
if mypairs['type'].lower() == 'feature':
myfeature = Feature(name, lower_bound, upper_bound,
complexity_prob, ambiguity_list)
elif mypairs['type'].lower() == 'content':
myfeature = ContentFeature(name, regex, complexity_prob, len,
min_regex_length)
elif mypairs['type'].lower() == 'ip':
myfeature = IPFeature(name, version, complexity_prob)
elif mypairs['type'].lower() == 'protocol':
myfeature = ProtocolFeature(name, proto_list, complexity_prob,
ambiguity_list)
else:
raise Exception("Unrecognized feature type." + str(line))
if not myfeature.testValidFeature(lineNumber):
sys.exit()
self.features.append(myfeature)
def tokenizeAmbiguityList(self, list):
listAsString = list[1:-1]
parsedlist = ""
# remove all spaces
# (sniffles currently does not support spaces in ambiguity lists)
for i in range(0, len(listAsString)):
if listAsString[i] != " ":
parsedlist += listAsString[i]
values = []
currentIndex = 0
beginIndex = 0
lastIndex = len(parsedlist) - 1
while currentIndex <= lastIndex:
if parsedlist[currentIndex] == ",":
tmpStr = parsedlist[beginIndex: currentIndex]
values.append(tmpStr)
currentIndex += 1
beginIndex = currentIndex
elif parsedlist[currentIndex] == "[":
beginIndex = currentIndex
while parsedlist[currentIndex] != "]":
currentIndex += 1
currentIndex += 1
tmpStr = parsedlist[beginIndex: currentIndex]
values.append(tmpStr)
currentIndex += 1
beginIndex = currentIndex
elif parsedlist[currentIndex] == "{":
beginIndex = currentIndex
while parsedlist[currentIndex] != "}":
currentIndex += 1
currentIndex += 1
tmpStr = parsedlist[beginIndex: currentIndex]
values.append(tmpStr)
currentIndex += 1
beginIndex = currentIndex
else:
currentIndex += 1
if currentIndex > lastIndex and \
currentIndex > beginIndex:
tmpStr = parsedlist[beginIndex: currentIndex]
values.append(tmpStr)
return values
def buildAmbiguityList(self, list):
mylist = []
values = self.tokenizeAmbiguityList(list)
myamb = None
for val in values:
if ',' in val:
if "[" in val:
myamb = ListNotation(val)
elif "{" in val:
myamb = SetNotation(val)
elif ':' in val:
myamb = RangeNotation(val)
else:
myamb = AmbiguousNotation(val)
mylist.append(myamb)
return mylist
| apache-2.0 | -4,676,584,201,139,202,000 | 35.018832 | 79 | 0.509411 | false |
felliott/osf.io | website/views.py | 6 | 15898 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import itertools
from rest_framework import status as http_status
import logging
import math
import os
import requests
from future.moves.urllib.parse import unquote
from django.apps import apps
from flask import request, send_from_directory, Response, stream_with_context
from framework import sentry
from framework.auth import Auth
from framework.auth.decorators import must_be_logged_in
from framework.auth.forms import SignInForm, ForgotPasswordForm
from framework.exceptions import HTTPError
from framework.flask import redirect # VOL-aware redirect
from framework.forms import utils as form_utils
from framework.routing import proxy_url
from website import settings
from website.institutions.views import serialize_institution
from osf import features
from osf.models import BaseFileNode, Guid, Institution, Preprint, AbstractNode, Node, DraftNode, Registration
from addons.osfstorage.models import Region
from website.settings import EXTERNAL_EMBER_APPS, PROXY_EMBER_APPS, EXTERNAL_EMBER_SERVER_TIMEOUT, DOMAIN
from website.ember_osf_web.decorators import ember_flag_is_active
from website.ember_osf_web.views import use_ember_app
from website.project.model import has_anonymous_link
from osf.utils import permissions
from api.waffle.utils import flag_is_active, storage_i18n_flag_active
logger = logging.getLogger(__name__)
preprints_dir = os.path.abspath(os.path.join(os.getcwd(), EXTERNAL_EMBER_APPS['preprints']['path']))
ember_osf_web_dir = os.path.abspath(os.path.join(os.getcwd(), EXTERNAL_EMBER_APPS['ember_osf_web']['path']))
def serialize_contributors_for_summary(node, max_count=3):
# # TODO: Use .filter(visible=True) when chaining is fixed in django-include
users = [contrib.user for contrib in node.contributor_set.all() if contrib.visible]
contributors = []
n_contributors = len(users)
others_count = ''
for index, user in enumerate(users[:max_count]):
if index == max_count - 1 and len(users) > max_count:
separator = ' &'
others_count = str(n_contributors - max_count)
elif index == len(users) - 1:
separator = ''
elif index == len(users) - 2:
separator = ' &'
else:
separator = ','
contributor = user.get_summary(formatter='surname')
contributor['user_id'] = user._primary_key
contributor['separator'] = separator
contributors.append(contributor)
return {
'contributors': contributors,
'others_count': others_count,
}
def serialize_groups_for_summary(node):
groups = node.osf_groups
n_groups = len(groups)
group_string = ''
for index, group in enumerate(groups):
if index == n_groups - 1:
separator = ''
elif index == n_groups - 2:
separator = ' & '
else:
separator = ', '
group_string = group_string + group.name + separator
return group_string
def serialize_node_summary(node, auth, primary=True, show_path=False):
is_registration = node.is_registration
summary = {
'id': node._id,
'primary': primary,
'is_registration': node.is_registration,
'is_fork': node.is_fork,
'is_pending_registration': node.is_pending_registration if is_registration else False,
'is_retracted': node.is_retracted if is_registration else False,
'is_pending_retraction': node.is_pending_retraction if is_registration else False,
'embargo_end_date': node.embargo_end_date.strftime('%A, %b. %d, %Y') if is_registration and node.embargo_end_date else False,
'is_pending_embargo': node.is_pending_embargo if is_registration else False,
'is_embargoed': node.is_embargoed if is_registration else False,
'archiving': node.archiving if is_registration else False,
}
parent_node = node.parent_node
user = auth.user
if node.can_view(auth):
# Re-query node with contributor guids included to prevent N contributor queries
node = AbstractNode.objects.filter(pk=node.pk).include('contributor__user__guids').get()
contributor_data = serialize_contributors_for_summary(node)
summary.update({
'can_view': True,
'can_edit': node.can_edit(auth),
'primary_id': node._id,
'url': node.url,
'primary': primary,
'api_url': node.api_url,
'title': node.title,
'category': node.category,
'is_supplemental_project': node.has_linked_published_preprints,
'childExists': Node.objects.get_children(node, active=True).exists(),
'is_admin': node.has_permission(user, permissions.ADMIN),
'is_contributor': node.is_contributor(user),
'is_contributor_or_group_member': node.is_contributor_or_group_member(user),
'logged_in': auth.logged_in,
'node_type': node.project_or_component,
'is_fork': node.is_fork,
'is_registration': is_registration,
'anonymous': has_anonymous_link(node, auth),
'registered_date': node.registered_date.strftime('%Y-%m-%d %H:%M UTC')
if node.is_registration
else None,
'forked_date': node.forked_date.strftime('%Y-%m-%d %H:%M UTC')
if node.is_fork
else None,
'ua_count': None,
'ua': None,
'non_ua': None,
'is_public': node.is_public,
'parent_title': parent_node.title if parent_node else None,
'parent_is_public': parent_node.is_public if parent_node else False,
'show_path': show_path,
'contributors': contributor_data['contributors'],
'others_count': contributor_data['others_count'],
'groups': serialize_groups_for_summary(node),
'description': node.description if len(node.description) <= 150 else node.description[0:150] + '...',
})
else:
summary['can_view'] = False
return summary
def index():
# Check if we're on an institution landing page
institution = Institution.objects.filter(domains__icontains=request.host, is_deleted=False)
if institution.exists():
institution = institution.get()
inst_dict = serialize_institution(institution)
inst_dict.update({
'redirect_url': '{}institutions/{}/'.format(DOMAIN, institution._id),
})
return inst_dict
else:
return use_ember_app()
def find_bookmark_collection(user):
Collection = apps.get_model('osf.Collection')
return Collection.objects.get(creator=user, deleted__isnull=True, is_bookmark_collection=True)
@must_be_logged_in
def dashboard(auth):
return use_ember_app()
@must_be_logged_in
@ember_flag_is_active(features.EMBER_MY_PROJECTS)
def my_projects(auth):
user = auth.user
region_list = get_storage_region_list(user)
bookmark_collection = find_bookmark_collection(user)
my_projects_id = bookmark_collection._id
return {'addons_enabled': user.get_addon_names(),
'dashboard_id': my_projects_id,
'storage_regions': region_list,
'storage_flag_is_active': storage_i18n_flag_active(),
}
def validate_page_num(page, pages):
if page < 0 or (pages and page >= pages):
raise HTTPError(http_status.HTTP_400_BAD_REQUEST, data=dict(
message_long='Invalid value for "page".'
))
def paginate(items, total, page, size):
pages = math.ceil(total / float(size))
validate_page_num(page, pages)
start = page * size
paginated_items = itertools.islice(items, start, start + size)
return paginated_items, pages
def reproducibility():
return redirect('/ezcuj/wiki')
def signin_form():
return form_utils.jsonify(SignInForm())
def forgot_password_form():
return form_utils.jsonify(ForgotPasswordForm(prefix='forgot_password'))
# GUID ###
def _build_guid_url(base, suffix=None):
url = '/'.join([
each.strip('/') for each in [base, suffix]
if each
])
if not isinstance(url, str):
url = url.decode('utf-8')
return u'/{0}/'.format(url)
def resolve_guid_download(guid, suffix=None, provider=None):
return resolve_guid(guid, suffix='download')
def resolve_guid(guid, suffix=None):
"""Load GUID by primary key, look up the corresponding view function in the
routing table, and return the return value of the view function without
changing the URL.
:param str guid: GUID primary key
:param str suffix: Remainder of URL after the GUID
:return: Return value of proxied view function
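Illustrative flow (the GUID 'abc12' below is hypothetical):
/abc12/          -> the referent's deep_url is proxied; the URL is unchanged
/abc12/download/ -> for file and preprint referents, action=download is added
to request.args and the file's deep_url is proxied instead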
"""
try:
# Look up
guid_object = Guid.load(guid)
except KeyError as e:
if e.message == 'osfstorageguidfile': # Used when an old detached OsfStorageGuidFile object is accessed
raise HTTPError(http_status.HTTP_404_NOT_FOUND)
else:
raise e
if guid_object:
# verify that the object implements a GuidStoredObject-like interface. If a model
# was once GuidStoredObject-like but that relationship has changed, it's
# possible to have referents that are instances of classes that don't
# have a deep_url attribute or otherwise don't behave as
# expected.
if not hasattr(guid_object.referent, 'deep_url'):
sentry.log_message(
'Guid resolved to an object with no deep_url', dict(guid=guid)
)
raise HTTPError(http_status.HTTP_404_NOT_FOUND)
referent = guid_object.referent
if referent is None:
logger.error('Referent of GUID {0} not found'.format(guid))
raise HTTPError(http_status.HTTP_404_NOT_FOUND)
if not referent.deep_url:
raise HTTPError(http_status.HTTP_404_NOT_FOUND)
# Handle file `/download` shortcut with supported types.
if suffix and suffix.rstrip('/').lower() == 'download':
file_referent = None
if isinstance(referent, Preprint) and referent.primary_file:
file_referent = referent.primary_file
elif isinstance(referent, BaseFileNode) and referent.is_file:
file_referent = referent
if file_referent:
if isinstance(file_referent.target, Preprint) and not file_referent.target.is_published:
# TODO: Ideally, permissions wouldn't be checked here.
# This is necessary to prevent a logical inconsistency with
# the routing scheme - if a preprint is not published, only
# admins and moderators should be able to know it exists.
auth = Auth.from_kwargs(request.args.to_dict(), {})
# Check if user isn't a nonetype or that the user has admin/moderator/superuser permissions
if auth.user is None or not (auth.user.has_perm('view_submissions', file_referent.target.provider) or
file_referent.target.has_permission(auth.user, permissions.ADMIN)):
raise HTTPError(http_status.HTTP_404_NOT_FOUND)
# Extend `request.args` adding `action=download`.
request.args = request.args.copy()
request.args.update({'action': 'download'})
# Do not include the `download` suffix in the url rebuild.
url = _build_guid_url(unquote(file_referent.deep_url))
return proxy_url(url)
# Handle Ember Applications
if isinstance(referent, Preprint):
if referent.provider.domain_redirect_enabled:
# This route should always be intercepted by nginx for the branded domain,
# w/ the exception of `<guid>/download` handled above.
return redirect(referent.absolute_url, http_status.HTTP_301_MOVED_PERMANENTLY)
if PROXY_EMBER_APPS:
resp = requests.get(EXTERNAL_EMBER_APPS['preprints']['server'], stream=True, timeout=EXTERNAL_EMBER_SERVER_TIMEOUT)
return Response(stream_with_context(resp.iter_content()), resp.status_code)
return send_from_directory(preprints_dir, 'index.html')
# Handle DraftNodes - these should never be accessed directly
if isinstance(referent, DraftNode):
raise HTTPError(http_status.HTTP_404_NOT_FOUND)
if isinstance(referent, BaseFileNode) and referent.is_file and (getattr(referent.target, 'is_quickfiles', False)):
if referent.is_deleted:
raise HTTPError(http_status.HTTP_410_GONE)
if PROXY_EMBER_APPS:
resp = requests.get(EXTERNAL_EMBER_APPS['ember_osf_web']['server'], stream=True, timeout=EXTERNAL_EMBER_SERVER_TIMEOUT)
return Response(stream_with_context(resp.iter_content()), resp.status_code)
return send_from_directory(ember_osf_web_dir, 'index.html')
if isinstance(referent, Registration) and (
not suffix or suffix.rstrip('/').lower() in ('comments', 'links', 'components')
):
if flag_is_active(request, features.EMBER_REGISTRIES_DETAIL_PAGE):
# Route only the base detail view to ember
if PROXY_EMBER_APPS:
resp = requests.get(EXTERNAL_EMBER_APPS['ember_osf_web']['server'], stream=True, timeout=EXTERNAL_EMBER_SERVER_TIMEOUT)
return Response(stream_with_context(resp.iter_content()), resp.status_code)
return send_from_directory(ember_osf_web_dir, 'index.html')
url = _build_guid_url(unquote(referent.deep_url), suffix)
return proxy_url(url)
# GUID not found; try lower-cased and redirect if exists
guid_object_lower = Guid.load(guid.lower())
if guid_object_lower:
return redirect(
_build_guid_url(guid.lower(), suffix)
)
# GUID not found
raise HTTPError(http_status.HTTP_404_NOT_FOUND)
# Redirects #
# redirect osf.io/about/ to OSF wiki page osf.io/4znzp/wiki/home/
def redirect_about(**kwargs):
return redirect('https://osf.io/4znzp/wiki/home/')
def redirect_help(**kwargs):
return redirect('/faq/')
def redirect_faq(**kwargs):
return redirect('https://help.osf.io/hc/en-us/articles/360019737894-FAQs')
# redirect osf.io/howosfworks to osf.io/getting-started/
def redirect_howosfworks(**kwargs):
return redirect('/getting-started/')
# redirect osf.io/getting-started to https://openscience.zendesk.com/hc/en-us
def redirect_getting_started(**kwargs):
return redirect('https://openscience.zendesk.com/hc/en-us')
# Redirect to home page
def redirect_to_home():
return redirect('/')
def redirect_to_cos_news(**kwargs):
# Redirect to COS News page
return redirect('https://cos.io/news/')
# Return error for legacy SHARE v1 search route
def legacy_share_v1_search(**kwargs):
return HTTPError(
http_status.HTTP_400_BAD_REQUEST,
data=dict(
message_long='Please use v2 of the SHARE search API available at {}api/v2/share/search/creativeworks/_search.'.format(settings.SHARE_URL)
)
)
def get_storage_region_list(user, node=False):
if not user: # Preserves legacy frontend test behavior
return []
if node:
default_region = node.osfstorage_region
else:
default_region = user.get_addon('osfstorage').default_region
available_regions = list(Region.objects.order_by('name').values('_id', 'name'))
default_region = {'name': default_region.name, '_id': default_region._id}
available_regions.insert(0, available_regions.pop(available_regions.index(default_region))) # default should be at top of list for UI.
return available_regions
| apache-2.0 | -6,856,778,931,455,924,000 | 38.449132 | 149 | 0.645616 | false |
doismellburning/django | tests/expressions_case/tests.py | 3 | 43248 | from __future__ import unicode_literals
from datetime import date, datetime, time, timedelta
from decimal import Decimal
from operator import attrgetter, itemgetter
import unittest
from uuid import UUID
from django.core.exceptions import FieldError
from django.db import connection, models
from django.db.models import F, Q, Value, Min, Max
from django.db.models.expressions import Case, When
from django.test import TestCase
from django.utils import six
try:
from PIL import Image
except ImportError:
Image = None
from .models import CaseTestModel, O2OCaseTestModel, FKCaseTestModel, Client
class CaseExpressionTests(TestCase):
@classmethod
def setUpTestData(cls):
o = CaseTestModel.objects.create(integer=1, integer2=1, string='1')
O2OCaseTestModel.objects.create(o2o=o, integer=1)
FKCaseTestModel.objects.create(fk=o, integer=1)
o = CaseTestModel.objects.create(integer=2, integer2=3, string='2')
O2OCaseTestModel.objects.create(o2o=o, integer=2)
FKCaseTestModel.objects.create(fk=o, integer=2)
FKCaseTestModel.objects.create(fk=o, integer=3)
o = CaseTestModel.objects.create(integer=3, integer2=4, string='3')
O2OCaseTestModel.objects.create(o2o=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=4)
o = CaseTestModel.objects.create(integer=2, integer2=2, string='2')
O2OCaseTestModel.objects.create(o2o=o, integer=2)
FKCaseTestModel.objects.create(fk=o, integer=2)
FKCaseTestModel.objects.create(fk=o, integer=3)
o = CaseTestModel.objects.create(integer=3, integer2=4, string='3')
O2OCaseTestModel.objects.create(o2o=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=4)
o = CaseTestModel.objects.create(integer=3, integer2=3, string='3')
O2OCaseTestModel.objects.create(o2o=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=4)
o = CaseTestModel.objects.create(integer=4, integer2=5, string='4')
O2OCaseTestModel.objects.create(o2o=o, integer=1)
FKCaseTestModel.objects.create(fk=o, integer=5)
# GROUP BY on Oracle fails with TextField/BinaryField; see #24096.
cls.non_lob_fields = [
f.name for f in CaseTestModel._meta.get_fields()
if not (f.is_relation and f.auto_created) and not isinstance(f, (models.BinaryField, models.TextField))
]
def test_annotate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(test=Case(
When(integer=1, then=Value('one')),
When(integer=2, then=Value('two')),
default=Value('other'),
output_field=models.CharField(),
)).order_by('pk'),
[(1, 'one'), (2, 'two'), (3, 'other'), (2, 'two'), (3, 'other'), (3, 'other'), (4, 'other')],
transform=attrgetter('integer', 'test')
)
def test_annotate_without_default(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(test=Case(
When(integer=1, then=Value(1)),
When(integer=2, then=Value(2)),
output_field=models.IntegerField(),
)).order_by('pk'),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'test')
)
def test_annotate_with_expression_as_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(f_test=Case(
When(integer=1, then=F('integer') + 1),
When(integer=2, then=F('integer') + 3),
default='integer',
)).order_by('pk'),
[(1, 2), (2, 5), (3, 3), (2, 5), (3, 3), (3, 3), (4, 4)],
transform=attrgetter('integer', 'f_test')
)
def test_annotate_with_expression_as_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(f_test=Case(
When(integer2=F('integer'), then=Value('equal')),
When(integer2=F('integer') + 1, then=Value('+1')),
output_field=models.CharField(),
)).order_by('pk'),
[(1, 'equal'), (2, '+1'), (3, '+1'), (2, 'equal'), (3, '+1'), (3, 'equal'), (4, '+1')],
transform=attrgetter('integer', 'f_test')
)
def test_annotate_with_join_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(join_test=Case(
When(integer=1, then=F('o2o_rel__integer') + 1),
When(integer=2, then=F('o2o_rel__integer') + 3),
default='o2o_rel__integer',
)).order_by('pk'),
[(1, 2), (2, 5), (3, 3), (2, 5), (3, 3), (3, 3), (4, 1)],
transform=attrgetter('integer', 'join_test')
)
def test_annotate_with_join_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(join_test=Case(
When(integer2=F('o2o_rel__integer'), then=Value('equal')),
When(integer2=F('o2o_rel__integer') + 1, then=Value('+1')),
default=Value('other'),
output_field=models.CharField(),
)).order_by('pk'),
[(1, 'equal'), (2, '+1'), (3, '+1'), (2, 'equal'), (3, '+1'), (3, 'equal'), (4, 'other')],
transform=attrgetter('integer', 'join_test')
)
def test_annotate_with_join_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(join_test=Case(
When(o2o_rel__integer=1, then=Value('one')),
When(o2o_rel__integer=2, then=Value('two')),
When(o2o_rel__integer=3, then=Value('three')),
default=Value('other'),
output_field=models.CharField(),
)).order_by('pk'),
[(1, 'one'), (2, 'two'), (3, 'three'), (2, 'two'), (3, 'three'), (3, 'three'), (4, 'one')],
transform=attrgetter('integer', 'join_test')
)
def test_annotate_with_annotation_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f_plus_1=F('integer') + 1,
f_plus_3=F('integer') + 3,
).annotate(
f_test=Case(
When(integer=1, then='f_plus_1'),
When(integer=2, then='f_plus_3'),
default='integer',
),
).order_by('pk'),
[(1, 2), (2, 5), (3, 3), (2, 5), (3, 3), (3, 3), (4, 4)],
transform=attrgetter('integer', 'f_test')
)
def test_annotate_with_annotation_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f_plus_1=F('integer') + 1,
).annotate(
f_test=Case(
When(integer2=F('integer'), then=Value('equal')),
When(integer2=F('f_plus_1'), then=Value('+1')),
output_field=models.CharField(),
),
).order_by('pk'),
[(1, 'equal'), (2, '+1'), (3, '+1'), (2, 'equal'), (3, '+1'), (3, 'equal'), (4, '+1')],
transform=attrgetter('integer', 'f_test')
)
def test_annotate_with_annotation_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f_minus_2=F('integer') - 2,
).annotate(
test=Case(
When(f_minus_2=-1, then=Value('negative one')),
When(f_minus_2=0, then=Value('zero')),
When(f_minus_2=1, then=Value('one')),
default=Value('other'),
output_field=models.CharField(),
),
).order_by('pk'),
[(1, 'negative one'), (2, 'zero'), (3, 'one'), (2, 'zero'), (3, 'one'), (3, 'one'), (4, 'other')],
transform=attrgetter('integer', 'test')
)
def test_annotate_with_aggregation_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.non_lob_fields).annotate(
min=Min('fk_rel__integer'),
max=Max('fk_rel__integer'),
).annotate(
test=Case(
When(integer=2, then='min'),
When(integer=3, then='max'),
),
).order_by('pk'),
[(1, None, 1, 1), (2, 2, 2, 3), (3, 4, 3, 4), (2, 2, 2, 3), (3, 4, 3, 4), (3, 4, 3, 4), (4, None, 5, 5)],
transform=itemgetter('integer', 'test', 'min', 'max')
)
def test_annotate_with_aggregation_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.non_lob_fields).annotate(
min=Min('fk_rel__integer'),
max=Max('fk_rel__integer'),
).annotate(
test=Case(
When(integer2=F('min'), then=Value('min')),
When(integer2=F('max'), then=Value('max')),
output_field=models.CharField(),
),
).order_by('pk'),
[(1, 1, 'min'), (2, 3, 'max'), (3, 4, 'max'), (2, 2, 'min'), (3, 4, 'max'), (3, 3, 'min'), (4, 5, 'min')],
transform=itemgetter('integer', 'integer2', 'test')
)
def test_annotate_with_aggregation_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.non_lob_fields).annotate(
max=Max('fk_rel__integer'),
).annotate(
test=Case(
When(max=3, then=Value('max = 3')),
When(max=4, then=Value('max = 4')),
default=Value(''),
output_field=models.CharField(),
),
).order_by('pk'),
[(1, 1, ''), (2, 3, 'max = 3'), (3, 4, 'max = 4'), (2, 3, 'max = 3'),
(3, 4, 'max = 4'), (3, 4, 'max = 4'), (4, 5, '')],
transform=itemgetter('integer', 'max', 'test')
)
def test_combined_expression(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(integer=1, then=Value(2)),
When(integer=2, then=Value(1)),
default=Value(3),
output_field=models.IntegerField(),
) + 1,
).order_by('pk'),
[(1, 3), (2, 2), (3, 4), (2, 2), (3, 4), (3, 4), (4, 4)],
transform=attrgetter('integer', 'test')
)
if connection.vendor == 'sqlite' and connection.Database.sqlite_version_info < (3, 7, 0):
# There is a bug in sqlite < 3.7.0, where placeholder order is lost.
# Thus, the above query returns <condition_value> + <result_value>
# for each matching case instead of <result_value> + 1 (#24148).
test_combined_expression = unittest.expectedFailure(test_combined_expression)
def test_in_subquery(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(
pk__in=CaseTestModel.objects.annotate(
test=Case(
When(integer=F('integer2'), then='pk'),
When(integer=4, then='pk'),
output_field=models.IntegerField(),
),
).values('test')).order_by('pk'),
[(1, 1), (2, 2), (3, 3), (4, 5)],
transform=attrgetter('integer', 'integer2')
)
def test_aggregate(self):
self.assertEqual(
CaseTestModel.objects.aggregate(
one=models.Sum(Case(
When(integer=1, then=Value(1)),
output_field=models.IntegerField(),
)),
two=models.Sum(Case(
When(integer=2, then=Value(1)),
output_field=models.IntegerField(),
)),
three=models.Sum(Case(
When(integer=3, then=Value(1)),
output_field=models.IntegerField(),
)),
four=models.Sum(Case(
When(integer=4, then=Value(1)),
output_field=models.IntegerField(),
)),
),
{'one': 1, 'two': 2, 'three': 3, 'four': 1}
)
def test_aggregate_with_expression_as_value(self):
self.assertEqual(
CaseTestModel.objects.aggregate(
one=models.Sum(Case(When(integer=1, then='integer'))),
two=models.Sum(Case(When(integer=2, then=F('integer') - 1))),
three=models.Sum(Case(When(integer=3, then=F('integer') + 1))),
),
{'one': 1, 'two': 2, 'three': 12}
)
def test_aggregate_with_expression_as_condition(self):
self.assertEqual(
CaseTestModel.objects.aggregate(
equal=models.Sum(Case(
When(integer2=F('integer'), then=Value(1)),
output_field=models.IntegerField(),
)),
plus_one=models.Sum(Case(
When(integer2=F('integer') + 1, then=Value(1)),
output_field=models.IntegerField(),
)),
),
{'equal': 3, 'plus_one': 4}
)
def test_filter(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer2=Case(
When(integer=2, then=Value(3)),
When(integer=3, then=Value(4)),
default=Value(1),
output_field=models.IntegerField(),
)).order_by('pk'),
[(1, 1), (2, 3), (3, 4), (3, 4)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_without_default(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer2=Case(
When(integer=2, then=Value(3)),
When(integer=3, then=Value(4)),
output_field=models.IntegerField(),
)).order_by('pk'),
[(2, 3), (3, 4), (3, 4)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_expression_as_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer2=Case(
When(integer=2, then=F('integer') + 1),
When(integer=3, then=F('integer')),
default='integer',
)).order_by('pk'),
[(1, 1), (2, 3), (3, 3)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_expression_as_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(string=Case(
When(integer2=F('integer'), then=Value('2')),
When(integer2=F('integer') + 1, then=Value('3')),
output_field=models.CharField(),
)).order_by('pk'),
[(3, 4, '3'), (2, 2, '2'), (3, 4, '3')],
transform=attrgetter('integer', 'integer2', 'string')
)
def test_filter_with_join_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer2=Case(
When(integer=2, then=F('o2o_rel__integer') + 1),
When(integer=3, then=F('o2o_rel__integer')),
default='o2o_rel__integer',
)).order_by('pk'),
[(1, 1), (2, 3), (3, 3)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_join_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer=Case(
When(integer2=F('o2o_rel__integer') + 1, then=Value(2)),
When(integer2=F('o2o_rel__integer'), then=Value(3)),
output_field=models.IntegerField(),
)).order_by('pk'),
[(2, 3), (3, 3)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_join_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer2=Case(
When(o2o_rel__integer=1, then=Value(1)),
When(o2o_rel__integer=2, then=Value(3)),
When(o2o_rel__integer=3, then=Value(4)),
output_field=models.IntegerField(),
)).order_by('pk'),
[(1, 1), (2, 3), (3, 4), (3, 4)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_annotation_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f=F('integer'),
f_plus_1=F('integer') + 1,
).filter(
integer2=Case(
When(integer=2, then='f_plus_1'),
When(integer=3, then='f'),
),
).order_by('pk'),
[(2, 3), (3, 3)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_annotation_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f_plus_1=F('integer') + 1,
).filter(
integer=Case(
When(integer2=F('integer'), then=Value(2)),
When(integer2=F('f_plus_1'), then=Value(3)),
output_field=models.IntegerField(),
),
).order_by('pk'),
[(3, 4), (2, 2), (3, 4)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_annotation_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f_plus_1=F('integer') + 1,
).filter(
integer2=Case(
When(f_plus_1=3, then=Value(3)),
When(f_plus_1=4, then=Value(4)),
default=Value(1),
output_field=models.IntegerField(),
),
).order_by('pk'),
[(1, 1), (2, 3), (3, 4), (3, 4)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_aggregation_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.non_lob_fields).annotate(
min=Min('fk_rel__integer'),
max=Max('fk_rel__integer'),
).filter(
integer2=Case(
When(integer=2, then='min'),
When(integer=3, then='max'),
),
).order_by('pk'),
[(3, 4, 3, 4), (2, 2, 2, 3), (3, 4, 3, 4)],
transform=itemgetter('integer', 'integer2', 'min', 'max')
)
def test_filter_with_aggregation_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.non_lob_fields).annotate(
min=Min('fk_rel__integer'),
max=Max('fk_rel__integer'),
).filter(
integer=Case(
When(integer2=F('min'), then=Value(2)),
When(integer2=F('max'), then=Value(3)),
),
).order_by('pk'),
[(3, 4, 3, 4), (2, 2, 2, 3), (3, 4, 3, 4)],
transform=itemgetter('integer', 'integer2', 'min', 'max')
)
def test_filter_with_aggregation_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.non_lob_fields).annotate(
max=Max('fk_rel__integer'),
).filter(
integer=Case(
When(max=3, then=Value(2)),
When(max=4, then=Value(3)),
),
).order_by('pk'),
[(2, 3, 3), (3, 4, 4), (2, 2, 3), (3, 4, 4), (3, 3, 4)],
transform=itemgetter('integer', 'integer2', 'max')
)
def test_update(self):
CaseTestModel.objects.update(
string=Case(
When(integer=1, then=Value('one')),
When(integer=2, then=Value('two')),
default=Value('other'),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 'one'), (2, 'two'), (3, 'other'), (2, 'two'), (3, 'other'), (3, 'other'), (4, 'other')],
transform=attrgetter('integer', 'string')
)
def test_update_without_default(self):
CaseTestModel.objects.update(
integer2=Case(
When(integer=1, then=Value(1)),
When(integer=2, then=Value(2)),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'integer2')
)
def test_update_with_expression_as_value(self):
CaseTestModel.objects.update(
integer=Case(
When(integer=1, then=F('integer') + 1),
When(integer=2, then=F('integer') + 3),
default='integer',
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[('1', 2), ('2', 5), ('3', 3), ('2', 5), ('3', 3), ('3', 3), ('4', 4)],
transform=attrgetter('string', 'integer')
)
def test_update_with_expression_as_condition(self):
CaseTestModel.objects.update(
string=Case(
When(integer2=F('integer'), then=Value('equal')),
When(integer2=F('integer') + 1, then=Value('+1')),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 'equal'), (2, '+1'), (3, '+1'), (2, 'equal'), (3, '+1'), (3, 'equal'), (4, '+1')],
transform=attrgetter('integer', 'string')
)
def test_update_with_join_in_condition_raise_field_error(self):
with self.assertRaisesMessage(FieldError, 'Joined field references are not permitted in this query'):
CaseTestModel.objects.update(
integer=Case(
When(integer2=F('o2o_rel__integer') + 1, then=Value(2)),
When(integer2=F('o2o_rel__integer'), then=Value(3)),
output_field=models.IntegerField(),
),
)
def test_update_with_join_in_predicate_raise_field_error(self):
with self.assertRaisesMessage(FieldError, 'Joined field references are not permitted in this query'):
CaseTestModel.objects.update(
string=Case(
When(o2o_rel__integer=1, then=Value('one')),
When(o2o_rel__integer=2, then=Value('two')),
When(o2o_rel__integer=3, then=Value('three')),
default=Value('other'),
output_field=models.CharField(),
),
)
def test_update_big_integer(self):
CaseTestModel.objects.update(
big_integer=Case(
When(integer=1, then=Value(1)),
When(integer=2, then=Value(2)),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'big_integer')
)
def test_update_binary(self):
CaseTestModel.objects.update(
binary=Case(
# fails on postgresql on Python 2.7 if output_field is not
# set explicitly
When(integer=1, then=Value(b'one', output_field=models.BinaryField())),
When(integer=2, then=Value(b'two', output_field=models.BinaryField())),
default=Value(b'', output_field=models.BinaryField()),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, b'one'), (2, b'two'), (3, b''), (2, b'two'), (3, b''), (3, b''), (4, b'')],
transform=lambda o: (o.integer, six.binary_type(o.binary))
)
def test_update_boolean(self):
CaseTestModel.objects.update(
boolean=Case(
When(integer=1, then=Value(True)),
When(integer=2, then=Value(True)),
default=Value(False),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, True), (2, True), (3, False), (2, True), (3, False), (3, False), (4, False)],
transform=attrgetter('integer', 'boolean')
)
def test_update_comma_separated_integer(self):
CaseTestModel.objects.update(
comma_separated_integer=Case(
When(integer=1, then=Value('1')),
When(integer=2, then=Value('2,2')),
default=Value(''),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '1'), (2, '2,2'), (3, ''), (2, '2,2'), (3, ''), (3, ''), (4, '')],
transform=attrgetter('integer', 'comma_separated_integer')
)
def test_update_date(self):
CaseTestModel.objects.update(
date=Case(
When(integer=1, then=Value(date(2015, 1, 1))),
When(integer=2, then=Value(date(2015, 1, 2))),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[
(1, date(2015, 1, 1)), (2, date(2015, 1, 2)), (3, None), (2, date(2015, 1, 2)),
(3, None), (3, None), (4, None)
],
transform=attrgetter('integer', 'date')
)
def test_update_date_time(self):
CaseTestModel.objects.update(
date_time=Case(
When(integer=1, then=Value(datetime(2015, 1, 1))),
When(integer=2, then=Value(datetime(2015, 1, 2))),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[
(1, datetime(2015, 1, 1)), (2, datetime(2015, 1, 2)), (3, None), (2, datetime(2015, 1, 2)),
(3, None), (3, None), (4, None)
],
transform=attrgetter('integer', 'date_time')
)
def test_update_decimal(self):
CaseTestModel.objects.update(
decimal=Case(
When(integer=1, then=Value(Decimal('1.1'))),
When(integer=2, then=Value(Decimal('2.2'))),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, Decimal('1.1')), (2, Decimal('2.2')), (3, None), (2, Decimal('2.2')), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'decimal')
)
def test_update_duration(self):
CaseTestModel.objects.update(
duration=Case(
# fails on sqlite if output_field is not set explicitly on all
# Values containing timedeltas
When(integer=1, then=Value(timedelta(1), output_field=models.DurationField())),
When(integer=2, then=Value(timedelta(2), output_field=models.DurationField())),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, timedelta(1)), (2, timedelta(2)), (3, None), (2, timedelta(2)), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'duration')
)
def test_update_email(self):
CaseTestModel.objects.update(
email=Case(
When(integer=1, then=Value('[email protected]')),
When(integer=2, then=Value('[email protected]')),
default=Value(''),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '[email protected]'), (2, '[email protected]'), (3, ''), (2, '[email protected]'), (3, ''), (3, ''), (4, '')],
transform=attrgetter('integer', 'email')
)
def test_update_file(self):
CaseTestModel.objects.update(
file=Case(
When(integer=1, then=Value('~/1')),
When(integer=2, then=Value('~/2')),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '~/1'), (2, '~/2'), (3, ''), (2, '~/2'), (3, ''), (3, ''), (4, '')],
transform=lambda o: (o.integer, six.text_type(o.file))
)
def test_update_file_path(self):
CaseTestModel.objects.update(
file_path=Case(
When(integer=1, then=Value('~/1')),
When(integer=2, then=Value('~/2')),
default=Value(''),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '~/1'), (2, '~/2'), (3, ''), (2, '~/2'), (3, ''), (3, ''), (4, '')],
transform=attrgetter('integer', 'file_path')
)
def test_update_float(self):
CaseTestModel.objects.update(
float=Case(
When(integer=1, then=Value(1.1)),
When(integer=2, then=Value(2.2)),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 1.1), (2, 2.2), (3, None), (2, 2.2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'float')
)
@unittest.skipUnless(Image, "Pillow not installed")
def test_update_image(self):
CaseTestModel.objects.update(
image=Case(
When(integer=1, then=Value('~/1')),
When(integer=2, then=Value('~/2')),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '~/1'), (2, '~/2'), (3, ''), (2, '~/2'), (3, ''), (3, ''), (4, '')],
transform=lambda o: (o.integer, six.text_type(o.image))
)
def test_update_generic_ip_address(self):
CaseTestModel.objects.update(
generic_ip_address=Case(
# fails on postgresql if output_field is not set explicitly
When(integer=1, then=Value('1.1.1.1')),
When(integer=2, then=Value('2.2.2.2')),
output_field=models.GenericIPAddressField(),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '1.1.1.1'), (2, '2.2.2.2'), (3, None), (2, '2.2.2.2'), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'generic_ip_address')
)
def test_update_null_boolean(self):
CaseTestModel.objects.update(
null_boolean=Case(
When(integer=1, then=Value(True)),
When(integer=2, then=Value(False)),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, True), (2, False), (3, None), (2, False), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'null_boolean')
)
def test_update_positive_integer(self):
CaseTestModel.objects.update(
positive_integer=Case(
When(integer=1, then=Value(1)),
When(integer=2, then=Value(2)),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'positive_integer')
)
def test_update_positive_small_integer(self):
CaseTestModel.objects.update(
positive_small_integer=Case(
When(integer=1, then=Value(1)),
When(integer=2, then=Value(2)),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'positive_small_integer')
)
def test_update_slug(self):
CaseTestModel.objects.update(
slug=Case(
When(integer=1, then=Value('1')),
When(integer=2, then=Value('2')),
default=Value(''),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '1'), (2, '2'), (3, ''), (2, '2'), (3, ''), (3, ''), (4, '')],
transform=attrgetter('integer', 'slug')
)
def test_update_small_integer(self):
CaseTestModel.objects.update(
small_integer=Case(
When(integer=1, then=Value(1)),
When(integer=2, then=Value(2)),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'small_integer')
)
def test_update_string(self):
CaseTestModel.objects.filter(string__in=['1', '2']).update(
string=Case(
When(integer=1, then=Value('1', output_field=models.CharField())),
When(integer=2, then=Value('2', output_field=models.CharField())),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.filter(string__in=['1', '2']).order_by('pk'),
[(1, '1'), (2, '2'), (2, '2')],
transform=attrgetter('integer', 'string')
)
def test_update_text(self):
CaseTestModel.objects.update(
text=Case(
When(integer=1, then=Value('1')),
When(integer=2, then=Value('2')),
default=Value(''),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '1'), (2, '2'), (3, ''), (2, '2'), (3, ''), (3, ''), (4, '')],
transform=attrgetter('integer', 'text')
)
def test_update_time(self):
CaseTestModel.objects.update(
time=Case(
# fails on sqlite if output_field is not set explicitly on all
# Values containing times
When(integer=1, then=Value(time(1), output_field=models.TimeField())),
When(integer=2, then=Value(time(2), output_field=models.TimeField())),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, time(1)), (2, time(2)), (3, None), (2, time(2)), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'time')
)
def test_update_url(self):
CaseTestModel.objects.update(
url=Case(
When(integer=1, then=Value('http://1.example.com/')),
When(integer=2, then=Value('http://2.example.com/')),
default=Value(''),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[
(1, 'http://1.example.com/'), (2, 'http://2.example.com/'), (3, ''), (2, 'http://2.example.com/'),
(3, ''), (3, ''), (4, '')
],
transform=attrgetter('integer', 'url')
)
def test_update_uuid(self):
CaseTestModel.objects.update(
uuid=Case(
# fails on sqlite if output_field is not set explicitly on all
# Values containing UUIDs
When(integer=1, then=Value(
UUID('11111111111111111111111111111111'),
output_field=models.UUIDField(),
)),
When(integer=2, then=Value(
UUID('22222222222222222222222222222222'),
output_field=models.UUIDField(),
)),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[
(1, UUID('11111111111111111111111111111111')), (2, UUID('22222222222222222222222222222222')), (3, None),
(2, UUID('22222222222222222222222222222222')), (3, None), (3, None), (4, None)
],
transform=attrgetter('integer', 'uuid')
)
def test_update_fk(self):
obj1, obj2 = CaseTestModel.objects.all()[:2]
CaseTestModel.objects.update(
fk=Case(
When(integer=1, then=Value(obj1.pk)),
When(integer=2, then=Value(obj2.pk)),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, obj1.pk), (2, obj2.pk), (3, None), (2, obj2.pk), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'fk_id')
)
def test_lookup_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(integer__lt=2, then=Value('less than 2')),
When(integer__gt=2, then=Value('greater than 2')),
default=Value('equal to 2'),
output_field=models.CharField(),
),
).order_by('pk'),
[
(1, 'less than 2'), (2, 'equal to 2'), (3, 'greater than 2'), (2, 'equal to 2'), (3, 'greater than 2'),
(3, 'greater than 2'), (4, 'greater than 2')
],
transform=attrgetter('integer', 'test')
)
def test_lookup_different_fields(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(integer=2, integer2=3, then=Value('when')),
default=Value('default'),
output_field=models.CharField(),
),
).order_by('pk'),
[
(1, 1, 'default'), (2, 3, 'when'), (3, 4, 'default'), (2, 2, 'default'), (3, 4, 'default'),
(3, 3, 'default'), (4, 5, 'default')
],
transform=attrgetter('integer', 'integer2', 'test')
)
def test_combined_q_object(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(Q(integer=2) | Q(integer2=3), then=Value('when')),
default=Value('default'),
output_field=models.CharField(),
),
).order_by('pk'),
[
(1, 1, 'default'), (2, 3, 'when'), (3, 4, 'default'), (2, 2, 'when'), (3, 4, 'default'),
(3, 3, 'when'), (4, 5, 'default')
],
transform=attrgetter('integer', 'integer2', 'test')
)
class CaseDocumentationExamples(TestCase):
@classmethod
def setUpTestData(cls):
Client.objects.create(
name='Jane Doe',
account_type=Client.REGULAR,
registered_on=date.today() - timedelta(days=36),
)
Client.objects.create(
name='James Smith',
account_type=Client.GOLD,
registered_on=date.today() - timedelta(days=5),
)
Client.objects.create(
name='Jack Black',
account_type=Client.PLATINUM,
registered_on=date.today() - timedelta(days=10 * 365),
)
def test_simple_example(self):
self.assertQuerysetEqual(
Client.objects.annotate(
discount=Case(
When(account_type=Client.GOLD, then=Value('5%')),
When(account_type=Client.PLATINUM, then=Value('10%')),
default=Value('0%'),
output_field=models.CharField(),
),
).order_by('pk'),
[('Jane Doe', '0%'), ('James Smith', '5%'), ('Jack Black', '10%')],
transform=attrgetter('name', 'discount')
)
def test_lookup_example(self):
a_month_ago = date.today() - timedelta(days=30)
a_year_ago = date.today() - timedelta(days=365)
self.assertQuerysetEqual(
Client.objects.annotate(
discount=Case(
When(registered_on__lte=a_year_ago, then=Value('10%')),
When(registered_on__lte=a_month_ago, then=Value('5%')),
default=Value('0%'),
output_field=models.CharField(),
),
).order_by('pk'),
[('Jane Doe', '5%'), ('James Smith', '0%'), ('Jack Black', '10%')],
transform=attrgetter('name', 'discount')
)
def test_conditional_update_example(self):
a_month_ago = date.today() - timedelta(days=30)
a_year_ago = date.today() - timedelta(days=365)
Client.objects.update(
account_type=Case(
When(registered_on__lte=a_year_ago, then=Value(Client.PLATINUM)),
When(registered_on__lte=a_month_ago, then=Value(Client.GOLD)),
default=Value(Client.REGULAR),
),
)
self.assertQuerysetEqual(
Client.objects.all().order_by('pk'),
[('Jane Doe', 'G'), ('James Smith', 'R'), ('Jack Black', 'P')],
transform=attrgetter('name', 'account_type')
)
def test_conditional_aggregation_example(self):
Client.objects.create(
name='Jean Grey',
account_type=Client.REGULAR,
registered_on=date.today(),
)
Client.objects.create(
name='James Bond',
account_type=Client.PLATINUM,
registered_on=date.today(),
)
Client.objects.create(
name='Jane Porter',
account_type=Client.PLATINUM,
registered_on=date.today(),
)
self.assertEqual(
Client.objects.aggregate(
regular=models.Sum(Case(
When(account_type=Client.REGULAR, then=Value(1)),
output_field=models.IntegerField(),
)),
gold=models.Sum(Case(
When(account_type=Client.GOLD, then=Value(1)),
output_field=models.IntegerField(),
)),
platinum=models.Sum(Case(
When(account_type=Client.PLATINUM, then=Value(1)),
output_field=models.IntegerField(),
)),
),
{'regular': 2, 'gold': 1, 'platinum': 3}
)
| bsd-3-clause | -861,671,593,887,562,100 | 39.007401 | 120 | 0.493873 | false |
afaheem88/rally | tests/unit/plugins/openstack/context/murano/test_murano_packages.py | 11 | 4025 | # Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.plugins.openstack.context.murano import murano_packages
from tests.unit import test
CTX = "rally.plugins.openstack.context.murano.murano_packages"
class MuranoGeneratorTestCase(test.TestCase):
def setUp(self):
super(MuranoGeneratorTestCase, self).setUp()
@staticmethod
def _get_context():
return {
"config": {
"users": {
"tenants": 2,
"users_per_tenant": 1,
"concurrent": 1,
},
"murano_packages": {
"app_package": (
"rally-jobs/extra/murano/"
"applications/HelloReporter/"
"io.murano.apps.HelloReporter.zip")
}
},
"admin": {
"endpoint": mock.MagicMock()
},
"task": mock.MagicMock(),
"users": [
{
"id": "user_0",
"tenant_id": "tenant_0",
"endpoint": "endpoint"
},
{
"id": "user_1",
"tenant_id": "tenant_1",
"endpoint": "endpoint"
}
],
"tenants": {
"tenant_0": {"name": "tenant_0_name"},
"tenant_1": {"name": "tenant_1_name"}
}
}
@mock.patch("%s.osclients" % CTX)
def test_setup(self, mock_osclients):
mock_app = mock.MagicMock(id="fake_app_id")
(mock_osclients.Clients().murano().
packages.create.return_value) = mock_app
murano_ctx = murano_packages.PackageGenerator(self._get_context())
murano_ctx.setup()
self.assertEqual(2, len(murano_ctx.context["tenants"]))
tenant_id = murano_ctx.context["users"][0]["tenant_id"]
self.assertEqual([mock_app],
murano_ctx.context["tenants"][tenant_id]["packages"])
@mock.patch("%s.osclients" % CTX)
@mock.patch("%s.resource_manager.cleanup" % CTX)
def test_cleanup_with_zip(self, mock_cleanup, mock_osclients):
mock_app = mock.Mock(id="fake_app_id")
(mock_osclients.Clients().murano().
packages.create.return_value) = mock_app
murano_ctx = murano_packages.PackageGenerator(self._get_context())
murano_ctx.setup()
murano_ctx.cleanup()
mock_cleanup.assert_called_once_with(names=["murano.packages"],
users=murano_ctx.context["users"])
@mock.patch("%s.osclients" % CTX)
@mock.patch("%s.resource_manager.cleanup" % CTX)
def test_cleanup_with_dir(self, mock_cleanup, mock_osclients):
mock_app = mock.Mock(id="fake_app_id")
(mock_osclients.Clients().murano().
packages.create.return_value) = mock_app
ctx_dict = self._get_context()
app_dir = ("rally-jobs/extra/murano/applications/"
"HelloReporter/io.murano.apps.HelloReporter/")
ctx_dict["config"]["murano_packages"]["app_package"] = app_dir
murano_ctx = murano_packages.PackageGenerator(ctx_dict)
murano_ctx.setup()
murano_ctx.cleanup()
mock_cleanup.assert_called_once_with(names=["murano.packages"],
users=murano_ctx.context["users"])
| apache-2.0 | -6,694,020,186,566,969,000 | 35.261261 | 79 | 0.550311 | false |
jabesq/home-assistant | homeassistant/components/gitlab_ci/sensor.py | 2 | 5889 | """Sensor for retrieving latest GitLab CI job information."""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION, CONF_NAME, CONF_SCAN_INTERVAL, CONF_TOKEN, CONF_URL)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
ATTR_BUILD_BRANCH = 'build branch'
ATTR_BUILD_COMMIT_DATE = 'commit date'
ATTR_BUILD_COMMIT_ID = 'commit id'
ATTR_BUILD_DURATION = 'build_duration'
ATTR_BUILD_FINISHED = 'build_finished'
ATTR_BUILD_ID = 'build id'
ATTR_BUILD_STARTED = 'build_started'
ATTR_BUILD_STATUS = 'build_status'
ATTRIBUTION = "Information provided by https://gitlab.com/"
CONF_GITLAB_ID = 'gitlab_id'
DEFAULT_NAME = 'GitLab CI Status'
DEFAULT_URL = 'https://gitlab.com'
ICON_HAPPY = 'mdi:emoticon-happy'
ICON_OTHER = 'mdi:git'
ICON_SAD = 'mdi:emoticon-sad'
SCAN_INTERVAL = timedelta(seconds=300)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_GITLAB_ID): cv.string,
vol.Required(CONF_TOKEN): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_URL, default=DEFAULT_URL): cv.string
})
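# Illustrative configuration.yaml entry for this platform (values below are
# placeholders, not shipped defaults; only gitlab_id and token are required):
#
#   sensor:
#     - platform: gitlab_ci
#       gitlab_id: "my-group/my-project"
#       token: YOUR_GITLAB_TOKEN
#       url: "https://gitlab.com"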
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the GitLab sensor platform."""
_name = config.get(CONF_NAME)
_interval = config.get(CONF_SCAN_INTERVAL, SCAN_INTERVAL)
_url = config.get(CONF_URL)
_gitlab_data = GitLabData(
priv_token=config[CONF_TOKEN],
gitlab_id=config[CONF_GITLAB_ID],
interval=_interval,
url=_url
)
add_entities([GitLabSensor(_gitlab_data, _name)], True)
class GitLabSensor(Entity):
"""Representation of a GitLab sensor."""
def __init__(self, gitlab_data, name):
"""Initialize the GitLab sensor."""
self._available = False
self._state = None
self._started_at = None
self._finished_at = None
self._duration = None
self._commit_id = None
self._commit_date = None
self._build_id = None
self._branch = None
self._gitlab_data = gitlab_data
self._name = name
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def available(self):
"""Return True if entity is available."""
return self._available
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_BUILD_STATUS: self._state,
ATTR_BUILD_STARTED: self._started_at,
ATTR_BUILD_FINISHED: self._finished_at,
ATTR_BUILD_DURATION: self._duration,
ATTR_BUILD_COMMIT_ID: self._commit_id,
ATTR_BUILD_COMMIT_DATE: self._commit_date,
ATTR_BUILD_ID: self._build_id,
ATTR_BUILD_BRANCH: self._branch
}
@property
def icon(self):
"""Return the icon to use in the frontend."""
if self._state == 'success':
return ICON_HAPPY
if self._state == 'failed':
return ICON_SAD
return ICON_OTHER
def update(self):
"""Collect updated data from GitLab API."""
self._gitlab_data.update()
self._state = self._gitlab_data.status
self._started_at = self._gitlab_data.started_at
self._finished_at = self._gitlab_data.finished_at
self._duration = self._gitlab_data.duration
self._commit_id = self._gitlab_data.commit_id
self._commit_date = self._gitlab_data.commit_date
self._build_id = self._gitlab_data.build_id
self._branch = self._gitlab_data.branch
self._available = self._gitlab_data.available
class GitLabData():
"""GitLab Data object."""
def __init__(self, gitlab_id, priv_token, interval, url):
"""Fetch data from GitLab API for most recent CI job."""
import gitlab
self._gitlab_id = gitlab_id
self._gitlab = gitlab.Gitlab(
url, private_token=priv_token, per_page=1)
self._gitlab.auth()
self._gitlab_exceptions = gitlab.exceptions
self.update = Throttle(interval)(self._update)
self.available = False
self.status = None
self.started_at = None
self.finished_at = None
self.duration = None
self.commit_id = None
self.commit_date = None
self.build_id = None
self.branch = None
def _update(self):
try:
_projects = self._gitlab.projects.get(self._gitlab_id)
_last_pipeline = _projects.pipelines.list(page=1)[0]
_last_job = _last_pipeline.jobs.list(page=1)[0]
self.status = _last_pipeline.attributes.get('status')
self.started_at = _last_job.attributes.get('started_at')
self.finished_at = _last_job.attributes.get('finished_at')
self.duration = _last_job.attributes.get('duration')
_commit = _last_job.attributes.get('commit')
self.commit_id = _commit.get('id')
self.commit_date = _commit.get('committed_date')
self.build_id = _last_job.attributes.get('id')
self.branch = _last_job.attributes.get('ref')
self.available = True
except self._gitlab_exceptions.GitlabAuthenticationError as erra:
_LOGGER.error("Authentication Error: %s", erra)
self.available = False
except self._gitlab_exceptions.GitlabGetError as errg:
_LOGGER.error("Project Not Found: %s", errg)
self.available = False
| apache-2.0 | 7,485,731,597,661,238,000 | 32.651429 | 74 | 0.625064 | false |
falgore88/grafana-metrics | grafana_metrics/metrics/base.py | 1 | 1754 | # coding: utf-8
from __future__ import unicode_literals
from six import python_2_unicode_compatible
@python_2_unicode_compatible
class MetricData(object):
def __init__(self, name, fields, tags=None, *args, **kwargs):
"""
:param name: string
:param value: any
:param tags: list
:param time: datetime
"""
self.name = name
self.tags = tags or {}
self.fields = fields
def __str__(self):
metric_string = "name=%s" % self.name
if self.tags:
metric_string += ",{}".format(",".join(["%s=%s" % (k, v) for k, v in self.tags.items()]))
metric_string += ",{}".format(",".join(["%s=%s" % (k, v) for k, v in self.fields.items()]))
return metric_string
def to_influx(self):
row = {
'measurement': self.name,
}
if self.tags:
row['tags'] = self.tags
if self.fields:
row['fields'] = self.fields
return row
    def __repr__(self):
        # str() resolves correctly under both Python 2 (via the
        # python_2_unicode_compatible decorator) and Python 3; __unicode__
        # does not exist on Python 3.
        return str(self)
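# Illustrative usage sketch (hypothetical values): a point named "cpu" with one
# field and one tag, rendered for logging and serialised for an InfluxDB writer.
#
#   point = MetricData(name="cpu", fields={"value": 12.5}, tags={"host": "web-1"})
#   str(point)          # "name=cpu,host=web-1,value=12.5"
#   point.to_influx()   # {'measurement': 'cpu', 'tags': {...}, 'fields': {...}}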
class Metric(object):
TYPE = None
def __init__(self, measurement, tags=None, interval=60, timeout=30, *args, **kwargs):
self.measurement = measurement
self.tags = tags or {}
self.interval = int(interval)
self.timeout = int(timeout) if timeout else None
    def collect(self):
        # NotImplementedError is the exception class; NotImplemented is a
        # sentinel value and cannot be raised.
        raise NotImplementedError
def get_name(self):
name = "%s(%s)" % (self.measurement, self.TYPE)
if self.tags:
name += " {}".format(",".join(["%s=%s" % (k, v) for k, v in self.tags.items()]))
return name
    def __repr__(self):
        return self.get_name()
def __unicode__(self):
return self.get_name()
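# Minimal subclass sketch (hypothetical, not part of this module): a concrete
# metric sets TYPE, implements collect(), and returns MetricData points.
#
#   class DummyMetric(Metric):
#       TYPE = "dummy"
#       def collect(self):
#           return [MetricData(self.measurement, fields={"value": 1}, tags=self.tags)]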
| apache-2.0 | -1,528,444,064,236,114,200 | 25.984615 | 101 | 0.534208 | false |
pgroth/independence-indicators | Temporal-Coauthor-Networks/vincent/build/lib/vincent/visualization.py | 2 | 9694 | # -*- coding: utf-8 -*-
"""
Visualization: Top level class for Vega Grammar
"""
from __future__ import (print_function, division)
import random
from .core import (_assert_is_type, ValidationError,
KeyedList, grammar, GrammarClass)
from .data import Data
from .scales import Scale
from .marks import Mark
from .axes import Axis
from .legends import Legend
from .colors import brews
from ._compat import str_types
class Visualization(GrammarClass):
"""Visualization container class.
This class defines the full visualization. Calling its ``to_json``
method should return a complete Vega definition.
The sub-elements of the visualization are stored in the ``data``,
``axes``, ``marks``, and ``scales`` attributes. See the docs for each
attribute for details.
"""
def __init__(self, *args, **kwargs):
"""Initialize a Visualization
In addition to setting any attributes, this sets the data, marks,
scales, and axes properties to empty KeyedLists if they aren't
defined by the arguments.
"""
super(Visualization, self).__init__(*args, **kwargs)
for attrib in ('data', 'scales'):
if not getattr(self, attrib):
setattr(self, attrib, KeyedList(attr_name='name'))
for attrib in ('axes', 'marks'):
if not getattr(self, attrib):
setattr(self, attrib, KeyedList(attr_name='type'))
# Legends don't get keyed.
if not self.legends:
self.legends = []
@grammar(str_types)
def name(value):
"""string : Name of the visualization (optional)
"""
@grammar(int)
def width(value):
"""int : Width of the visualization in pixels
Default is 500 if undefined.
"""
if value < 0:
raise ValueError('width cannot be negative')
@grammar(int)
def height(value):
"""int : Height of the visualization in pixels
Default is 500 if undefined.
"""
if value < 0:
raise ValueError('height cannot be negative')
@grammar(list)
def viewport(value):
"""2-element list of ints : Dimensions of the viewport
The viewport is a bounding box containing the visualization. If the
dimensions of the visualization are larger than the viewport, then
the visualization will be scrollable.
If undefined, then the full visualization is shown.
"""
if len(value) != 2:
raise ValueError('viewport must have 2 dimensions')
for v in value:
_assert_is_type('viewport dimension', v, int)
if v < 0:
raise ValueError('viewport dimensions cannot be negative')
@grammar((int, dict,) + str_types)
def padding(value):
"""int or dict : Padding around visualization
        The padding defines the distance between the edge of the
        visualization canvas and the visualization box. It does not count as
        part of the visualization width/height. Values cannot be negative.
        If a dict, padding must have all keys ``'top'``, ``'left'``,
        ``'right'``, and ``'bottom'`` with int values.
"""
if isinstance(value, dict):
required_keys = ['top', 'left', 'right', 'bottom']
for key in required_keys:
if key not in value:
error = ('Padding must have keys "{0}".'
.format('", "'.join(required_keys)))
raise ValueError(error)
_assert_is_type('padding: {0}'.format(key), value[key], int)
if value[key] < 0:
raise ValueError('Padding cannot be negative.')
elif isinstance(value, int):
if value < 0:
raise ValueError('Padding cannot be negative.')
else:
if value not in ("auto", "strict"):
raise ValueError('Padding can only be auto or strict.')
@grammar((list, KeyedList))
def data(value):
"""list or KeyedList of ``Data`` : Data definitions
This defines the data being visualized. See the :class:`Data` class
for details.
"""
for i, entry in enumerate(value):
_assert_is_type('data[{0}]'.format(i), entry, Data)
@grammar((list, KeyedList))
def scales(value):
"""list or KeyedList of ``Scale`` : Scale definitions
Scales map the data from the domain of the data to some
visualization space (such as an x-axis). See the :class:`Scale`
class for details.
"""
for i, entry in enumerate(value):
_assert_is_type('scales[{0}]'.format(i), entry, Scale)
@grammar((list, KeyedList))
def axes(value):
"""list or KeyedList of ``Axis`` : Axis definitions
Axes define the locations of the data being mapped by the scales.
See the :class:`Axis` class for details.
"""
for i, entry in enumerate(value):
_assert_is_type('axes[{0}]'.format(i), entry, Axis)
@grammar((list, KeyedList))
def marks(value):
"""list or KeyedList of ``Mark`` : Mark definitions
Marks are the visual objects (such as lines, bars, etc.) that
represent the data in the visualization space. See the :class:`Mark`
class for details.
"""
for i, entry in enumerate(value):
_assert_is_type('marks[{0}]'.format(i), entry, Mark)
@grammar((list, KeyedList))
def legends(value):
"""list or KeyedList of ``Legends`` : Legend definitions
Legends visualize scales, and take one or more scales as their input.
They can be customized via a LegendProperty object.
"""
for i, entry in enumerate(value):
_assert_is_type('legends[{0}]'.format(i), entry, Legend)
def axis_titles(self, x=None, y=None):
"""Apply axis titles to the figure.
This is a convenience method for manually modifying the "Axes" mark.
Parameters
----------
x: string, default 'null'
X-axis title
y: string, default 'null'
Y-axis title
Example
-------
>>>vis.axis_titles(y="Data 1", x="Data 2")
"""
keys = self.axes.get_keys()
if keys:
for key in keys:
if key == 'x':
self.axes[key].title = x
elif key == 'y':
self.axes[key].title = y
else:
self.axes.extend([Axis(type='x', title=x),
Axis(type='y', title=y)])
def legend(self, title=None, scale='color'):
"""Convience method for adding a legend to the figure.
Important: This defaults to the color scale that is generated with
Line, Area, Stacked Line, etc charts. For bar charts, the scale ref is
usually 'y'.
Parameters
----------
title: string, default None
Legend Title
scale: string, default 'color'
Scale reference for legend
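        Example
        -------
        >>>vis.legend(title="Legend")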
"""
self.legends.append(Legend(title=title, fill=scale, offset=0))
def colors(self, brew=None):
"""Convenience method for adding color brewer scales to charts with a
color scale, such as stacked or grouped bars.
See the colors here: http://colorbrewer2.org/
Or here: http://bl.ocks.org/mbostock/5577023
This assumes that a 'color' scale exists on your chart.
Parameters
----------
brew: string, default None
Color brewer scheme (BuGn, YlOrRd, etc)
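        Example
        -------
        >>>vis.colors(brew='YlOrRd')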
"""
self.scales['color'].range = brews[brew]
def validate(self, require_all=True, scale='colors'):
"""Validate the visualization contents.
Parameters
----------
require_all : boolean, default True
If True (default), then all fields ``data``, ``scales``,
``axes``, and ``marks`` must be defined. The user is allowed to
disable this if the intent is to define the elements
client-side.
If the contents of the visualization are not valid Vega, then a
:class:`ValidationError` is raised.
"""
super(self.__class__, self).validate()
required_attribs = ('data', 'scales', 'axes', 'marks')
for elem in required_attribs:
attr = getattr(self, elem)
if attr:
# Validate each element of the sets of data, etc
for entry in attr:
entry.validate()
names = [a.name for a in attr]
if len(names) != len(set(names)):
raise ValidationError(elem + ' has duplicate names')
elif require_all:
raise ValidationError(
elem + ' must be defined for valid visualization')
def display(self):
"""Display visualization inline in IPython notebook"""
from IPython.core.display import display, HTML, Javascript
# Copied from vincent.ipynb:
# HACK: use a randomly chosen unique div id
id = random.randint(0, 2 ** 16)
a = HTML('<div id="vis%d"></div>' % id)
b = Javascript(_vega_t % (self.to_json(pretty_print=False), id))
display(a, b)
_vega_t = """
( function() {
var _do_plot = function() {
if ( typeof vg == 'undefined' ) {
$([IPython.events]).on("vega_loaded.vincent", _do_plot);
return;
}
vg.parse.spec(%s, function(chart) {
chart({el: "#vis%d"}).update();
});
};
_do_plot();
})();
"""
| gpl-2.0 | 238,045,456,765,235,070 | 32.659722 | 78 | 0.569321 | false |
koyuawsmbrtn/eclock | windows/Python27/Lib/site-packages/pygame/tests/color_test.py | 3 | 28147 | #################################### IMPORTS ###################################
from __future__ import generators
if __name__ == '__main__':
import sys
import os
pkg_dir = os.path.split(os.path.abspath(__file__))[0]
parent_dir, pkg_name = os.path.split(pkg_dir)
is_pygame_pkg = (pkg_name == 'tests' and
os.path.split(parent_dir)[1] == 'pygame')
if not is_pygame_pkg:
sys.path.insert(0, parent_dir)
else:
is_pygame_pkg = __name__.startswith('pygame.tests.')
if is_pygame_pkg:
from pygame.tests.test_utils import test_not_implemented, unittest
else:
from test.test_utils import test_not_implemented, unittest
import pygame
from pygame.compat import long_
import math
################################### CONSTANTS ##################################
rgba_vals = [0, 1, 62, 63, 126, 127, 255]
rgba_combinations = [ (r,g,b,a) for r in rgba_vals
for g in rgba_vals
for b in rgba_vals
for a in rgba_vals ]
################################################################################
def rgba_combos_Color_generator ():
for rgba in rgba_combinations:
yield pygame.Color(*rgba)
# Python gamma correct
def gamma_correct (rgba_0_255, gamma):
corrected = round(255.0 * math.pow(rgba_0_255/255.0, gamma))
return max(min( int(corrected), 255), 0)
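# For example, gamma_correct(127, 2.0) == 63, since 255 * (127 / 255.0) ** 2
# rounds to 63, which already lies inside the 0-255 clamp.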
################################################################################
# TODO: add tests for
# correct_gamma() -- test against statically defined verified correct values
# coerce () -- ??
def _assignr (x, y):
x.r = y
def _assigng (x, y):
x.g = y
def _assignb (x, y):
x.b = y
def _assigna (x, y):
x.a = y
def _assign_item (x, p, y):
x[p] = y
class ColorTypeTest (unittest.TestCase):
def test_invalid_html_hex_codes(self):
# This was a problem with the way 2 digit hex numbers were
# calculated. The test_hex_digits test is related to the fix.
Color = pygame.color.Color
self.failUnlessRaises(ValueError, lambda: Color('# f000000'))
self.failUnlessRaises(ValueError, lambda: Color('#f 000000'))
self.failUnlessRaises(ValueError, lambda: Color('#-f000000'))
def test_hex_digits(self):
# This is an implementation specific test.
# Two digit hex numbers are calculated using table lookups
# for the upper and lower digits.
Color = pygame.color.Color
self.assertEqual(Color('#00000000').r, 0x00)
self.assertEqual(Color('#10000000').r, 0x10)
self.assertEqual(Color('#20000000').r, 0x20)
self.assertEqual(Color('#30000000').r, 0x30)
self.assertEqual(Color('#40000000').r, 0x40)
self.assertEqual(Color('#50000000').r, 0x50)
self.assertEqual(Color('#60000000').r, 0x60)
self.assertEqual(Color('#70000000').r, 0x70)
self.assertEqual(Color('#80000000').r, 0x80)
self.assertEqual(Color('#90000000').r, 0x90)
self.assertEqual(Color('#A0000000').r, 0xA0)
self.assertEqual(Color('#B0000000').r, 0xB0)
self.assertEqual(Color('#C0000000').r, 0xC0)
self.assertEqual(Color('#D0000000').r, 0xD0)
self.assertEqual(Color('#E0000000').r, 0xE0)
self.assertEqual(Color('#F0000000').r, 0xF0)
self.assertEqual(Color('#01000000').r, 0x01)
self.assertEqual(Color('#02000000').r, 0x02)
self.assertEqual(Color('#03000000').r, 0x03)
self.assertEqual(Color('#04000000').r, 0x04)
self.assertEqual(Color('#05000000').r, 0x05)
self.assertEqual(Color('#06000000').r, 0x06)
self.assertEqual(Color('#07000000').r, 0x07)
self.assertEqual(Color('#08000000').r, 0x08)
self.assertEqual(Color('#09000000').r, 0x09)
self.assertEqual(Color('#0A000000').r, 0x0A)
self.assertEqual(Color('#0B000000').r, 0x0B)
self.assertEqual(Color('#0C000000').r, 0x0C)
self.assertEqual(Color('#0D000000').r, 0x0D)
self.assertEqual(Color('#0E000000').r, 0x0E)
self.assertEqual(Color('#0F000000').r, 0x0F)
def test_comparison(self):
Color = pygame.color.Color
# Check valid comparisons
self.failUnless(Color(255, 0, 0, 0) == Color(255, 0, 0, 0))
self.failUnless(Color(0, 255, 0, 0) == Color(0, 255, 0, 0))
self.failUnless(Color(0, 0, 255, 0) == Color(0, 0, 255, 0))
self.failUnless(Color(0, 0, 0, 255) == Color(0, 0, 0, 255))
self.failIf(Color(0, 0, 0, 0) == Color(255, 0, 0, 0))
self.failIf(Color(0, 0, 0, 0) == Color(0, 255, 0, 0))
self.failIf(Color(0, 0, 0, 0) == Color(0, 0, 255, 0))
self.failIf(Color(0, 0, 0, 0) == Color(0, 0, 0, 255))
self.failUnless(Color(0, 0, 0, 0) != Color(255, 0, 0, 0))
self.failUnless(Color(0, 0, 0, 0) != Color(0, 255, 0, 0))
self.failUnless(Color(0, 0, 0, 0) != Color(0, 0, 255, 0))
self.failUnless(Color(0, 0, 0, 0) != Color(0, 0, 0, 255))
self.failIf(Color(255, 0, 0, 0) != Color(255, 0, 0, 0))
self.failIf(Color(0, 255, 0, 0) != Color(0, 255, 0, 0))
self.failIf(Color(0, 0, 255, 0) != Color(0, 0, 255, 0))
self.failIf(Color(0, 0, 0, 255) != Color(0, 0, 0, 255))
self.failUnless(Color(255, 0, 0, 0) == (255, 0, 0, 0))
self.failUnless(Color(0, 255, 0, 0) == (0, 255, 0, 0))
self.failUnless(Color(0, 0, 255, 0) == (0, 0, 255, 0))
self.failUnless(Color(0, 0, 0, 255) == (0, 0, 0, 255))
self.failIf(Color(0, 0, 0, 0) == (255, 0, 0, 0))
self.failIf(Color(0, 0, 0, 0) == (0, 255, 0, 0))
self.failIf(Color(0, 0, 0, 0) == (0, 0, 255, 0))
self.failIf(Color(0, 0, 0, 0) == (0, 0, 0, 255))
self.failUnless(Color(0, 0, 0, 0) != (255, 0, 0, 0))
self.failUnless(Color(0, 0, 0, 0) != (0, 255, 0, 0))
self.failUnless(Color(0, 0, 0, 0) != (0, 0, 255, 0))
self.failUnless(Color(0, 0, 0, 0) != (0, 0, 0, 255))
self.failIf(Color(255, 0, 0, 0) != (255, 0, 0, 0))
self.failIf(Color(0, 255, 0, 0) != (0, 255, 0, 0))
self.failIf(Color(0, 0, 255, 0) != (0, 0, 255, 0))
self.failIf(Color(0, 0, 0, 255) != (0, 0, 0, 255))
self.failUnless((255, 0, 0, 0) == Color(255, 0, 0, 0))
self.failUnless((0, 255, 0, 0) == Color(0, 255, 0, 0))
self.failUnless((0, 0, 255, 0) == Color(0, 0, 255, 0))
self.failUnless((0, 0, 0, 255) == Color(0, 0, 0, 255))
self.failIf((0, 0, 0, 0) == Color(255, 0, 0, 0))
self.failIf((0, 0, 0, 0) == Color(0, 255, 0, 0))
self.failIf((0, 0, 0, 0) == Color(0, 0, 255, 0))
self.failIf((0, 0, 0, 0) == Color(0, 0, 0, 255))
self.failUnless((0, 0, 0, 0) != Color(255, 0, 0, 0))
self.failUnless((0, 0, 0, 0) != Color(0, 255, 0, 0))
self.failUnless((0, 0, 0, 0) != Color(0, 0, 255, 0))
self.failUnless((0, 0, 0, 0) != Color(0, 0, 0, 255))
self.failIf((255, 0, 0, 0) != Color(255, 0, 0, 0))
self.failIf((0, 255, 0, 0) != Color(0, 255, 0, 0))
self.failIf((0, 0, 255, 0) != Color(0, 0, 255, 0))
self.failIf((0, 0, 0, 255) != Color(0, 0, 0, 255))
class TupleSubclass(tuple):
pass
self.failUnless(Color(255, 0, 0, 0) == TupleSubclass((255, 0, 0, 0)))
self.failUnless(TupleSubclass((255, 0, 0, 0)) == Color(255, 0, 0, 0))
self.failIf(Color(255, 0, 0, 0) != TupleSubclass((255, 0, 0, 0)))
self.failIf(TupleSubclass((255, 0, 0, 0)) != Color(255, 0, 0, 0))
# These are not supported so will be unequal.
self.failIf(Color(255, 0, 0, 0) == "#ff000000")
self.failUnless(Color(255, 0, 0, 0) != "#ff000000")
self.failIf("#ff000000" == Color(255, 0, 0, 0))
self.failUnless("#ff000000" != Color(255, 0, 0, 0))
self.failIf(Color(255, 0, 0, 0) == 0xff000000)
self.failUnless(Color(255, 0, 0, 0) != 0xff000000)
self.failIf(0xff000000 == Color(255, 0, 0, 0))
self.failUnless(0xff000000 != Color(255, 0, 0, 0))
self.failIf(Color(255, 0, 0, 0) == [255, 0, 0, 0])
self.failUnless(Color(255, 0, 0, 0) != [255, 0, 0, 0])
        self.failIf([255, 0, 0, 0] == Color(255, 0, 0, 0))
self.failUnless([255, 0, 0, 0] != Color(255, 0, 0, 0))
# Comparison is not implemented for invalid color values.
class Test(object):
def __eq__(self, other):
return -1
def __ne__(self, other):
return -2
class TestTuple(tuple):
def __eq__(self, other):
return -1
def __ne__(self, other):
return -2
t = Test()
t_tuple = TestTuple(('a', 0, 0, 0))
black = Color('black')
self.assertEqual(black == t, -1)
self.assertEqual(t == black, -1)
self.assertEqual(black != t, -2)
self.assertEqual(t != black, -2)
self.assertEqual(black == t_tuple, -1)
self.assertEqual(black != t_tuple, -2)
self.assertEqual(t_tuple == black, -1)
self.assertEqual(t_tuple != black, -2)
def test_ignore_whitespace(self):
self.assertEquals(pygame.color.Color('red'), pygame.color.Color(' r e d '))
def test_slice(self):
#"""|tags: python3_ignore|"""
# slicing a color gives you back a tuple.
# do all sorts of slice combinations.
c = pygame.Color(1,2,3,4)
self.assertEquals((1,2,3,4), c[:])
self.assertEquals((1,2,3), c[:-1])
self.assertEquals((), c[:-5])
self.assertEquals((1,2,3,4), c[:4])
self.assertEquals((1,2,3,4), c[:5])
self.assertEquals((1,2), c[:2])
self.assertEquals((1,), c[:1])
self.assertEquals((), c[:0])
self.assertEquals((2,), c[1:-2])
self.assertEquals((3, 4), c[-2:])
self.assertEquals((4,), c[-1:])
# NOTE: assigning to a slice is currently unsupported.
def test_unpack(self):
# should be able to unpack to r,g,b,a and r,g,b
c = pygame.Color(1,2,3,4)
r,g,b,a = c
self.assertEquals((1,2,3,4), (r,g,b,a))
self.assertEquals(c, (r,g,b,a))
c.set_length(3)
r,g,b = c
self.assertEquals((1,2,3), (r,g,b))
def test_length(self):
# should be able to unpack to r,g,b,a and r,g,b
c = pygame.Color(1,2,3,4)
self.assertEquals(len(c), 4)
c.set_length(3)
self.assertEquals(len(c), 3)
# it keeps the old alpha anyway...
self.assertEquals(c.a, 4)
# however you can't get the alpha in this way:
self.assertRaises (IndexError, lambda x:c[x], 4)
c.set_length(4)
self.assertEquals(len(c), 4)
self.assertEquals(len(c), 4)
self.assertRaises (ValueError, c.set_length, 5)
self.assertRaises (ValueError, c.set_length, -1)
self.assertRaises (ValueError, c.set_length, 0)
self.assertRaises (ValueError, c.set_length, pow(2,long_(33)))
def test_case_insensitivity_of_string_args(self):
self.assertEquals(pygame.color.Color('red'), pygame.color.Color('Red'))
def test_color (self):
c = pygame.Color (10, 20, 30, 40)
self.assertEquals (c.r, 10)
self.assertEquals (c.g, 20)
self.assertEquals (c.b, 30)
self.assertEquals (c.a, 40)
c = pygame.Color ("indianred3")
self.assertEquals (c.r, 205)
self.assertEquals (c.g, 85)
self.assertEquals (c.b, 85)
self.assertEquals (c.a, 255)
c = pygame.Color (0xAABBCCDD)
self.assertEquals (c.r, 0xAA)
self.assertEquals (c.g, 0xBB)
self.assertEquals (c.b, 0xCC)
self.assertEquals (c.a, 0xDD)
self.assertRaises (ValueError, pygame.Color, 257, 10, 105, 44)
self.assertRaises (ValueError, pygame.Color, 10, 257, 105, 44)
self.assertRaises (ValueError, pygame.Color, 10, 105, 257, 44)
self.assertRaises (ValueError, pygame.Color, 10, 105, 44, 257)
def test_rgba (self):
c = pygame.Color (0)
self.assertEquals (c.r, 0)
self.assertEquals (c.g, 0)
self.assertEquals (c.b, 0)
self.assertEquals (c.a, 0)
# Test simple assignments
c.r = 123
self.assertEquals (c.r, 123)
self.assertRaises (ValueError, _assignr, c, 537)
self.assertEquals (c.r, 123)
self.assertRaises (ValueError, _assignr, c, -3)
self.assertEquals (c.r, 123)
c.g = 55
self.assertEquals (c.g, 55)
self.assertRaises (ValueError, _assigng, c, 348)
self.assertEquals (c.g, 55)
self.assertRaises (ValueError, _assigng, c, -44)
self.assertEquals (c.g, 55)
c.b = 77
self.assertEquals (c.b, 77)
self.assertRaises (ValueError, _assignb, c, 256)
self.assertEquals (c.b, 77)
self.assertRaises (ValueError, _assignb, c, -12)
self.assertEquals (c.b, 77)
c.a = 255
self.assertEquals (c.a, 255)
self.assertRaises (ValueError, _assigna, c, 312)
self.assertEquals (c.a, 255)
self.assertRaises (ValueError, _assigna, c, -10)
self.assertEquals (c.a, 255)
def test_repr (self):
c = pygame.Color (68, 38, 26, 69)
t = "(68, 38, 26, 69)"
self.assertEquals (repr (c), t)
def test_add (self):
c1 = pygame.Color (0)
self.assertEquals (c1.r, 0)
self.assertEquals (c1.g, 0)
self.assertEquals (c1.b, 0)
self.assertEquals (c1.a, 0)
c2 = pygame.Color (20, 33, 82, 193)
self.assertEquals (c2.r, 20)
self.assertEquals (c2.g, 33)
self.assertEquals (c2.b, 82)
self.assertEquals (c2.a, 193)
c3 = c1 + c2
self.assertEquals (c3.r, 20)
self.assertEquals (c3.g, 33)
self.assertEquals (c3.b, 82)
self.assertEquals (c3.a, 193)
c3 = c3 + c2
self.assertEquals (c3.r, 40)
self.assertEquals (c3.g, 66)
self.assertEquals (c3.b, 164)
self.assertEquals (c3.a, 255)
def test_sub (self):
c1 = pygame.Color (0xFFFFFFFF)
self.assertEquals (c1.r, 255)
self.assertEquals (c1.g, 255)
self.assertEquals (c1.b, 255)
self.assertEquals (c1.a, 255)
c2 = pygame.Color (20, 33, 82, 193)
self.assertEquals (c2.r, 20)
self.assertEquals (c2.g, 33)
self.assertEquals (c2.b, 82)
self.assertEquals (c2.a, 193)
c3 = c1 - c2
self.assertEquals (c3.r, 235)
self.assertEquals (c3.g, 222)
self.assertEquals (c3.b, 173)
self.assertEquals (c3.a, 62)
c3 = c3 - c2
self.assertEquals (c3.r, 215)
self.assertEquals (c3.g, 189)
self.assertEquals (c3.b, 91)
self.assertEquals (c3.a, 0)
def test_mul (self):
c1 = pygame.Color (0x01010101)
self.assertEquals (c1.r, 1)
self.assertEquals (c1.g, 1)
self.assertEquals (c1.b, 1)
self.assertEquals (c1.a, 1)
c2 = pygame.Color (2, 5, 3, 22)
self.assertEquals (c2.r, 2)
self.assertEquals (c2.g, 5)
self.assertEquals (c2.b, 3)
self.assertEquals (c2.a, 22)
c3 = c1 * c2
self.assertEquals (c3.r, 2)
self.assertEquals (c3.g, 5)
self.assertEquals (c3.b, 3)
self.assertEquals (c3.a, 22)
c3 = c3 * c2
self.assertEquals (c3.r, 4)
self.assertEquals (c3.g, 25)
self.assertEquals (c3.b, 9)
self.assertEquals (c3.a, 255)
def test_div (self):
c1 = pygame.Color (0x80808080)
self.assertEquals (c1.r, 128)
self.assertEquals (c1.g, 128)
self.assertEquals (c1.b, 128)
self.assertEquals (c1.a, 128)
c2 = pygame.Color (2, 4, 8, 16)
self.assertEquals (c2.r, 2)
self.assertEquals (c2.g, 4)
self.assertEquals (c2.b, 8)
self.assertEquals (c2.a, 16)
c3 = c1 // c2
self.assertEquals (c3.r, 64)
self.assertEquals (c3.g, 32)
self.assertEquals (c3.b, 16)
self.assertEquals (c3.a, 8)
c3 = c3 // c2
self.assertEquals (c3.r, 32)
self.assertEquals (c3.g, 8)
self.assertEquals (c3.b, 2)
self.assertEquals (c3.a, 0)
def test_mod (self):
c1 = pygame.Color (0xFFFFFFFF)
self.assertEquals (c1.r, 255)
self.assertEquals (c1.g, 255)
self.assertEquals (c1.b, 255)
self.assertEquals (c1.a, 255)
c2 = pygame.Color (2, 4, 8, 16)
self.assertEquals (c2.r, 2)
self.assertEquals (c2.g, 4)
self.assertEquals (c2.b, 8)
self.assertEquals (c2.a, 16)
c3 = c1 % c2
self.assertEquals (c3.r, 1)
self.assertEquals (c3.g, 3)
self.assertEquals (c3.b, 7)
self.assertEquals (c3.a, 15)
def test_float (self):
c = pygame.Color (0xCC00CC00)
self.assertEquals (c.r, 204)
self.assertEquals (c.g, 0)
self.assertEquals (c.b, 204)
self.assertEquals (c.a, 0)
self.assertEquals (float (c), float (0xCC00CC00))
c = pygame.Color (0x33727592)
self.assertEquals (c.r, 51)
self.assertEquals (c.g, 114)
self.assertEquals (c.b, 117)
self.assertEquals (c.a, 146)
self.assertEquals (float (c), float (0x33727592))
def test_oct (self):
c = pygame.Color (0xCC00CC00)
self.assertEquals (c.r, 204)
self.assertEquals (c.g, 0)
self.assertEquals (c.b, 204)
self.assertEquals (c.a, 0)
self.assertEquals (oct (c), oct (0xCC00CC00))
c = pygame.Color (0x33727592)
self.assertEquals (c.r, 51)
self.assertEquals (c.g, 114)
self.assertEquals (c.b, 117)
self.assertEquals (c.a, 146)
self.assertEquals (oct (c), oct (0x33727592))
def test_hex (self):
c = pygame.Color (0xCC00CC00)
self.assertEquals (c.r, 204)
self.assertEquals (c.g, 0)
self.assertEquals (c.b, 204)
self.assertEquals (c.a, 0)
self.assertEquals (hex (c), hex (0xCC00CC00))
c = pygame.Color (0x33727592)
self.assertEquals (c.r, 51)
self.assertEquals (c.g, 114)
self.assertEquals (c.b, 117)
self.assertEquals (c.a, 146)
self.assertEquals (hex (c), hex (0x33727592))
def test_webstyle(self):
c = pygame.Color ("#CC00CC11")
self.assertEquals (c.r, 204)
self.assertEquals (c.g, 0)
self.assertEquals (c.b, 204)
self.assertEquals (c.a, 17)
self.assertEquals (hex (c), hex (0xCC00CC11))
c = pygame.Color ("#CC00CC")
self.assertEquals (c.r, 204)
self.assertEquals (c.g, 0)
self.assertEquals (c.b, 204)
self.assertEquals (c.a, 255)
self.assertEquals (hex (c), hex (0xCC00CCFF))
c = pygame.Color ("0xCC00CC11")
self.assertEquals (c.r, 204)
self.assertEquals (c.g, 0)
self.assertEquals (c.b, 204)
self.assertEquals (c.a, 17)
self.assertEquals (hex (c), hex (0xCC00CC11))
c = pygame.Color ("0xCC00CC")
self.assertEquals (c.r, 204)
self.assertEquals (c.g, 0)
self.assertEquals (c.b, 204)
self.assertEquals (c.a, 255)
self.assertEquals (hex (c), hex (0xCC00CCFF))
self.assertRaises (ValueError, pygame.Color, "#cc00qq")
self.assertRaises (ValueError, pygame.Color, "0xcc00qq")
self.assertRaises (ValueError, pygame.Color, "09abcdef")
self.assertRaises (ValueError, pygame.Color, "09abcde")
self.assertRaises (ValueError, pygame.Color, "quarky")
def test_int (self):
# This will be a long
c = pygame.Color (0xCC00CC00)
self.assertEquals (c.r, 204)
self.assertEquals (c.g, 0)
self.assertEquals (c.b, 204)
self.assertEquals (c.a, 0)
self.assertEquals (int (c), int (0xCC00CC00))
# This will be an int
c = pygame.Color (0x33727592)
self.assertEquals (c.r, 51)
self.assertEquals (c.g, 114)
self.assertEquals (c.b, 117)
self.assertEquals (c.a, 146)
self.assertEquals (int (c), int (0x33727592))
def test_long (self):
# This will be a long
c = pygame.Color (0xCC00CC00)
self.assertEquals (c.r, 204)
self.assertEquals (c.g, 0)
self.assertEquals (c.b, 204)
self.assertEquals (c.a, 0)
self.assertEquals (long_ (c), long_ (0xCC00CC00))
# This will be an int
c = pygame.Color (0x33727592)
self.assertEquals (c.r, 51)
self.assertEquals (c.g, 114)
self.assertEquals (c.b, 117)
self.assertEquals (c.a, 146)
self.assertEquals (long_ (c), long_ (0x33727592))
def test_normalize (self):
c = pygame.Color (204, 38, 194, 55)
self.assertEquals (c.r, 204)
self.assertEquals (c.g, 38)
self.assertEquals (c.b, 194)
self.assertEquals (c.a, 55)
t = c.normalize ()
self.assertAlmostEquals (t[0], 0.800000, 5)
self.assertAlmostEquals (t[1], 0.149016, 5)
self.assertAlmostEquals (t[2], 0.760784, 5)
self.assertAlmostEquals (t[3], 0.215686, 5)
def test_len (self):
c = pygame.Color (204, 38, 194, 55)
self.assertEquals (len (c), 4)
def test_get_item (self):
c = pygame.Color (204, 38, 194, 55)
self.assertEquals (c[0], 204)
self.assertEquals (c[1], 38)
self.assertEquals (c[2], 194)
self.assertEquals (c[3], 55)
def test_set_item (self):
c = pygame.Color (204, 38, 194, 55)
self.assertEquals (c[0], 204)
self.assertEquals (c[1], 38)
self.assertEquals (c[2], 194)
self.assertEquals (c[3], 55)
c[0] = 33
self.assertEquals (c[0], 33)
c[1] = 48
self.assertEquals (c[1], 48)
c[2] = 173
self.assertEquals (c[2], 173)
c[3] = 213
self.assertEquals (c[3], 213)
# Now try some 'invalid' ones
self.assertRaises (ValueError, _assign_item, c, 0, 95.485)
self.assertEquals (c[0], 33)
self.assertRaises (ValueError, _assign_item, c, 1, -83)
self.assertEquals (c[1], 48)
self.assertRaises (ValueError, _assign_item, c, 2, "Hello")
self.assertEquals (c[2], 173)
def test_Color_type_works_for_Surface_get_and_set_colorkey(self):
s = pygame.Surface((32, 32))
c = pygame.Color(33, 22, 11, 255)
s.set_colorkey(c)
get_r, get_g, get_b, get_a = s.get_colorkey()
self.assert_(get_r == c.r)
self.assert_(get_g == c.g)
self.assert_(get_b == c.b)
self.assert_(get_a == c.a)
########## HSLA, HSVA, CMY, I1I2I3 ALL ELEMENTS WITHIN SPECIFIED RANGE #########
def test_hsla__all_elements_within_limits (self):
for c in rgba_combos_Color_generator():
h, s, l, a = c.hsla
self.assert_(0 <= h <= 360)
self.assert_(0 <= s <= 100)
self.assert_(0 <= l <= 100)
self.assert_(0 <= a <= 100)
def test_hsva__all_elements_within_limits (self):
for c in rgba_combos_Color_generator():
h, s, v, a = c.hsva
self.assert_(0 <= h <= 360)
self.assert_(0 <= s <= 100)
self.assert_(0 <= v <= 100)
self.assert_(0 <= a <= 100)
def test_cmy__all_elements_within_limits (self):
for c in rgba_combos_Color_generator():
c, m, y = c.cmy
self.assert_(0 <= c <= 1)
self.assert_(0 <= m <= 1)
self.assert_(0 <= y <= 1)
def test_i1i2i3__all_elements_within_limits (self):
for c in rgba_combos_Color_generator():
i1, i2, i3 = c.i1i2i3
self.assert_( 0 <= i1 <= 1)
self.assert_( -0.5 <= i2 <= 0.5)
self.assert_( -0.5 <= i3 <= 0.5)
####################### COLORSPACE PROPERTY SANITY TESTS #######################
def colorspaces_converted_should_not_raise (self, prop):
fails = 0
x = 0
for c in rgba_combos_Color_generator():
x += 1
other = pygame.Color(0)
try:
setattr(other, prop, getattr(c, prop))
#eg other.hsla = c.hsla
except ValueError:
fails += 1
self.assert_(x > 0, "x is combination counter, 0 means no tests!")
self.assert_((fails, x) == (0, x))
def test_hsla__sanity_testing_converted_should_not_raise (self):
self.colorspaces_converted_should_not_raise('hsla')
def test_hsva__sanity_testing_converted_should_not_raise (self):
self.colorspaces_converted_should_not_raise('hsva')
def test_cmy__sanity_testing_converted_should_not_raise (self):
self.colorspaces_converted_should_not_raise('cmy')
def test_i1i2i3__sanity_testing_converted_should_not_raise (self):
self.colorspaces_converted_should_not_raise('i1i2i3')
################################################################################
def colorspaces_converted_should_equate_bar_rounding (self, prop):
for c in rgba_combos_Color_generator():
other = pygame.Color(0)
try:
setattr(other, prop, getattr(c, prop))
#eg other.hsla = c.hsla
self.assert_(abs(other.r - c.r) <= 1)
self.assert_(abs(other.b - c.b) <= 1)
self.assert_(abs(other.g - c.g) <= 1)
# CMY and I1I2I3 do not care about the alpha
if not prop in ("cmy", "i1i2i3"):
self.assert_(abs(other.a - c.a) <= 1)
except ValueError:
pass # other tests will notify, this tests equation
def test_hsla__sanity_testing_converted_should_equate_bar_rounding(self):
self.colorspaces_converted_should_equate_bar_rounding('hsla')
def test_hsva__sanity_testing_converted_should_equate_bar_rounding(self):
self.colorspaces_converted_should_equate_bar_rounding('hsva')
def test_cmy__sanity_testing_converted_should_equate_bar_rounding(self):
self.colorspaces_converted_should_equate_bar_rounding('cmy')
def test_i1i2i3__sanity_testing_converted_should_equate_bar_rounding(self):
self.colorspaces_converted_should_equate_bar_rounding('i1i2i3')
################################################################################
def test_correct_gamma__verified_against_python_implementation(self):
"|tags:slow|"
# gamma_correct defined at top of page
gammas = [i / 10.0 for i in range(1, 31)] # [0.1 ... 3.0]
gammas_len = len(gammas)
for i, c in enumerate(rgba_combos_Color_generator()):
gamma = gammas[i % gammas_len]
corrected = pygame.Color(*[gamma_correct(x, gamma)
for x in tuple(c)])
lib_corrected = c.correct_gamma(gamma)
self.assert_(corrected.r == lib_corrected.r)
self.assert_(corrected.g == lib_corrected.g)
self.assert_(corrected.b == lib_corrected.b)
self.assert_(corrected.a == lib_corrected.a)
# TODO: test against statically defined verified _correct_ values
# assert corrected.r == 125 etc.
def test_pickle(self):
import pickle
c1 = pygame.Color(1,2,3,4)
#c2 = pygame.Color(255,254,253,252)
pickle_string = pickle.dumps(c1)
c1_frompickle = pickle.loads(pickle_string)
self.assertEqual(c1,c1_frompickle)
################################################################################
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 7,961,630,428,869,945,000 | 34.719543 | 83 | 0.540413 | false |
s6530085/FundSpider | Fund/analysis.py | 1 | 8303 | # -*- coding: utf-8 -*-
__author__ = 'study_sun'
from entity import FundInfo
from spider_base.analysis import *
from spider_base.convenient import *
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from collector import FundCollector
import os
# Database analyzer; it assumes the database has already been downloaded, so run main first
class FundAnalysis(SBAnalysis):
def __init__(self, path='', db_name=FundCollector.DATABASE_NAME):
# This part is a bit of a hassle
# self.stock_analysis = FundAnalysis('..'+os.sep+'stock'+os.sep+StockCollector.DATABASE_NAME)
super(FundAnalysis, self).__init__(path+db_name)
# code and name lookups only match against their respective columns
def querycode(self, code, order='', isasc=True):
return self.querybycol(FundInfo.CODE_KEY, code, order, isasc)
def queryshortname(self, name, order='', isasc=True):
return self.querybycol(FundInfo.SHORTNAME_KEY, name, order, isasc)
def queryname(self, name, order='', isasc=True):
return self.querybycol(FundInfo.NAME_KEY, name, order, isasc)
def querytrack(self, track, order='', isasc=True):
return self.querybycol(FundInfo.TRACK_KEY, track, order, isasc)
def querycompare(self, track, order='', isasc=True):
return self.querybycol(FundInfo.COMPARE_KEY, track, order, isasc)
def querystyle(self, style, order="", isasc=True):
return self.querybycol(FundInfo.STYLE_KEY, style, order, isasc)
# Other columns are not wrapped individually; use querybycol directly if you need to search them
def querybycol(self, colname, colvalue, order='', isasc=True):
sql = '''
SELECT * FROM {table} WHERE {colname} LIKE "%{colvalue}%"
'''.format(table=FundCollector.DATABASE_TABLE_NAME, colname=colname, colvalue=colvalue)
if len(order) > 0 :
sql += ' ORDER BY {order} {asc}'.format(order=order, asc='ASC' if isasc else 'DESC')
return self.query(sql)
# Usually searches across the name, tracking benchmark, investment scope and strategy
def querykeyword(self, keyword):
# The strategy field is mostly boilerplate, so searching it is pointless and it is skipped
sql = '''
SELECT * FROM {table} WHERE {name} LIKE "%{keyword}%" OR {compare} LIKE "%{keyword}%" OR {track} LIKE "%{keyword}%" OR {limits} LIKE "%{keyword}%"
'''.format(table=FundCollector.DATABASE_TABLE_NAME, name=FundInfo.NAME_KEY, compare=FundInfo.COMPARE_KEY, track=FundInfo.TRACK_KEY, limits=FundInfo.LIMITS_KEY, keyword=keyword)
return self.query(sql)
# Look up star fund managers; quarterly reporting means the data may lag. A company can be supplied to disambiguate managers with the same name
def querymanager(self, manager, company=''):
sql = '''
SELECT * FROM {table} WHERE {manager_key} LIKE '%{manager}%'
'''.format(table=FundCollector.DATABASE_TABLE_NAME, manager_key=FundInfo.MANAGER_KEY, manager=manager)
if len(company) > 0:
sql += ' AND {company_key} LIKE "%{company}%"'.format(company_key=FundInfo.COMPANY_KEY, company=company)
sql += ';'
return self.query(sql)
# Takes a ready-made SQL statement; in principle every query helper ends up calling this
def query(self, sql):
result = self.db.cursor().execute(sql)
results = []
for item in result:
f = FundInfo()
f.parse_sqlresult(item)
results.append(f)
return results
# Holding percentages are now taken into account, so funds whose combined weight in the
# given stocks is highest come first. This cannot handle the case where you watch two stocks
# a and b, fund A holds 9% of a and 1% of b while fund B holds 1% of a and 10% of b:
# B would rank ahead of A even though a actually rose 10% and b only rose 2%, so A should
# really come first. The only fixes are to pass weights or to eyeball the results yourself.
# Passing stocks and weights is mainly meant for arbitrage, but funds only publish holdings
# quarterly, so they may already have rotated out; invest at your own risk.
# Either omit weights entirely or pass them on a consistent scale; integers are recommended
def querystocks(self, stocks, weights=[], cap=10):
all = self.db.cursor().execute('SELECT * FROM {table}'.format(table=FundCollector.DATABASE_TABLE_NAME))
results = []
for item in all:
f = FundInfo()
f.parse_sqlresult(item)
results.append(f)
# Only count funds that actually hold the watched stocks, otherwise the result is meaningless
real_results = []
for fundinfo in results:
fundinfo.inter = 0
if len(fundinfo.stocks) > 1:
for (index, stock_per) in enumerate(fundinfo.stocks):
if len(stock_per.split('-')) != 2:
continue
stock, per = stock_per.split('-')
if stock in stocks:
# If no weights were given, treat each weight as 1
if len(weights) > 0:
i = stocks.index(stock)
fundinfo.inter += float(per.split('%')[0]) * float(weights[i])
else:
fundinfo.inter += float(per.split('%')[0])
if fundinfo.inter > 0:
real_results.append(fundinfo)
real_results.sort(lambda x,y: int(y.inter-x.inter))
return real_results[0:cap]
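# Usage sketch (illustrative, not part of the original code): rank the funds most
# exposed to two watched stocks, weighting the first one twice as heavily.
# The stock codes below are hypothetical placeholders.
#   analysis = FundAnalysis()
#   top_funds = analysis.querystocks(['600036', '601318'], weights=[2, 1], cap=5)
#   for fund in top_funds:
#       print fund.code, fund.shortname, fund.inter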
# Estimate the institutional holdings of a stock; this can only be a rough figure based on top-ten holdings
def querystockinstitutehold(self, stock):
all = self.db.cursor().execute('SELECT * FROM {table}'.format(table=FundCollector.DATABASE_TABLE_NAME))
hold = 0
for item in all:
f = FundInfo()
f.parse_sqlresult(item)
if len(f.stocks) > 1:
for (index, stock_per) in enumerate(f.stocks):
if len(stock_per.split('-')) != 2:
continue
s, per = stock_per.split('-')
if s == stock:
hold += float(per.split('%')[0]) * f.size
return hold
# Get the funds benchmarked against an index (active vs. passive), sorted by yield. ftype 0 (default) searches active funds, 1 searches index/feeder funds, 2 searches all
def querysortedbench(self, bench, ftype=0):
sql = '''
SELECT * FROM {table} WHERE ({compare} LIKE "%{keyword}%" OR {track} LIKE "%{keyword}%")
'''.format(table=FundCollector.DATABASE_TABLE_NAME, keyword=bench, compare=FundInfo.COMPARE_KEY, track=FundInfo.TRACK_KEY)
# Equity and hybrid funds are generally considered active, while index, feeder and ETF funds count as passive
if ftype==0:
append = '''
AND ({type} LIKE "%{stock}%" OR {type} LIKE "%{composition}%")
'''.format(type=FundInfo.TYPE_KEY, stock="股票型", composition="混合型")
sql += append
elif ftype==1:
append = '''
AND ({type} LIKE "%{index}%" OR {type} LIKE "%{connect}%" OR {type} LIKE "%{etf}%")
'''.format(type=FundInfo.TYPE_KEY, index="股票指数", connect="联接", etf="ETF")
sql += append
sql += " ORDER BY {order} DESC;".format(order=FundInfo.ANNUALYIELD_KEY)
return self.query(sql)
# self.inratio = sqlresult[12]
# self.std = sqlresult[13]
# self.sharperatio = sqlresult[14]
# self.inforatio = sqlresult[15]
# self.bias = sqlresult[16]
# self.stocks = sqlresult[17].split(u',')
# self.annualyield = sqlresult[18]
# self.annualrank = sqlresult[19]
# self.style = sqlresult[20]
# self.fee = sqlresult[21]
def _printfunds(funds, simplify=True):
print 'funds count is ' + str(len(funds))
for fund in funds:
if simplify:
print fund.code, fund.shortname, fund.url, fund.track, fund.compare,
print_container(fund.stocks, ' ')
print "规模" + str(fund.size) + "亿", "年化收益率" + str(int(fund.annualyield*100)) + "%", "收益排名前" + str(int(fund.annualrank*100)) + "%", "夏普率" + str(fund.sharperatio*100)+"%", "追踪偏差" + str(fund.bias*100) + '%\n'
else:
print fund
if __name__ == "__main__":
a = FundAnalysis()
_printfunds(a.querytrack('中证银行'))
# _printfunds(a.querymanager("杨飞", company="华泰"))
# for a,b in enumerate('a,b,c'):
# print a, b
# printfunds(a.querycode('161227'), False)
# _printfunds(a.querymanager('杨飞', '国泰'))
| mit | -8,651,986,051,856,601,000 | 40.252809 | 216 | 0.584094 | false |
uranusjr/pycontw2016 | src/core/models.py | 1 | 1196 | from django.apps import apps
from django.db import models
from .validators import EAWMaxLengthValidator
class BigForeignKey(models.ForeignKey):
def db_type(self, connection):
""" Adds support for foreign keys to big integers as primary keys.
Django's AutoField is actually an IntegerField (SQL integer field),
but in some cases we are using bigint on PostgreSQL without Django
knowing it. So we continue to trick Django here, swapping its field
type detection, and just tells it to use bigint.
:seealso: Migrations in the ``postgres`` app.
"""
presumed_type = super().db_type(connection)
if apps.is_installed('postgres') and presumed_type == 'integer':
return 'bigint'
return presumed_type
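# Usage sketch (illustrative, not part of the original file): point a BigForeignKey
# at a model whose primary key is stored as a PostgreSQL bigint. `Order` here is a
# hypothetical model name.
#
#   class Ticket(models.Model):
#       order = BigForeignKey('Order')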
class EAWTextField(models.TextField):
"""Derived TextField that checks for its content's EAW lengh.
This adds an extra validator that counts EAW wide characters as two
instead of one.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.max_length is not None:
self.validators.append(EAWMaxLengthValidator(self.max_length))
| mit | -364,286,368,662,972,740 | 35.242424 | 75 | 0.675585 | false |
winhamwr/django-xls-fixtures | setup.py | 1 | 1281 | import os
from distutils.core import setup
f = open('README.rst')
readme = f.read()
f.close()
def find_packages(root):
# so we don't depend on setuptools; from the Storm ORM setup.py
packages = []
for directory, subdirectories, files in os.walk(root):
if '__init__.py' in files:
packages.append(directory.replace(os.sep, '.'))
return packages
setup(
name = 'django-xls-fixtures',
version = '0.1dev',
description = "Excel-based fixtures for Django that don't make you edit id's and that handle dependencies for you",
long_description=readme,
author = 'Wes Winham, Trey Peek',
author_email = '[email protected]',
license = 'BSD',
url = 'http://github.com/winhamwr/django-xls-fixtures/tree/master',
classifiers = [
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries',
],
packages = find_packages('xls_fixtures'),
install_requires=['fixture', 'xlrd'],
)
| bsd-3-clause | -8,427,714,608,080,681,000 | 32.710526 | 119 | 0.625293 | false |
dancingdan/tensorflow | tensorflow/python/saved_model/signature_def_utils_impl.py | 2 | 12452 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SignatureDef utility functions implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.framework import ops
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import utils
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export('saved_model.build_signature_def',
'saved_model.signature_def_utils.build_signature_def')
@deprecation.deprecated_endpoints(
'saved_model.signature_def_utils.build_signature_def')
def build_signature_def(inputs=None, outputs=None, method_name=None):
"""Utility function to build a SignatureDef protocol buffer.
Args:
inputs: Inputs of the SignatureDef defined as a proto map of string to
tensor info.
outputs: Outputs of the SignatureDef defined as a proto map of string to
tensor info.
method_name: Method name of the SignatureDef as a string.
Returns:
A SignatureDef protocol buffer constructed based on the supplied arguments.
"""
signature_def = meta_graph_pb2.SignatureDef()
if inputs is not None:
for item in inputs:
signature_def.inputs[item].CopyFrom(inputs[item])
if outputs is not None:
for item in outputs:
signature_def.outputs[item].CopyFrom(outputs[item])
if method_name is not None:
signature_def.method_name = method_name
return signature_def
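# Usage sketch (illustrative, not part of the original module): assuming
# `input_tensor` and `output_tensor` are tensors in the default graph, a
# signature can be assembled from their TensorInfo protos like so:
#
#   inputs = {'x': utils.build_tensor_info(input_tensor)}
#   outputs = {'y': utils.build_tensor_info(output_tensor)}
#   sig = build_signature_def(inputs, outputs,
#                             signature_constants.PREDICT_METHOD_NAME)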
@tf_export('saved_model.regression_signature_def',
'saved_model.signature_def_utils.regression_signature_def')
@deprecation.deprecated_endpoints(
'saved_model.signature_def_utils.regression_signature_def')
def regression_signature_def(examples, predictions):
"""Creates regression signature from given examples and predictions.
This function produces signatures intended for use with the TensorFlow Serving
Regress API (tensorflow_serving/apis/prediction_service.proto), and so
constrains the input and output types to those allowed by TensorFlow Serving.
Args:
examples: A string `Tensor`, expected to accept serialized tf.Examples.
predictions: A float `Tensor`.
Returns:
A regression-flavored signature_def.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('Regression examples cannot be None.')
if not isinstance(examples, ops.Tensor):
raise ValueError('Regression examples must be a string Tensor.')
if predictions is None:
raise ValueError('Regression predictions cannot be None.')
input_tensor_info = utils.build_tensor_info(examples)
if input_tensor_info.dtype != types_pb2.DT_STRING:
raise ValueError('Regression examples must be a string Tensor.')
signature_inputs = {signature_constants.REGRESS_INPUTS: input_tensor_info}
output_tensor_info = utils.build_tensor_info(predictions)
if output_tensor_info.dtype != types_pb2.DT_FLOAT:
raise ValueError('Regression output must be a float Tensor.')
signature_outputs = {signature_constants.REGRESS_OUTPUTS: output_tensor_info}
signature_def = build_signature_def(
signature_inputs, signature_outputs,
signature_constants.REGRESS_METHOD_NAME)
return signature_def
@tf_export('saved_model.classification_signature_def',
'saved_model.signature_def_utils.classification_signature_def')
@deprecation.deprecated_endpoints(
'saved_model.signature_def_utils.classification_signature_def')
def classification_signature_def(examples, classes, scores):
"""Creates classification signature from given examples and predictions.
This function produces signatures intended for use with the TensorFlow Serving
Classify API (tensorflow_serving/apis/prediction_service.proto), and so
constrains the input and output types to those allowed by TensorFlow Serving.
Args:
examples: A string `Tensor`, expected to accept serialized tf.Examples.
classes: A string `Tensor`. Note that the ClassificationResponse message
requires that class labels are strings, not integers or anything else.
scores: a float `Tensor`.
Returns:
A classification-flavored signature_def.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('Classification examples cannot be None.')
if not isinstance(examples, ops.Tensor):
raise ValueError('Classification examples must be a string Tensor.')
if classes is None and scores is None:
raise ValueError('Classification classes and scores cannot both be None.')
input_tensor_info = utils.build_tensor_info(examples)
if input_tensor_info.dtype != types_pb2.DT_STRING:
raise ValueError('Classification examples must be a string Tensor.')
signature_inputs = {signature_constants.CLASSIFY_INPUTS: input_tensor_info}
signature_outputs = {}
if classes is not None:
classes_tensor_info = utils.build_tensor_info(classes)
if classes_tensor_info.dtype != types_pb2.DT_STRING:
raise ValueError('Classification classes must be a string Tensor.')
signature_outputs[signature_constants.CLASSIFY_OUTPUT_CLASSES] = (
classes_tensor_info)
if scores is not None:
scores_tensor_info = utils.build_tensor_info(scores)
if scores_tensor_info.dtype != types_pb2.DT_FLOAT:
raise ValueError('Classification scores must be a float Tensor.')
signature_outputs[signature_constants.CLASSIFY_OUTPUT_SCORES] = (
scores_tensor_info)
signature_def = build_signature_def(
signature_inputs, signature_outputs,
signature_constants.CLASSIFY_METHOD_NAME)
return signature_def
@tf_export('saved_model.predict_signature_def',
'saved_model.signature_def_utils.predict_signature_def')
@deprecation.deprecated_endpoints(
'saved_model.signature_def_utils.predict_signature_def')
def predict_signature_def(inputs, outputs):
"""Creates prediction signature from given inputs and outputs.
This function produces signatures intended for use with the TensorFlow Serving
Predict API (tensorflow_serving/apis/prediction_service.proto). This API
imposes no constraints on the input and output types.
Args:
inputs: dict of string to `Tensor`.
outputs: dict of string to `Tensor`.
Returns:
A prediction-flavored signature_def.
Raises:
ValueError: If inputs or outputs is `None`.
"""
if inputs is None or not inputs:
raise ValueError('Prediction inputs cannot be None or empty.')
if outputs is None or not outputs:
raise ValueError('Prediction outputs cannot be None or empty.')
signature_inputs = {key: utils.build_tensor_info(tensor)
for key, tensor in inputs.items()}
signature_outputs = {key: utils.build_tensor_info(tensor)
for key, tensor in outputs.items()}
signature_def = build_signature_def(
signature_inputs, signature_outputs,
signature_constants.PREDICT_METHOD_NAME)
return signature_def
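# Usage sketch (illustrative, not part of the original module): wiring a
# hypothetical `images_tensor` input and `scores_tensor` output into a Predict
# signature before handing it to a SavedModel builder:
#
#   sig = predict_signature_def(inputs={'images': images_tensor},
#                               outputs={'scores': scores_tensor})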
def supervised_train_signature_def(
inputs, loss, predictions=None, metrics=None):
return _supervised_signature_def(
signature_constants.SUPERVISED_TRAIN_METHOD_NAME, inputs, loss=loss,
predictions=predictions, metrics=metrics)
def supervised_eval_signature_def(
inputs, loss, predictions=None, metrics=None):
return _supervised_signature_def(
signature_constants.SUPERVISED_EVAL_METHOD_NAME, inputs, loss=loss,
predictions=predictions, metrics=metrics)
def _supervised_signature_def(
method_name, inputs, loss=None, predictions=None,
metrics=None):
"""Creates a signature for training and eval data.
This function produces signatures that describe the inputs and outputs
of a supervised process, such as training or evaluation, that
results in loss, metrics, and the like. Note that this function only requires
inputs to be not None.
Args:
method_name: Method name of the SignatureDef as a string.
inputs: dict of string to `Tensor`.
loss: dict of string to `Tensor` representing computed loss.
predictions: dict of string to `Tensor` representing the output predictions.
metrics: dict of string to `Tensor` representing metric ops.
Returns:
A train- or eval-flavored signature_def.
Raises:
ValueError: If inputs or outputs is `None`.
"""
if inputs is None or not inputs:
raise ValueError('{} inputs cannot be None or empty.'.format(method_name))
signature_inputs = {key: utils.build_tensor_info(tensor)
for key, tensor in inputs.items()}
signature_outputs = {}
for output_set in (loss, predictions, metrics):
if output_set is not None:
sig_out = {key: utils.build_tensor_info(tensor)
for key, tensor in output_set.items()}
signature_outputs.update(sig_out)
signature_def = build_signature_def(
signature_inputs, signature_outputs, method_name)
return signature_def
@tf_export('saved_model.is_valid_signature',
'saved_model.signature_def_utils.is_valid_signature')
@deprecation.deprecated_endpoints(
'saved_model.signature_def_utils.is_valid_signature')
def is_valid_signature(signature_def):
"""Determine whether a SignatureDef can be served by TensorFlow Serving."""
if signature_def is None:
return False
return (_is_valid_classification_signature(signature_def) or
_is_valid_regression_signature(signature_def) or
_is_valid_predict_signature(signature_def))
def _is_valid_predict_signature(signature_def):
"""Determine whether the argument is a servable 'predict' SignatureDef."""
if signature_def.method_name != signature_constants.PREDICT_METHOD_NAME:
return False
if not signature_def.inputs.keys():
return False
if not signature_def.outputs.keys():
return False
return True
def _is_valid_regression_signature(signature_def):
"""Determine whether the argument is a servable 'regress' SignatureDef."""
if signature_def.method_name != signature_constants.REGRESS_METHOD_NAME:
return False
if (set(signature_def.inputs.keys())
!= set([signature_constants.REGRESS_INPUTS])):
return False
if (signature_def.inputs[signature_constants.REGRESS_INPUTS].dtype !=
types_pb2.DT_STRING):
return False
if (set(signature_def.outputs.keys())
!= set([signature_constants.REGRESS_OUTPUTS])):
return False
if (signature_def.outputs[signature_constants.REGRESS_OUTPUTS].dtype !=
types_pb2.DT_FLOAT):
return False
return True
def _is_valid_classification_signature(signature_def):
"""Determine whether the argument is a servable 'classify' SignatureDef."""
if signature_def.method_name != signature_constants.CLASSIFY_METHOD_NAME:
return False
if (set(signature_def.inputs.keys())
!= set([signature_constants.CLASSIFY_INPUTS])):
return False
if (signature_def.inputs[signature_constants.CLASSIFY_INPUTS].dtype !=
types_pb2.DT_STRING):
return False
allowed_outputs = set([signature_constants.CLASSIFY_OUTPUT_CLASSES,
signature_constants.CLASSIFY_OUTPUT_SCORES])
if not signature_def.outputs.keys():
return False
if set(signature_def.outputs.keys()) - allowed_outputs:
return False
if (signature_constants.CLASSIFY_OUTPUT_CLASSES in signature_def.outputs
and
signature_def.outputs[signature_constants.CLASSIFY_OUTPUT_CLASSES].dtype
!= types_pb2.DT_STRING):
return False
if (signature_constants.CLASSIFY_OUTPUT_SCORES in signature_def.outputs
and
signature_def.outputs[signature_constants.CLASSIFY_OUTPUT_SCORES].dtype !=
types_pb2.DT_FLOAT):
return False
return True
| apache-2.0 | -7,941,086,029,780,913,000 | 36.619335 | 80 | 0.730244 | false |
armab/st2contrib | packs/opsgenie/actions/get_heartbeat.py | 4 | 1262 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lib.actions import OpsGenieBaseAction
class GetHeartbeatAction(OpsGenieBaseAction):
def run(self, name):
"""
Retrieve details of heartbeat monitors in OpsGenie.
Args:
- name: Name of the heartbeat.
Returns:
- dict: Data from OpsGenie.
"""
body = {"apiKey": self.api_key,
"name": name}
data = self._req("GET",
"v1/json/heartbeat",
body=body)
return data
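# Usage sketch (illustrative): once the pack is installed, StackStorm can run this
# action from the CLI, e.g. `st2 run opsgenie.get_heartbeat name=web-01`; the exact
# action reference and heartbeat name depend on how the pack is registered.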
| apache-2.0 | 9,021,944,181,910,755,000 | 33.108108 | 74 | 0.667195 | false |
lycying/seeking | sklib/ui/uiplugin.py | 1 | 7976 | # coding:utf-8
#
# Copyright (c) 2010, guo.li <[email protected]>
# Site < http://code.google.com/p/seeking/ >
# All rights reserved.
# vim: set ft=python sw=2 ts=2 et:
#
import tempfile
import zipfile
import sys
from PyQt5.QtGui import QDialog
from PyQt5.QtGui import QTreeWidgetItem
from PyQt5.QtGui import QMenu
from PyQt5.QtGui import QAction
from PyQt5.QtGui import QApplication
from PyQt5.QtGui import QDesktopWidget
from PyQt5.QtCore import QObject
from PyQt5.QtCore import SIGNAL
from PyQt5.QtCore import QUrl
from PyQt5.QtCore import QFile
from PyQt5.QtNetwork import QHttp
from .Ui_PluginInfoDialog import Ui_PluginInfoDialog
from .Ui_PluginRequestDialog import Ui_PluginRequestDialog
from ..xplugin import PluginAdapter
from ..config import Prefs,getPrccisePath
class PluginInfoDialog(QDialog,Ui_PluginInfoDialog):
"""
the plugin info dialog
"""
def __init__(self,parent=None):
QDialog.__init__(self,parent)
self.setupUi(self)
#center this window
screen = QDesktopWidget().screenGeometry()
size = self.geometry()
self.move((screen.width()-size.width())/2, (screen.height()-size.height())/2)
for item in PluginAdapter().new().readInfos():
itree = QTreeWidgetItem()
for i in range(6):
itree.setText(i,str(item[i]))
self.pluginList.addTopLevelItem(itree)
QObject.connect(self.pluginList, SIGNAL("customContextMenuRequested (const QPoint&)"),self.__evt_contextmenu)
def __evt_contextmenu(self,point):
"""
Show the right-click context menu
"""
item = self.pluginList.currentItem()
if item:
menu = QMenu(self)
if item.text(4)=="True":
action = QAction(QApplication.translate("PluginInfoDialog", "DeActive"),self,\
triggered=lambda re,name=item.text(0):self.__evt_toggle_active(name, False))
# core plugins are not allowed to be disabled
action.setEnabled(not PluginAdapter.new().getPluginByName(item.text(0)).packageName == "__core__")
else:
action = QAction(QApplication.translate("PluginInfoDialog", "Active"),self,\
triggered=lambda re,name=item.text(0):self.__evt_toggle_active(name, True))
menu.addAction(action)
menu.exec_(self.mapToGlobal(self.pluginList.mapTo(self,point)))
def __evt_toggle_active(self,name,value):
"""
Make the plugin active or inactive
"""
item = self.pluginList.currentItem()
item.setText(4,str(value))
mod = PluginAdapter.new().getPluginByName(name)
if mod and hasattr(mod,"deactivate"):
mod.deactivate()
Prefs.new().setPluginState(name,value)
class PluginRequestDialog(QDialog,Ui_PluginRequestDialog):
"""
the plugin install dialog
"""
def __init__(self,parent=None):
QDialog.__init__(self,parent)
self.setupUi(self)
#center this window
screen = QDesktopWidget().screenGeometry()
size = self.geometry()
self.move((screen.width()-size.width())/2, (screen.height()-size.height())/2)
QObject.connect(self.pluginList, SIGNAL("customContextMenuRequested (const QPoint&)"),self.__evt_contextmenu)
QObject.connect(self.update,SIGNAL("clicked ()"),self.__evt_update)
self.__http = None
self.__downloadFile = None
self.__baseUrl = "http://localhost/"
self.plugin_txt_url.setText("%splugins.txt"%self.__baseUrl)
self.__evt_update()
def __evt_update(self):
self.progressBar.setValue(0)
self.download(QUrl(self.plugin_txt_url.text()))
def download(self,url):
"""
Download the given URL into a temporary file using QHttp
"""
if None == self.__http:
self.__http = QHttp()
QObject.connect(self.__http, SIGNAL("done(bool)"), self.__downloadFileDone)
QObject.connect(self.__http, SIGNAL("dataReadProgress(int, int)"), self.__dataReadProgress)
if QUrl(url).scheme() == 'https':
connectionMode = QHttp.ConnectionModeHttps
else:
connectionMode = QHttp.ConnectionModeHttp
self.__http.setHost(url.host(),connectionMode,url.port(80))
self.__downloadFile = QFile(tempfile.NamedTemporaryFile().name)
self.__http.get(url.path(),self.__downloadFile)
def __downloadFileDone(self, error):
"""
Private method called, after the file has been downloaded
from the internet.
@param error flag indicating an error condition (boolean)
"""
if self.__downloadFile.exists():
filename = self.__downloadFile.fileName()
# Note: the file must be flushed before it is read back
self.__downloadFile.flush()
if not zipfile.is_zipfile(filename):
plugins_info = open(filename).readlines()
self.pluginList.clear()
for plugin_info in plugins_info:
try:
plugin_name = plugin_info.split('|')[0]
plugin_version = plugin_info.split('|')[1]
plugin_instruction = plugin_info.split('|')[2]
plugin_author = plugin_info.split('|')[3]
itree = QTreeWidgetItem()
itree.setText(0,plugin_name)
itree.setText(1,plugin_author)
itree.setText(2,plugin_version)
itree.setText(3,plugin_instruction)
self.pluginList.addTopLevelItem(itree)
except Exception as e:
raise e
else:
pluginDir = getPrccisePath("pluginsDir", "", "extdir") if sys.platform.startswith("win") else getPrccisePath("pluginsDir", "", "userdir")
tmpZip = zipfile.ZipFile(filename)
for file in tmpZip.namelist():
tmpZip.extract(file,pluginDir)
tmpZip.close()
self.result_label.setText("Success install the plugin ")
def __dataReadProgress(self, done, total):
"""
Private slot to show the download progress.
@param done number of bytes downloaded so far (integer)
@param total total bytes to be downloaded (integer)
"""
self.progressBar.setMaximum(total)
self.progressBar.setValue(done)
def __has_this_plugin(self,plugin_name):
"""
Check whether this plugin has already been installed
"""
for item in PluginAdapter().new().readInfos():
if item[0]==plugin_name:
return item
return None
def __evt_contextmenu(self,point):
"""
Show the right-click context menu
"""
item = self.pluginList.currentItem()
if item:
menu = QMenu(self)
action = QAction(QApplication.translate("default","Install plugins..."),self,triggered=lambda:self.__evt_install_plugin(item))
menu.addAction(action)
action = QAction(QApplication.translate("default","Uninstall plugins..."),self)
menu.addAction(action)
menu.exec_(self.mapToGlobal(self.pluginList.mapTo(self,point)))
def __evt_install_plugin(self,item):
filename = "plugin-%s-%s.zip"%(item.text(0),item.text(2))
if not None == self.__has_this_plugin(item.text(0)):
self.download(QUrl("%s%s"%(self.__baseUrl,filename)))
else:
self.result_label.setText("This plugin '%s' had installed "%item.text(0))
| gpl-2.0 | 572,074,812,808,509,600 | 36.800948 | 156 | 0.571464 | false |
anastasia-tarasova/indy-sdk | docs/how-tos/send-secure-msg/python/template.py | 2 | 1108 | import asyncio
import time
import re
# Step 5 code goes here, replacing the prep() stub.
async def prep(wallet_handle, my_vk, their_vk, msg):
print('prepping %s' % msg)
# Step 3 code goes here, replacing the init() stub.
async def init():
return None, None, None, None, None
# Step 6 code goes here, replacing the read() stub.
async def read(wallet_handle, my_vk):
print('reading')
async def demo():
wallet_handle, my_did, my_vk, their_did, their_vk = await init()
while True:
argv = input('> ').strip().split(' ')
cmd = argv[0].lower()
rest = ' '.join(argv[1:])
if re.match(cmd, 'prep'):
await prep(wallet_handle, my_vk, their_vk, rest)
elif re.match(cmd, 'read'):
await read(wallet_handle, my_vk)
elif re.match(cmd, 'quit'):
break
else:
print('Huh?')
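# Example session once the stubs above are implemented (illustrative):
#   > prep hello world    (encrypts/packs the message for the other party)
#   > read                (reads and decrypts the latest incoming message)
#   > quit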
if __name__ == '__main__':
try:
loop = asyncio.get_event_loop()
loop.run_until_complete(demo())
time.sleep(1) # waiting for libindy thread complete
except KeyboardInterrupt:
print('')
| apache-2.0 | 3,446,480,104,081,199,600 | 27.410256 | 68 | 0.58213 | false |
keen99/SickRage | sickbeard/metadata/generic.py | 1 | 41241 | # Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os.path
import xml.etree.cElementTree as etree
import re
import sickbeard
from sickbeard import helpers
from sickbeard.metadata import helpers as metadata_helpers
from sickbeard import logger
from sickbeard import encodingKludge as ek
from sickbeard.exceptions import ex
from sickbeard.show_name_helpers import allPossibleShowNames
from tmdb_api.tmdb_api import TMDB
import fanart
from fanart.core import Request as fanartRequest
class GenericMetadata():
"""
Base class for all metadata providers. Default behavior is meant to mostly
follow KODI 12+ metadata standards. Has support for:
- show metadata file
- episode metadata file
- episode thumbnail
- show fanart
- show poster
- show banner
- season thumbnails (poster)
- season thumbnails (banner)
- season all poster
- season all banner
"""
def __init__(self,
show_metadata=False,
episode_metadata=False,
fanart=False,
poster=False,
banner=False,
episode_thumbnails=False,
season_posters=False,
season_banners=False,
season_all_poster=False,
season_all_banner=False):
self.name = "Generic"
self._ep_nfo_extension = "nfo"
self._show_metadata_filename = "tvshow.nfo"
self.fanart_name = "fanart.jpg"
self.poster_name = "poster.jpg"
self.banner_name = "banner.jpg"
self.season_all_poster_name = "season-all-poster.jpg"
self.season_all_banner_name = "season-all-banner.jpg"
self.show_metadata = show_metadata
self.episode_metadata = episode_metadata
self.fanart = fanart
self.poster = poster
self.banner = banner
self.episode_thumbnails = episode_thumbnails
self.season_posters = season_posters
self.season_banners = season_banners
self.season_all_poster = season_all_poster
self.season_all_banner = season_all_banner
def get_config(self):
config_list = [self.show_metadata, self.episode_metadata, self.fanart, self.poster, self.banner,
self.episode_thumbnails, self.season_posters, self.season_banners, self.season_all_poster,
self.season_all_banner]
return '|'.join([str(int(x)) for x in config_list])
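# For example, a provider with only show metadata and posters enabled
# serializes as '1|0|0|1|0|0|0|0|0|0'.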
def get_id(self):
return GenericMetadata.makeID(self.name)
@staticmethod
def makeID(name):
name_id = re.sub("[+]", "plus", name)
name_id = re.sub("[^\w\d_]", "_", name_id).lower()
return name_id
def set_config(self, string):
config_list = [bool(int(x)) for x in string.split('|')]
self.show_metadata = config_list[0]
self.episode_metadata = config_list[1]
self.fanart = config_list[2]
self.poster = config_list[3]
self.banner = config_list[4]
self.episode_thumbnails = config_list[5]
self.season_posters = config_list[6]
self.season_banners = config_list[7]
self.season_all_poster = config_list[8]
self.season_all_banner = config_list[9]
def _has_show_metadata(self, show_obj):
result = ek.ek(os.path.isfile, self.get_show_file_path(show_obj))
logger.log(u"Checking if " + self.get_show_file_path(show_obj) + " exists: " + str(result), logger.DEBUG)
return result
def _has_episode_metadata(self, ep_obj):
result = ek.ek(os.path.isfile, self.get_episode_file_path(ep_obj))
logger.log(u"Checking if " + self.get_episode_file_path(ep_obj) + " exists: " + str(result), logger.DEBUG)
return result
def _has_fanart(self, show_obj):
result = ek.ek(os.path.isfile, self.get_fanart_path(show_obj))
logger.log(u"Checking if " + self.get_fanart_path(show_obj) + " exists: " + str(result), logger.DEBUG)
return result
def _has_poster(self, show_obj):
result = ek.ek(os.path.isfile, self.get_poster_path(show_obj))
logger.log(u"Checking if " + self.get_poster_path(show_obj) + " exists: " + str(result), logger.DEBUG)
return result
def _has_banner(self, show_obj):
result = ek.ek(os.path.isfile, self.get_banner_path(show_obj))
logger.log(u"Checking if " + self.get_banner_path(show_obj) + " exists: " + str(result), logger.DEBUG)
return result
def _has_episode_thumb(self, ep_obj):
location = self.get_episode_thumb_path(ep_obj)
result = location != None and ek.ek(os.path.isfile, location)
if location:
logger.log(u"Checking if " + location + " exists: " + str(result), logger.DEBUG)
return result
def _has_season_poster(self, show_obj, season):
location = self.get_season_poster_path(show_obj, season)
result = location != None and ek.ek(os.path.isfile, location)
if location:
logger.log(u"Checking if " + location + " exists: " + str(result), logger.DEBUG)
return result
def _has_season_banner(self, show_obj, season):
location = self.get_season_banner_path(show_obj, season)
result = location != None and ek.ek(os.path.isfile, location)
if location:
logger.log(u"Checking if " + location + " exists: " + str(result), logger.DEBUG)
return result
def _has_season_all_poster(self, show_obj):
result = ek.ek(os.path.isfile, self.get_season_all_poster_path(show_obj))
logger.log(u"Checking if " + self.get_season_all_poster_path(show_obj) + " exists: " + str(result),
logger.DEBUG)
return result
def _has_season_all_banner(self, show_obj):
result = ek.ek(os.path.isfile, self.get_season_all_banner_path(show_obj))
logger.log(u"Checking if " + self.get_season_all_banner_path(show_obj) + " exists: " + str(result),
logger.DEBUG)
return result
def get_show_file_path(self, show_obj):
return ek.ek(os.path.join, show_obj.location, self._show_metadata_filename)
def get_episode_file_path(self, ep_obj):
return helpers.replaceExtension(ep_obj.location, self._ep_nfo_extension)
def get_fanart_path(self, show_obj):
return ek.ek(os.path.join, show_obj.location, self.fanart_name)
def get_poster_path(self, show_obj):
return ek.ek(os.path.join, show_obj.location, self.poster_name)
def get_banner_path(self, show_obj):
return ek.ek(os.path.join, show_obj.location, self.banner_name)
def get_episode_thumb_path(self, ep_obj):
"""
Returns the path where the episode thumbnail should be stored.
ep_obj: a TVEpisode instance for which to create the thumbnail
"""
if ek.ek(os.path.isfile, ep_obj.location):
tbn_filename = ep_obj.location.rpartition(".")
if tbn_filename[0] == "":
tbn_filename = ep_obj.location + "-thumb.jpg"
else:
tbn_filename = tbn_filename[0] + "-thumb.jpg"
else:
return None
return tbn_filename
def get_season_poster_path(self, show_obj, season):
"""
Returns the full path to the file for a given season poster.
show_obj: a TVShow instance for which to generate the path
season: a season number to be used for the path. Note that season 0
means specials.
"""
# Our specials thumbnail is, well, special
if season == 0:
season_poster_filename = 'season-specials'
else:
season_poster_filename = 'season' + str(season).zfill(2)
return ek.ek(os.path.join, show_obj.location, season_poster_filename + '-poster.jpg')
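# e.g. season 3 yields 'season03-poster.jpg' and season 0 (specials) yields
# 'season-specials-poster.jpg' inside the show folder.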
def get_season_banner_path(self, show_obj, season):
"""
Returns the full path to the file for a given season banner.
show_obj: a TVShow instance for which to generate the path
season: a season number to be used for the path. Note that season 0
means specials.
"""
# Our specials thumbnail is, well, special
if season == 0:
season_banner_filename = 'season-specials'
else:
season_banner_filename = 'season' + str(season).zfill(2)
return ek.ek(os.path.join, show_obj.location, season_banner_filename + '-banner.jpg')
def get_season_all_poster_path(self, show_obj):
return ek.ek(os.path.join, show_obj.location, self.season_all_poster_name)
def get_season_all_banner_path(self, show_obj):
return ek.ek(os.path.join, show_obj.location, self.season_all_banner_name)
def _show_data(self, show_obj):
"""
This should be overridden by the implementing class. It should
provide the content of the show metadata file.
"""
return None
def _ep_data(self, ep_obj):
"""
This should be overridden by the implementing class. It should
provide the content of the episode metadata file.
"""
return None
def create_show_metadata(self, show_obj):
if self.show_metadata and show_obj and not self._has_show_metadata(show_obj):
logger.log(u"Metadata provider " + self.name + " creating show metadata for " + show_obj.name, logger.DEBUG)
return self.write_show_file(show_obj)
return False
def create_episode_metadata(self, ep_obj):
if self.episode_metadata and ep_obj and not self._has_episode_metadata(ep_obj):
logger.log(u"Metadata provider " + self.name + " creating episode metadata for " + ep_obj.prettyName(),
logger.DEBUG)
return self.write_ep_file(ep_obj)
return False
def update_show_indexer_metadata(self, show_obj):
if self.show_metadata and show_obj and self._has_show_metadata(show_obj):
logger.log(
u"Metadata provider " + self.name + " updating show indexer info metadata file for " + show_obj.name,
logger.DEBUG)
nfo_file_path = self.get_show_file_path(show_obj)
try:
with ek.ek(open, nfo_file_path, 'r') as xmlFileObj:
showXML = etree.ElementTree(file=xmlFileObj)
indexerid = showXML.find('id')
root = showXML.getroot()
if indexerid:
indexerid.text = show_obj.indexerid
else:
etree.SubElement(root, "id").text = str(show_obj.indexerid)
# Make it purdy
helpers.indentXML(root)
showXML.write(nfo_file_path)
helpers.chmodAsParent(nfo_file_path)
return True
except IOError, e:
logger.log(
u"Unable to write file to " + nfo_file_path + " - are you sure the folder is writable? " + ex(e),
logger.ERROR)
def create_fanart(self, show_obj):
if self.fanart and show_obj and not self._has_fanart(show_obj):
logger.log(u"Metadata provider " + self.name + " creating fanart for " + show_obj.name, logger.DEBUG)
return self.save_fanart(show_obj)
return False
def create_poster(self, show_obj):
if self.poster and show_obj and not self._has_poster(show_obj):
logger.log(u"Metadata provider " + self.name + " creating poster for " + show_obj.name, logger.DEBUG)
return self.save_poster(show_obj)
return False
def create_banner(self, show_obj):
if self.banner and show_obj and not self._has_banner(show_obj):
logger.log(u"Metadata provider " + self.name + " creating banner for " + show_obj.name, logger.DEBUG)
return self.save_banner(show_obj)
return False
def create_episode_thumb(self, ep_obj):
if self.episode_thumbnails and ep_obj and not self._has_episode_thumb(ep_obj):
logger.log(u"Metadata provider " + self.name + " creating episode thumbnail for " + ep_obj.prettyName(),
logger.DEBUG)
return self.save_thumbnail(ep_obj)
return False
def create_season_posters(self, show_obj):
if self.season_posters and show_obj:
result = []
for season, episodes in show_obj.episodes.iteritems(): # @UnusedVariable
if not self._has_season_poster(show_obj, season):
logger.log(u"Metadata provider " + self.name + " creating season posters for " + show_obj.name,
logger.DEBUG)
result = result + [self.save_season_posters(show_obj, season)]
return all(result)
return False
def create_season_banners(self, show_obj):
if self.season_banners and show_obj:
result = []
for season, episodes in show_obj.episodes.iteritems(): # @UnusedVariable
if not self._has_season_banner(show_obj, season):
logger.log(u"Metadata provider " + self.name + " creating season banners for " + show_obj.name,
logger.DEBUG)
result = result + [self.save_season_banners(show_obj, season)]
return all(result)
return False
def create_season_all_poster(self, show_obj):
if self.season_all_poster and show_obj and not self._has_season_all_poster(show_obj):
logger.log(u"Metadata provider " + self.name + " creating season all poster for " + show_obj.name,
logger.DEBUG)
return self.save_season_all_poster(show_obj)
return False
def create_season_all_banner(self, show_obj):
if self.season_all_banner and show_obj and not self._has_season_all_banner(show_obj):
logger.log(u"Metadata provider " + self.name + " creating season all banner for " + show_obj.name,
logger.DEBUG)
return self.save_season_all_banner(show_obj)
return False
def _get_episode_thumb_url(self, ep_obj):
"""
Returns the URL to use for downloading an episode's thumbnail. Uses
theTVDB.com and TVRage.com data.
ep_obj: a TVEpisode object for which to grab the thumb URL
"""
all_eps = [ep_obj] + ep_obj.relatedEps
# validate show
if not helpers.validateShow(ep_obj.show):
return None
# try all included episodes in case some have thumbs and others don't
for cur_ep in all_eps:
myEp = helpers.validateShow(cur_ep.show, cur_ep.season, cur_ep.episode)
if not myEp:
continue
thumb_url = getattr(myEp, 'filename', None)
if thumb_url is not None:
return thumb_url
return None
def write_show_file(self, show_obj):
"""
Generates and writes show_obj's metadata under the given path to the
filename given by get_show_file_path()
show_obj: TVShow object for which to create the metadata
path: An absolute or relative path where we should put the file. Note that
the file name will be the default show_file_name.
Note that this method expects that _show_data will return an ElementTree
object. If your _show_data returns data in another format you'll need to
override this method.
"""
data = self._show_data(show_obj)
if not data:
return False
nfo_file_path = self.get_show_file_path(show_obj)
nfo_file_dir = ek.ek(os.path.dirname, nfo_file_path)
try:
if not ek.ek(os.path.isdir, nfo_file_dir):
logger.log(u"Metadata dir didn't exist, creating it at " + nfo_file_dir, logger.DEBUG)
ek.ek(os.makedirs, nfo_file_dir)
helpers.chmodAsParent(nfo_file_dir)
logger.log(u"Writing show nfo file to " + nfo_file_path, logger.DEBUG)
nfo_file = ek.ek(open, nfo_file_path, 'w')
data.write(nfo_file, encoding="utf-8")
nfo_file.close()
helpers.chmodAsParent(nfo_file_path)
except IOError, e:
logger.log(u"Unable to write file to " + nfo_file_path + " - are you sure the folder is writable? " + ex(e),
logger.ERROR)
return False
return True
def write_ep_file(self, ep_obj):
"""
Generates and writes ep_obj's metadata under the given path with the
given filename root. Uses the episode's name with the extension in
_ep_nfo_extension.
ep_obj: TVEpisode object for which to create the metadata
file_name_path: The file name to use for this metadata. Note that the extension
will be automatically added based on _ep_nfo_extension. This should
include an absolute path.
Note that this method expects that _ep_data will return an ElementTree
object. If your _ep_data returns data in another format you'll need to
override this method.
"""
data = self._ep_data(ep_obj)
if not data:
return False
nfo_file_path = self.get_episode_file_path(ep_obj)
nfo_file_dir = ek.ek(os.path.dirname, nfo_file_path)
try:
if not ek.ek(os.path.isdir, nfo_file_dir):
logger.log(u"Metadata dir didn't exist, creating it at " + nfo_file_dir, logger.DEBUG)
ek.ek(os.makedirs, nfo_file_dir)
helpers.chmodAsParent(nfo_file_dir)
logger.log(u"Writing episode nfo file to " + nfo_file_path, logger.DEBUG)
nfo_file = ek.ek(open, nfo_file_path, 'w')
data.write(nfo_file, encoding="utf-8")
nfo_file.close()
helpers.chmodAsParent(nfo_file_path)
except IOError, e:
logger.log(u"Unable to write file to " + nfo_file_path + " - are you sure the folder is writable? " + ex(e),
logger.ERROR)
return False
return True
def save_thumbnail(self, ep_obj):
"""
Retrieves a thumbnail and saves it to the correct spot. This method should not need to
be overridden by implementing classes, changing get_episode_thumb_path and
_get_episode_thumb_url should suffice.
ep_obj: a TVEpisode object for which to generate a thumbnail
"""
file_path = self.get_episode_thumb_path(ep_obj)
if not file_path:
logger.log(u"Unable to find a file path to use for this thumbnail, not generating it", logger.DEBUG)
return False
thumb_url = self._get_episode_thumb_url(ep_obj)
# if we can't find one then give up
if not thumb_url:
logger.log(u"No thumb is available for this episode, not creating a thumb", logger.DEBUG)
return False
thumb_data = metadata_helpers.getShowImage(thumb_url)
result = self._write_image(thumb_data, file_path, ep_obj)
if not result:
return False
for cur_ep in [ep_obj] + ep_obj.relatedEps:
cur_ep.hastbn = True
return True
def save_fanart(self, show_obj, which=None):
"""
Downloads a fanart image and saves it to the filename specified by fanart_name
inside the show's root folder.
show_obj: a TVShow object for which to download fanart
"""
# use the default fanart name
fanart_path = self.get_fanart_path(show_obj)
fanart_data = self._retrieve_show_image('fanart', show_obj, which)
if not fanart_data:
logger.log(u"No fanart image was retrieved, unable to write fanart", logger.DEBUG)
return False
return self._write_image(fanart_data, fanart_path, show_obj)
def save_poster(self, show_obj, which=None):
"""
Downloads a poster image and saves it to the filename specified by poster_name
inside the show's root folder.
show_obj: a TVShow object for which to download a poster
"""
# use the default poster name
poster_path = self.get_poster_path(show_obj)
poster_data = self._retrieve_show_image('poster', show_obj, which)
if not poster_data:
logger.log(u"No show poster image was retrieved, unable to write poster", logger.DEBUG)
return False
return self._write_image(poster_data, poster_path, show_obj)
def save_banner(self, show_obj, which=None):
"""
Downloads a banner image and saves it to the filename specified by banner_name
inside the show's root folder.
show_obj: a TVShow object for which to download a banner
"""
# use the default banner name
banner_path = self.get_banner_path(show_obj)
banner_data = self._retrieve_show_image('banner', show_obj, which)
if not banner_data:
logger.log(u"No show banner image was retrieved, unable to write banner", logger.DEBUG)
return False
return self._write_image(banner_data, banner_path, show_obj)
def save_season_posters(self, show_obj, season):
"""
Saves all season posters to disk for the given show.
show_obj: a TVShow object for which to save the season thumbs
Cycles through all seasons and saves the season posters if possible. This
method should not need to be overridden by implementing classes, changing
_season_posters_dict and get_season_poster_path should be good enough.
"""
season_dict = self._season_posters_dict(show_obj, season)
result = []
# Returns a nested dictionary of season art with the season
# number as primary key. It's really overkill but gives the option
# to present to user via ui to pick down the road.
for cur_season in season_dict:
cur_season_art = season_dict[cur_season]
if len(cur_season_art) == 0:
continue
# Just grab whatever's there for now
art_id, season_url = cur_season_art.popitem() # @UnusedVariable
season_poster_file_path = self.get_season_poster_path(show_obj, cur_season)
if not season_poster_file_path:
logger.log(u"Path for season " + str(cur_season) + " came back blank, skipping this season",
logger.DEBUG)
continue
seasonData = metadata_helpers.getShowImage(season_url)
if not seasonData:
logger.log(u"No season poster data available, skipping this season", logger.DEBUG)
continue
result = result + [self._write_image(seasonData, season_poster_file_path, show_obj)]
if result:
return all(result)
else:
return False
return True
def save_season_banners(self, show_obj, season):
"""
Saves all season banners to disk for the given show.
show_obj: a TVShow object for which to save the season thumbs
Cycles through all seasons and saves the season banners if possible. This
method should not need to be overridden by implementing classes, changing
_season_banners_dict and get_season_banner_path should be good enough.
"""
season_dict = self._season_banners_dict(show_obj, season)
result = []
# Returns a nested dictionary of season art with the season
# number as primary key. It's really overkill but gives the option
# to present to user via ui to pick down the road.
for cur_season in season_dict:
cur_season_art = season_dict[cur_season]
if len(cur_season_art) == 0:
continue
# Just grab whatever's there for now
art_id, season_url = cur_season_art.popitem() # @UnusedVariable
season_banner_file_path = self.get_season_banner_path(show_obj, cur_season)
if not season_banner_file_path:
logger.log(u"Path for season " + str(cur_season) + " came back blank, skipping this season",
logger.DEBUG)
continue
seasonData = metadata_helpers.getShowImage(season_url)
if not seasonData:
logger.log(u"No season banner data available, skipping this season", logger.DEBUG)
continue
result = result + [self._write_image(seasonData, season_banner_file_path, show_obj)]
if result:
return all(result)
else:
return False
return True
def save_season_all_poster(self, show_obj, which=None):
# use the default season all poster name
poster_path = self.get_season_all_poster_path(show_obj)
poster_data = self._retrieve_show_image('poster', show_obj, which)
if not poster_data:
logger.log(u"No show poster image was retrieved, unable to write season all poster", logger.DEBUG)
return False
return self._write_image(poster_data, poster_path, show_obj)
def save_season_all_banner(self, show_obj, which=None):
# use the default season all banner name
banner_path = self.get_season_all_banner_path(show_obj)
banner_data = self._retrieve_show_image('banner', show_obj, which)
if not banner_data:
logger.log(u"No show banner image was retrieved, unable to write season all banner", logger.DEBUG)
return False
return self._write_image(banner_data, banner_path, show_obj)
def _write_image(self, image_data, image_path, obj = None):
"""
Saves the data in image_data to the location image_path. Returns True/False
to represent success or failure.
image_data: binary image data to write to file
image_path: file location to save the image to
"""
# don't bother overwriting it
if ek.ek(os.path.isfile, image_path):
logger.log(u"Image already exists, not downloading", logger.DEBUG)
return False
image_dir = ek.ek(os.path.dirname, image_path)
if not image_data:
logger.log(u"Unable to retrieve image to %s to save in %s, skipping" % ( ek.ss(obj.prettyName()), ek.ss(image_dir) ), logger.WARNING)
return False
try:
if not ek.ek(os.path.isdir, image_dir):
logger.log(u"Metadata dir didn't exist, creating it at " + image_dir, logger.DEBUG)
ek.ek(os.makedirs, image_dir)
helpers.chmodAsParent(image_dir)
outFile = ek.ek(open, image_path, 'wb')
outFile.write(image_data)
outFile.close()
helpers.chmodAsParent(image_path)
except IOError, e:
logger.log(
u"Unable to write image to " + image_path + " - are you sure the show folder is writable? " + ex(e),
logger.ERROR)
return False
return True
def _retrieve_show_image(self, image_type, show_obj, which=None):
"""
Gets an image URL from theTVDB.com and TMDB.com, downloads it and returns the data.
image_type: type of image to retrieve (currently supported: fanart, poster, banner)
show_obj: a TVShow object to use when searching for the image
which: optional, a specific numbered poster to look for
Returns: the binary image data if available, or else None
"""
image_url = None
indexer_lang = show_obj.lang
try:
# There's gotta be a better way of doing this but we don't wanna
# change the language value elsewhere
lINDEXER_API_PARMS = sickbeard.indexerApi(show_obj.indexer).api_params.copy()
lINDEXER_API_PARMS['banners'] = True
if indexer_lang and not indexer_lang == sickbeard.INDEXER_DEFAULT_LANGUAGE:
lINDEXER_API_PARMS['language'] = indexer_lang
if show_obj.dvdorder != 0:
lINDEXER_API_PARMS['dvdorder'] = True
t = sickbeard.indexerApi(show_obj.indexer).indexer(**lINDEXER_API_PARMS)
indexer_show_obj = t[show_obj.indexerid]
except (sickbeard.indexer_error, IOError), e:
logger.log(u"Unable to look up show on " + sickbeard.indexerApi(
show_obj.indexer).name + ", not downloading images: " + ex(e), logger.WARNING)
logger.log(u"Indexer " + sickbeard.indexerApi(show_obj.indexer).name + "maybe experiencing some problems. Try again later", logger.DEBUG)
return None
if image_type not in ('fanart', 'poster', 'banner', 'poster_thumb', 'banner_thumb'):
logger.log(u"Invalid image type " + str(image_type) + ", couldn't find it in the " + sickbeard.indexerApi(
show_obj.indexer).name + " object", logger.ERROR)
return None
if image_type == 'poster_thumb':
if getattr(indexer_show_obj, 'poster', None) is not None:
image_url = re.sub('posters', '_cache/posters', indexer_show_obj['poster'])
if not image_url:
# Try and get images from Fanart.TV
image_url = self._retrieve_show_images_from_fanart(show_obj, image_type)
if not image_url:
# Try and get images from TMDB
image_url = self._retrieve_show_images_from_tmdb(show_obj, image_type)
elif image_type == 'banner_thumb':
if getattr(indexer_show_obj, 'banner', None) is not None:
image_url = re.sub('graphical', '_cache/graphical', indexer_show_obj['banner'])
if not image_url:
# Try and get images from Fanart.TV
image_url = self._retrieve_show_images_from_fanart(show_obj, image_type)
else:
if getattr(indexer_show_obj, image_type, None) is not None:
image_url = indexer_show_obj[image_type]
if not image_url:
# Try and get images from Fanart.TV
image_url = self._retrieve_show_images_from_fanart(show_obj, image_type)
if not image_url:
# Try and get images from TMDB
image_url = self._retrieve_show_images_from_tmdb(show_obj, image_type)
if image_url:
image_data = metadata_helpers.getShowImage(image_url, which)
return image_data
return None
def _season_posters_dict(self, show_obj, season):
"""
Should return a dict like:
result = {<season number>:
{1: '<url 1>', 2: <url 2>, ...},}
"""
# This holds our resulting dictionary of season art
result = {}
indexer_lang = show_obj.lang
try:
# There's gotta be a better way of doing this but we don't wanna
# change the language value elsewhere
lINDEXER_API_PARMS = sickbeard.indexerApi(show_obj.indexer).api_params.copy()
lINDEXER_API_PARMS['banners'] = True
if indexer_lang and not indexer_lang == sickbeard.INDEXER_DEFAULT_LANGUAGE:
lINDEXER_API_PARMS['language'] = indexer_lang
if show_obj.dvdorder != 0:
lINDEXER_API_PARMS['dvdorder'] = True
t = sickbeard.indexerApi(show_obj.indexer).indexer(**lINDEXER_API_PARMS)
indexer_show_obj = t[show_obj.indexerid]
except (sickbeard.indexer_error, IOError), e:
logger.log(u"Unable to look up show on " + sickbeard.indexerApi(
show_obj.indexer).name + ", not downloading images: " + ex(e), logger.WARNING)
logger.log(u"Indexer " + sickbeard.indexerApi(show_obj.indexer).name + "maybe experiencing some problems. Try again later", logger.DEBUG)
return result
# if we have no season banners then just finish
if getattr(indexer_show_obj, '_banners', None) is None:
return result
if 'season' not in indexer_show_obj['_banners'] or 'season' not in indexer_show_obj['_banners']['season']:
return result
# Give us just the normal poster-style season graphics
seasonsArtObj = indexer_show_obj['_banners']['season']['season']
# Returns a nested dictionary of season art with the season
# number as primary key. It's really overkill but gives the option
# to present to user via ui to pick down the road.
result[season] = {}
# find the correct season in the TVDB and TVRAGE object and just copy the dict into our result dict
for seasonArtID in seasonsArtObj.keys():
if int(seasonsArtObj[seasonArtID]['season']) == season and seasonsArtObj[seasonArtID]['language'] == sickbeard.INDEXER_DEFAULT_LANGUAGE:
result[season][seasonArtID] = seasonsArtObj[seasonArtID]['_bannerpath']
return result
def _season_banners_dict(self, show_obj, season):
"""
Should return a dict like:
result = {<season number>:
{1: '<url 1>', 2: <url 2>, ...},}
"""
# This holds our resulting dictionary of season art
result = {}
indexer_lang = show_obj.lang
try:
# There's gotta be a better way of doing this but we don't wanna
# change the language value elsewhere
lINDEXER_API_PARMS = sickbeard.indexerApi(show_obj.indexer).api_params.copy()
lINDEXER_API_PARMS['banners'] = True
if indexer_lang and not indexer_lang == sickbeard.INDEXER_DEFAULT_LANGUAGE:
lINDEXER_API_PARMS['language'] = indexer_lang
t = sickbeard.indexerApi(show_obj.indexer).indexer(**lINDEXER_API_PARMS)
indexer_show_obj = t[show_obj.indexerid]
except (sickbeard.indexer_error, IOError), e:
logger.log(u"Unable to look up show on " + sickbeard.indexerApi(
show_obj.indexer).name + ", not downloading images: " + ex(e), logger.WARNING)
logger.log(u"Indexer " + sickbeard.indexerApi(show_obj.indexer).name + "maybe experiencing some problems. Try again later", logger.DEBUG)
return result
# if we have no season banners then just finish
if getattr(indexer_show_obj, '_banners', None) is None:
return result
# if we have no season banners then just finish
if 'season' not in indexer_show_obj['_banners'] or 'seasonwide' not in indexer_show_obj['_banners']['season']:
return result
# Give us just the normal season graphics
seasonsArtObj = indexer_show_obj['_banners']['season']['seasonwide']
# Returns a nested dictionary of season art with the season
# number as primary key. It's really overkill but gives the option
# to present to user via ui to pick down the road.
result[season] = {}
# find the correct season in the TVDB and TVRAGE object and just copy the dict into our result dict
for seasonArtID in seasonsArtObj.keys():
if int(seasonsArtObj[seasonArtID]['season']) == season and seasonsArtObj[seasonArtID]['language'] == sickbeard.INDEXER_DEFAULT_LANGUAGE:
result[season][seasonArtID] = seasonsArtObj[seasonArtID]['_bannerpath']
return result
def retrieveShowMetadata(self, folder):
"""
Used only when mass adding Existing Shows, using previously generated Show metadata to reduce the need to query TVDB.
"""
empty_return = (None, None, None)
metadata_path = ek.ek(os.path.join, folder, self._show_metadata_filename)
if not ek.ek(os.path.isdir, folder) or not ek.ek(os.path.isfile, metadata_path):
logger.log(u"Can't load the metadata file from " + repr(metadata_path) + ", it doesn't exist", logger.DEBUG)
return empty_return
logger.log(u"Loading show info from metadata file in " + folder, logger.DEBUG)
try:
with ek.ek(open, metadata_path, 'r') as xmlFileObj:
showXML = etree.ElementTree(file=xmlFileObj)
if showXML.findtext('title') == None \
or (showXML.findtext('tvdbid') == None
and showXML.findtext('id') == None):
logger.log(u"Invalid info in tvshow.nfo (missing name or id):" \
+ str(showXML.findtext('title')) + " " \
+ str(showXML.findtext('tvdbid')) + " " \
+ str(showXML.findtext('id')))
return empty_return
name = showXML.findtext('title')
if showXML.findtext('tvdbid') != None:
indexer_id = int(showXML.findtext('tvdbid'))
elif showXML.findtext('id') != None:
indexer_id = int(showXML.findtext('id'))
else:
logger.log(u"Empty <id> or <tvdbid> field in NFO, unable to find a ID", logger.WARNING)
return empty_return
if indexer_id is None:
logger.log(u"Invalid Indexer ID (" + str(indexer_id) + "), not using metadata file", logger.WARNING)
return empty_return
indexer = None
if showXML.find('episodeguide/url') != None:
epg_url = showXML.findtext('episodeguide/url').lower()
if str(indexer_id) in epg_url:
if 'thetvdb.com' in epg_url:
indexer = 1
elif 'tvrage' in epg_url:
indexer = 2
except Exception, e:
logger.log(
u"There was an error parsing your existing metadata file: '" + metadata_path + "' error: " + ex(e),
logger.WARNING)
return empty_return
return (indexer_id, name, indexer)
def _retrieve_show_images_from_tmdb(self, show, type):
types = {'poster': 'poster_path',
'banner': None,
'fanart': 'backdrop_path',
'poster_thumb': 'poster_path',
'banner_thumb': None}
# get TMDB configuration info
tmdb = TMDB(sickbeard.TMDB_API_KEY)
config = tmdb.Configuration()
response = config.info()
base_url = response['images']['base_url']
sizes = response['images']['poster_sizes']
def size_str_to_int(x):
return float("inf") if x == 'original' else int(x[1:])
max_size = max(sizes, key=size_str_to_int)
try:
search = tmdb.Search()
for show_name in set(allPossibleShowNames(show)):
for result in search.collection({'query': show_name})['results'] + search.tv({'query': show_name})['results']:
                    if types[type] and result.get(types[type]):
return "{0}{1}{2}".format(base_url, max_size, result[types[type]])
        except Exception:
pass
logger.log(u"Could not find any " + type + " images on TMDB for " + show.name, logger.DEBUG)
def _retrieve_show_images_from_fanart(self, show, type, thumb=False):
types = {'poster': fanart.TYPE.TV.POSTER,
'banner': fanart.TYPE.TV.BANNER,
'poster_thumb': fanart.TYPE.TV.POSTER,
'banner_thumb': fanart.TYPE.TV.BANNER,
'fanart': fanart.TYPE.TV.BACKGROUND,
}
try:
indexerid = helpers.mapIndexersToShow(show)[1]
if indexerid:
request = fanartRequest(
apikey=sickbeard.FANART_API_KEY,
id=indexerid,
ws=fanart.WS.TV,
type=types[type],
sort=fanart.SORT.POPULAR,
limit=fanart.LIMIT.ONE,
)
resp = request.response()
url = resp[types[type]][0]['url']
if thumb:
url = re.sub('/fanart/', '/preview/', url)
return url
        except Exception:
pass
logger.log(u"Could not find any " + type + " images on Fanart.tv for " + show.name, logger.DEBUG)
| gpl-3.0 | -6,739,465,871,409,190,000 | 38.807915 | 165 | 0.597609 | false |
DzinVision/adventofcode-2016 | day_10.py | 1 | 2554 | from collections import defaultdict
import re
commands = []
while True:
try:
commands.append(input())
    except EOFError:
break
bots = {}
outputs = {}
class Bot:
number = ''
value_1 = -1
value_2 = -1
low_to = ''
low_to_output = False
high_to = ''
    high_to_output = False
def give_value(self, value):
if self.value_1 == -1:
self.value_1 = value
elif self.value_2 == -1:
self.value_2 = value
    def execute(self):
if self.value_1 == -1 or self.value_2 == -1:
return
if min(self.value_1, self.value_2) == 17 and max(self.value_1, self.value_2) == 61:
print('#1:', self.number)
if not self.low_to_output:
bots[self.low_to].give_value(min(self.value_1, self.value_2))
            bots[self.low_to].execute()
else:
outputs[self.low_to][min(self.value_1, self.value_2)] += 1
        if not self.high_to_output:
            bots[self.high_to].give_value(max(self.value_1, self.value_2))
            bots[self.high_to].execute()
else:
outputs[self.high_to][max(self.value_1, self.value_2)] += 1
self.value_1 = -1
self.value_2 = -1
for command in commands:
match = re.findall(r'output (\d+)', command)
for m in match:
outputs[m] = defaultdict(int)
match = re.findall(r'bot (\d+)', command)
for m in match:
bot = Bot()
bot.number = m
bots[m] = bot
for command in commands:
if command[:3] == 'bot':
low_to = re.search(r'(\d+)', command[command.find('low'): command.find('high')]).group(1)
high_to = re.search(r'(\d+)', command[command.find('high'):]).group(1)
low_to_output = command[command.find('low'): command.find('high')].find('output') != -1
        high_to_output = command[command.find('high'):].find('output') != -1
bot_num = re.match(r'bot (\d+)', command).group(1)
bot = bots[bot_num]
bot.low_to = low_to
bot.high_to = high_to
bot.low_to_output = low_to_output
        bot.high_to_output = high_to_output
for command in commands:
if command[:5] == 'value':
match = re.match(r'value (\d+).*bot (\d+)', command)
value = int(match.group(1))
bot_num = match.group(2)
bot = bots[bot_num]
bot.give_value(value)
for bot in bots.values():
    bot.execute()
print('#2:', int(list(outputs['0'].keys())[0]) * int(list(outputs['1'].keys())[0]) * int(list(outputs['2'].keys())[0]))
| gpl-3.0 | -971,003,580,447,135,500 | 26.462366 | 119 | 0.542678 | false |
GoogleCloudPlatform/professional-services | examples/dataflow-data-generator/data-generator-pipeline/tests/test_fix_record_for_avro.py | 2 | 6031 | # Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fastavro
import json
import logging
import unittest
from data_generator.AvroUtil import fix_record_for_avro
class TestAvroFixer(unittest.TestCase):
def test_fix_record_for_avro(self):
fastavro_schema = fastavro.parse_schema({
'type':
'record',
'name':
'AthleticsWorldRecords',
'fields': [{
'name': 'birthday',
'type': ['null', {
'logicalType': 'date',
'type': 'int'
}]
}, {
'name': 'athlete',
'type': 'string'
}, {
'name':
'race_start_time',
'type':
['null', {
'logicalType': 'time-micros',
'type': 'long'
}]
}, {
'name':
'race_start_datetime',
'type':
['null', {
'logicalType': 'timestamp-millis',
'type': 'long'
}]
}, {
'name':
'race_end_timestamp',
'type':
['null', {
'logicalType': 'timestamp-micros',
'type': 'long'
}]
}, {
'name': 'race_distance_m',
'type': 'int'
}, {
'name': 'time_seconds',
'type': 'float'
}, {
'name': 'is_world_record',
'type': 'boolean'
}, {
'name': 'rival_record',
'type': {
'type':
'record',
'name':
'RivalAthlete',
'fields': [{
'name':
'birthday',
'type':
['null', {
'logicalType': 'date',
'type': 'int'
}]
}, {
'name': 'athlete',
'type': 'string'
}, {
'name':
'race_start_time',
'type': [
'null', {
'logicalType': 'time-micros',
'type': 'long'
}
]
}, {
'name':
'race_start_datetime',
'type': [
'null', {
'logicalType': 'timestamp-millis',
'type': 'long'
}
]
}, {
'name':
'race_end_timestamp',
'type': [
'null', {
'logicalType': 'timestamp-micros',
'type': 'long'
}
]
}, {
'name': 'race_distance_m',
'type': 'int'
}, {
'name': 'time_seconds',
'type': 'float'
}, {
'name': 'is_world_record',
'type': 'boolean'
}]
}
}]
})
input_record = {
'birthday': '1988-12-17',
'athlete': 'David Rudisha',
'race_start_time': '20:20:00.00',
'race_start_datetime': '2012-09-08T20:20:00.00',
'race_end_timestamp': '2012-09-08T20:21:40.91',
'race_distance_m': 800,
'time_seconds': 100.91,
'is_world_record': True,
'rival_record': {
'birthday': '1995-06-15',
'athlete': 'Emmanuel Kipkurui Korir',
'race_start_time': '20:20:00.00',
'race_start_datetime': '2018-07-22T20:20:00.00',
'race_end_timestamp': '2018-07-22T20:21:42.05',
'race_distance_m': 800,
'time_seconds': 102.05,
'is_world_record': False
}
}
expected_output = [{
'birthday': 6925,
'athlete': 'David Rudisha',
'race_start_time': 73200000000,
'race_start_datetime': 1347135600000,
'race_end_timestamp': 1347135700910000,
'race_distance_m': 800,
'time_seconds': 100.91,
'is_world_record': True,
'rival_record': {
'birthday': 9296,
'athlete': 'Emmanuel Kipkurui Korir',
'race_start_time': 73200000000,
'race_start_datetime': 1532290800000,
'race_end_timestamp': 1532290902050000,
'race_distance_m': 800,
'time_seconds': 102.05,
'is_world_record': False
}
}]
output_record = fix_record_for_avro(input_record, fastavro_schema)
self.assertDictEqual(output_record[0], expected_output[0])
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 3,888,950,075,616,894,000 | 32.882022 | 74 | 0.368596 | false |
dzolnierz/mysql-utilities | scripts/mysqluc.py | 2 | 6927 | #!/usr/bin/env python
#
# Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
This file contains the mysql utilities client.
"""
import os
import sys
from mysql.utilities.common.tools import (check_connector_python,
check_python_version)
# Check Python version compatibility
check_python_version()
# Check for connector/python
if not check_connector_python():
sys.exit(1)
from mysql.utilities.common.options import (license_callback,
UtilitiesParser,
check_password_security)
try:
from mysql.utilities import VERSION_FRM, VERSION_STRING, COPYRIGHT_FULL
from mysql.utilities.exception import UtilError
from mysql.utilities.command.utilitiesconsole import UtilitiesConsole
from mysql.utilities.common.options import add_verbosity, check_verbosity
except:
print("ERROR: MySQL Utilities are either not installed or are not "
"accessible from this terminal.")
sys.exit(2)
# Constants
NAME = "MySQL Utilities Client - mysqluc "
DESCRIPTION = "mysqluc - Command line client for running MySQL Utilities"
USAGE = "%prog "
WELCOME_MESSAGE = """
Welcome to the MySQL Utilities Client (mysqluc) version {0}\n{1}
Type 'help' for a list of commands or press TAB twice for list of utilities.
"""
GOODBYE_MESSAGE = "\nThanks for using the MySQL Utilities Client!\n"
PRINT_WIDTH = 75
UTIL_PATH = "/scripts"
if __name__ == '__main__':
def build_variable_dictionary_list(args):
"""Build a variable dictionary from the arguments
Returns list - list of variables
"""
variables = []
arguments = list(args)
for arg in arguments[:]:
if '=' in arg:
try:
name, value = arg.split('=')
if not value:
raise ValueError
except ValueError:
parser.error("Invalid argument assignment: {0}. Please "
"check your command.".format(arg))
variables.append({'name': name, 'value': value})
arguments.remove(arg)
if len(arguments) > 0:
parser.error("Unbalanced arguments. Please check your command.")
for i in range(0, len(arguments), 2):
variables.append({'name': arguments[i], 'value': arguments[i + 1]})
return variables
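    # Illustrative sketch (not part of the original script): the shape of the
    # parsed variables; the sample arguments below are assumptions.
    def _example_variable_parsing():
        # ['a=1', 'b=2'] -> [{'name': 'a', 'value': '1'},
        #                    {'name': 'b', 'value': '2'}]
        return build_variable_dictionary_list(['a=1', 'b=2'])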
# Setup the command parser
program = os.path.basename(sys.argv[0]).replace(".py", "")
parser = UtilitiesParser(
version=VERSION_FRM.format(program=program),
description=DESCRIPTION,
usage=USAGE,
add_help_option=False,
prog=program
)
# Default option to provide help information
parser.add_option("--help", action="help",
help="display this help message and exit")
    # Add --license option
parser.add_option("--license", action='callback',
callback=license_callback,
help="display program's license and exit")
# Add display width option
parser.add_option("--width", action="store", dest="width",
type="int", help="display width",
default=PRINT_WIDTH)
# Add utility directory option
parser.add_option("--utildir", action="store", dest="utildir",
type="string", help="location of utilities",
default=UTIL_PATH)
# Add execute mode
parser.add_option("-e", "--execute", action="store", dest="commands",
type="string",
help="execute commands and exit. Multiple commands are "
"separated with semi-colons. Note: some platforms "
"may require double quotes around command list.",
default=None)
# Add utility extra_utilities option
parser.add_option("--add-utility", action="append", dest="add_util",
help="append an utility in the format mysql<utility_"
"name>. The mysql<utility_name>.py must be located "
"inside the folder given by the utildir option",
default=[])
    # Add hide utilities option
parser.add_option("--hide-utils", action="store_true",
dest="hide_util",
help="when this option is given, the default utilities "
"will not be available, must be used only along "
"of --add-utility option",
default=False)
# Add verbosity mode
add_verbosity(parser, True)
# Now we process the rest of the arguments.
opt, args = parser.parse_args()
# Check security settings
check_password_security(opt, args)
# Warn if quiet and verbosity are both specified
check_verbosity(opt)
if opt.verbosity is None:
verbosity = 0
else:
verbosity = opt.verbosity
quiet = opt.quiet
if opt.quiet is None:
quiet = False
if opt.hide_util and not opt.add_util:
# TODO: move to common/messages.py
parser.error("You can only use --hide_utils option along the "
"--add-util option.")
extra_utils_dict = {}
for utility in opt.add_util:
extra_utils_dict[utility] = ()
options = {
'verbosity': verbosity,
'quiet': quiet,
'width': opt.width,
'utildir': opt.utildir,
'variables': build_variable_dictionary_list(args),
'prompt': 'mysqluc> ',
'welcome': WELCOME_MESSAGE.format(VERSION_STRING, COPYRIGHT_FULL),
'goodbye': GOODBYE_MESSAGE,
'commands': opt.commands,
'custom': True, # We are using custom commands
'hide_util': opt.hide_util,
'add_util': extra_utils_dict
}
try:
print("Launching console ...")
util_con = UtilitiesConsole(options)
util_con.run_console()
except KeyboardInterrupt:
print(options['goodbye'])
except UtilError:
_, e, _ = sys.exc_info()
print("ERROR: %s" % e.errmsg)
sys.exit()
| gpl-2.0 | 4,047,960,684,611,629,000 | 34.523077 | 79 | 0.59795 | false |
erikvolz/idepi-ev0 | idepi/scripts/_sto2fa.py | 4 | 1544 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# sto2fa.py :: converts a stockholm multiple sequence alignment file to fasta
# format.
#
# Copyright (C) 2011 N Lance Hepler <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from __future__ import division, print_function
from argparse import ArgumentParser, FileType
from sys import argv, exit, stdin, stdout
from Bio import AlignIO
def main(args=None):
if args is None:
args = argv[1:]
parser = ArgumentParser(description='convert stockholm file to FASTA')
parser.add_argument('STOCKHOLMFILE', type=FileType('r'))
ns = parser.parse_args(args)
alignments = AlignIO.parse(ns.STOCKHOLMFILE, 'stockholm')
AlignIO.write(alignments, stdout, 'fasta')
if ns.STOCKHOLMFILE != stdin:
ns.STOCKHOLMFILE.close()
return 0
if __name__ == '__main__':
exit(main())
| gpl-3.0 | 53,738,623,089,066,160 | 28.692308 | 77 | 0.71956 | false |
zhujzhuo/openstack-trove | trove/common/remote.py | 3 | 7115 | # Copyright 2010-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.utils.importutils import import_class
from trove.common import cfg
from trove.common import exception
from trove.common.strategies.cluster import strategy
from cinderclient.v2 import client as CinderClient
from heatclient.v1 import client as HeatClient
from keystoneclient.service_catalog import ServiceCatalog
from novaclient.v2.client import Client
from swiftclient.client import Connection
CONF = cfg.CONF
PROXY_AUTH_URL = CONF.trove_auth_url
USE_SNET = CONF.backup_use_snet
def normalize_url(url):
"""Adds trailing slash if necessary."""
if not url.endswith('/'):
return '%(url)s/' % {'url': url}
else:
return url
def get_endpoint(service_catalog, service_type=None,
endpoint_region=CONF.os_region_name,
endpoint_type='publicURL'):
"""
Select an endpoint from the service catalog
We search the full service catalog for services
matching both type and region. The client is expected to
supply the region matching the service_type. There must
be one -- and only one -- successful match in the catalog,
otherwise we will raise an exception.
Some parts copied from glance/common/auth.py.
"""
if not service_catalog:
raise exception.EmptyCatalog()
# per IRC chat, X-Service-Catalog will be a v2 catalog regardless of token
# format; see https://bugs.launchpad.net/python-keystoneclient/+bug/1302970
# 'token' key necessary to get past factory validation
sc = ServiceCatalog.factory({'token': None,
'serviceCatalog': service_catalog})
urls = sc.get_urls(service_type=service_type, region_name=endpoint_region,
endpoint_type=endpoint_type)
if not urls:
raise exception.NoServiceEndpoint(service_type=service_type,
endpoint_region=endpoint_region,
endpoint_type=endpoint_type)
return urls[0]
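# Illustrative sketch (not part of the original module): how a caller resolves
# a concrete endpoint from the catalog carried by the request context; the
# service type and region literals below are assumptions.
def _example_volume_endpoint(context):
    return get_endpoint(context.service_catalog,
                        service_type='volumev2',
                        endpoint_region='RegionOne',
                        endpoint_type='publicURL')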
def dns_client(context):
from trove.dns.manager import DnsManager
return DnsManager()
def guest_client(context, id, manager=None):
from trove.guestagent.api import API
if manager:
clazz = strategy.load_guestagent_strategy(manager).guest_client_class
else:
clazz = API
return clazz(context, id)
def nova_client(context):
if CONF.nova_compute_url:
url = '%(nova_url)s%(tenant)s' % {
'nova_url': normalize_url(CONF.nova_compute_url),
'tenant': context.tenant}
else:
url = get_endpoint(context.service_catalog,
service_type=CONF.nova_compute_service_type,
endpoint_region=CONF.os_region_name,
endpoint_type=CONF.nova_compute_endpoint_type)
client = Client(context.user, context.auth_token,
bypass_url=url, project_id=context.tenant,
auth_url=PROXY_AUTH_URL)
client.client.auth_token = context.auth_token
client.client.management_url = url
return client
def create_admin_nova_client(context):
"""
Creates client that uses trove admin credentials
:return: a client for nova for the trove admin
"""
client = create_nova_client(context)
client.client.auth_token = None
return client
def cinder_client(context):
if CONF.cinder_url:
url = '%(cinder_url)s%(tenant)s' % {
'cinder_url': normalize_url(CONF.cinder_url),
'tenant': context.tenant}
else:
url = get_endpoint(context.service_catalog,
service_type=CONF.cinder_service_type,
endpoint_region=CONF.os_region_name,
endpoint_type=CONF.cinder_endpoint_type)
client = CinderClient.Client(context.user, context.auth_token,
project_id=context.tenant,
auth_url=PROXY_AUTH_URL)
client.client.auth_token = context.auth_token
client.client.management_url = url
return client
def heat_client(context):
if CONF.heat_url:
url = '%(heat_url)s%(tenant)s' % {
'heat_url': normalize_url(CONF.heat_url),
'tenant': context.tenant}
else:
url = get_endpoint(context.service_catalog,
service_type=CONF.heat_service_type,
endpoint_region=CONF.os_region_name,
endpoint_type=CONF.heat_endpoint_type)
client = HeatClient.Client(token=context.auth_token,
os_no_client_auth=True,
endpoint=url)
return client
def swift_client(context):
if CONF.swift_url:
# swift_url has a different format so doesn't need to be normalized
url = '%(swift_url)s%(tenant)s' % {'swift_url': CONF.swift_url,
'tenant': context.tenant}
else:
url = get_endpoint(context.service_catalog,
service_type=CONF.swift_service_type,
endpoint_region=CONF.os_region_name,
endpoint_type=CONF.swift_endpoint_type)
client = Connection(preauthurl=url,
preauthtoken=context.auth_token,
tenant_name=context.tenant,
snet=USE_SNET)
return client
def neutron_client(context):
from neutronclient.v2_0 import client as NeutronClient
if CONF.neutron_url:
# neutron endpoint url / publicURL does not include tenant segment
url = CONF.neutron_url
else:
url = get_endpoint(context.service_catalog,
service_type=CONF.neutron_service_type,
endpoint_region=CONF.os_region_name,
endpoint_type=CONF.neutron_endpoint_type)
client = NeutronClient.Client(token=context.auth_token,
endpoint_url=url)
return client
create_dns_client = import_class(CONF.remote_dns_client)
create_guest_client = import_class(CONF.remote_guest_client)
create_nova_client = import_class(CONF.remote_nova_client)
create_swift_client = import_class(CONF.remote_swift_client)
create_cinder_client = import_class(CONF.remote_cinder_client)
create_heat_client = import_class(CONF.remote_heat_client)
create_neutron_client = import_class(CONF.remote_neutron_client)
| apache-2.0 | 4,594,386,218,518,423,000 | 35.675258 | 79 | 0.628672 | false |
cartersgenes/shinysdr | shinysdr/blocks.py | 4 | 15963 | # Copyright 2013, 2014 Kevin Reid <[email protected]>
#
# This file is part of ShinySDR.
#
# ShinySDR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ShinySDR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ShinySDR. If not, see <http://www.gnu.org/licenses/>.
'''
GNU Radio flowgraph blocks for use by ShinySDR.
This module is not an external API and not guaranteed to have a stable
interface.
'''
# pylint: disable=attribute-defined-outside-init
# (attribute-defined-outside-init: doing it carefully)
from __future__ import absolute_import, division
import math
import os
import subprocess
from gnuradio import gr
from gnuradio import blocks
from gnuradio.fft import logpwrfft
from shinysdr.math import todB
from shinysdr.signals import SignalType
from shinysdr.types import BulkDataType, Range
from shinysdr.values import ExportedState, LooseCell, StreamCell, exported_value, setter
class RecursiveLockBlockMixin(object):
'''
For top blocks needing recursive locking and/or a notification to restart parts.
'''
__lock_count = 0
def _recursive_lock_hook(self):
''' override'''
pass
def _recursive_lock(self):
# gnuradio uses a non-recursive lock, which is not adequate for our purposes because we want to make changes locally or globally without worrying about having a single lock entry point
if self.__lock_count == 0:
self.lock()
self._recursive_lock_hook()
self.__lock_count += 1
def _recursive_unlock(self):
self.__lock_count -= 1
if self.__lock_count == 0:
self.unlock()
class Context(object):
'''
Client facet for RecursiveLockBlockMixin.
'''
def __init__(self, top):
self.__top = top
def lock(self):
self.__top._recursive_lock()
def unlock(self):
self.__top._recursive_unlock()
def rotator_inc(rate, shift):
'''
Calculation for using gnuradio.blocks.rotator_cc or other interfaces wanting radians/sample input.
rate: sample rate
shift: frequency shift in Hz
'''
return (2 * math.pi) * (shift / rate)
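# Illustrative sketch (not part of the original module): the increment for
# shifting a 48 kHz stream by +1 kHz; the numbers are assumptions.
def _example_rotator_inc():
    # 2*pi * 1000 / 48000 ~= 0.1309 rad/sample, usable as the phase increment
    # for gnuradio.blocks.rotator_cc mentioned in the docstring above.
    return rotator_inc(rate=48e3, shift=1e3)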
def make_sink_to_process_stdin(process, itemsize=gr.sizeof_char):
'''Given a twisted Process, connect a sink to its stdin.'''
fd_owned_by_twisted = process.pipes[0].fileno() # TODO: More public way to do this?
fd_owned_by_sink = os.dup(fd_owned_by_twisted)
process.closeStdin()
return blocks.file_descriptor_sink(itemsize, fd_owned_by_sink)
def test_subprocess(args, substring, shell=False):
'''Check the stdout or stderr of the specified command for a specified string.'''
# TODO: establish resource and output size limits
# TODO: Use Twisted facilities instead to avoid possible conflicts
try:
output = subprocess.check_output(
args=args,
shell=shell,
stderr=subprocess.STDOUT)
return substring in output
except OSError:
return False
except subprocess.CalledProcessError:
return False
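# Illustrative sketch (not part of the original module): probing for an
# external dependency before enabling a feature; the command and the marker
# string below are assumptions.
def _example_have_python():
    return test_subprocess(['python', '--version'], 'Python')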
class _NoContext(object):
def lock(self):
pass
def unlock(self):
pass
class MessageDistributorSink(gr.hier_block2):
'''Like gnuradio.blocks.message_sink, but copies its messages to a dynamic set of queues and saves the most recent item.
Never blocks.'''
def __init__(self, itemsize, context, migrate=None, notify=None):
gr.hier_block2.__init__(
self, self.__class__.__name__,
gr.io_signature(1, 1, itemsize),
gr.io_signature(0, 0, 0),
)
self.__itemsize = itemsize
self.__context = _NoContext()
self.__peek = blocks.probe_signal_vb(itemsize)
self.__subscriptions = {}
self.__notify = None
self.connect(self, self.__peek)
if migrate is not None:
assert isinstance(migrate, MessageDistributorSink) # sanity check
for queue in migrate.__subscriptions.keys():
migrate.unsubscribe(queue)
self.subscribe(queue)
# set now, not earlier, so as not to trigger anything while migrating
self.__context = context
self.__notify = notify
def get(self):
return self.__peek.level()
def get_subscription_count(self):
return len(self.__subscriptions)
def subscribe(self, queue):
assert queue not in self.__subscriptions
sink = blocks.message_sink(self.__itemsize, queue, True)
self.__subscriptions[queue] = sink
try:
self.__context.lock()
self.connect(self, sink)
finally:
self.__context.unlock()
if self.__notify:
self.__notify()
def unsubscribe(self, queue):
sink = self.__subscriptions[queue]
del self.__subscriptions[queue]
try:
self.__context.lock()
self.disconnect(self, sink)
finally:
self.__context.unlock()
if self.__notify:
self.__notify()
_maximum_fft_rate = 120
class _OverlapGimmick(gr.hier_block2):
'''
Pure flowgraph kludge to cause a logpwrfft block to perform overlapped FFTs.
The more correct solution would be to replace stream_to_vector_decimator (used inside of logpwrfft) with a block which takes arbitrarily-spaced vector chunks of the input rather than chunking and then decimating in terms of whole chunks. The cost of doing this instead is more scheduling steps and more data copies.
To adjust for the data rate, the logpwrfft block's sample rate parameter must be multiplied by the factor parameter of this block; or equivalently, the frame rate must be divided by it.
'''
def __init__(self, size, factor, itemsize=gr.sizeof_gr_complex):
'''
size: (int) vector size (FFT size) of next block
factor: (int) output will have this many more samples than input
If size is not divisible by factor, then the output will necessarily have jitter.
'''
size = int(size)
factor = int(factor)
# assert size % factor == 0
offset = size // factor
gr.hier_block2.__init__(
self, self.__class__.__name__,
gr.io_signature(1, 1, itemsize),
gr.io_signature(1, 1, itemsize),
)
if factor == 1:
# No duplication needed; simplify flowgraph
# GR refused to connect self to self, so insert a dummy block
self.connect(self, blocks.copy(itemsize), self)
else:
interleave = blocks.interleave(itemsize * size)
self.connect(
interleave,
blocks.vector_to_stream(itemsize, size),
self)
for i in xrange(0, factor):
self.connect(
self,
blocks.delay(itemsize, (factor - 1 - i) * offset),
blocks.stream_to_vector(itemsize, size),
(interleave, i))
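# Illustrative sketch (not part of the original module): wiring an overlapped
# FFT. With factor=4 the gimmick emits 4x more samples, so logpwrfft is given
# sample_rate * 4 (equivalently, the frame rate is divided by 4); the numbers
# below are assumptions.
def _example_overlapped_fft(sample_rate=256e3, size=4096, factor=4):
    overlapper = _OverlapGimmick(size=size, factor=factor)
    fft = logpwrfft.logpwrfft_c(sample_rate=sample_rate * factor,
                                fft_size=size,
                                frame_rate=30.0,
                                avg_alpha=1.0,
                                average=False)
    return overlapper, fft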
class MonitorSink(gr.hier_block2, ExportedState):
'''
Convenience wrapper around all the bits and pieces to display the signal spectrum to the client.
The units of the FFT output are dB power/Hz (power spectral density) relative to unit amplitude (i.e. dBFS assuming the source clips at +/-1). Note this is different from the standard logpwrfft result of power _per bin_, which would be undesirably dependent on the sample rate and bin size.
'''
def __init__(self,
signal_type=None,
enable_scope=False,
freq_resolution=4096,
time_length=2048,
frame_rate=30.0,
input_center_freq=0.0,
paused=False,
context=None):
assert isinstance(signal_type, SignalType)
assert context is not None
itemsize = signal_type.get_itemsize()
gr.hier_block2.__init__(
self, self.__class__.__name__,
gr.io_signature(1, 1, itemsize),
gr.io_signature(0, 0, 0),
)
# constant parameters
self.__power_offset = 40 # TODO autoset or controllable
self.__itemsize = itemsize
self.__context = context
self.__enable_scope = enable_scope
# settable parameters
self.__signal_type = signal_type
self.__freq_resolution = int(freq_resolution)
self.__time_length = int(time_length)
self.__frame_rate = float(frame_rate)
self.__input_center_freq = float(input_center_freq)
self.__paused = bool(paused)
self.__interested_cell = LooseCell(key='interested', type=bool, value=False, writable=False, persists=False)
# blocks
self.__gate = None
self.__fft_sink = None
self.__scope_sink = None
self.__scope_chunker = None
self.__before_fft = None
self.__logpwrfft = None
self.__overlapper = None
self.__rebuild()
self.__connect()
def state_def(self, callback):
super(MonitorSink, self).state_def(callback)
# TODO make this possible to be decorator style
callback(StreamCell(self, 'fft', type=BulkDataType(array_format='b', info_format='dff')))
callback(StreamCell(self, 'scope', type=BulkDataType(array_format='f', info_format='d')))
def __rebuild(self):
if self.__signal_type.is_analytic():
input_length = self.__freq_resolution
output_length = self.__freq_resolution
self.__after_fft = None
else:
# use vector_to_streams to cut the output in half and discard the redundant part
input_length = self.__freq_resolution * 2
output_length = self.__freq_resolution
self.__after_fft = blocks.vector_to_streams(itemsize=output_length * gr.sizeof_float, nstreams=2)
sample_rate = self.__signal_type.get_sample_rate()
overlap_factor = int(math.ceil(_maximum_fft_rate * input_length / sample_rate))
# sanity limit -- OverlapGimmick is not free
overlap_factor = min(16, overlap_factor)
self.__gate = blocks.copy(gr.sizeof_gr_complex)
self.__gate.set_enabled(not self.__paused)
self.__fft_sink = MessageDistributorSink(
itemsize=output_length * gr.sizeof_char,
context=self.__context,
migrate=self.__fft_sink,
notify=self.__update_interested)
self.__overlapper = _OverlapGimmick(
size=input_length,
factor=overlap_factor,
itemsize=self.__itemsize)
# Adjusts units so displayed level is independent of resolution and sample rate. Also throw in the packing offset
compensation = todB(input_length / sample_rate) + self.__power_offset
# TODO: Consider not using the logpwrfft block
self.__logpwrfft = logpwrfft.logpwrfft_c(
sample_rate=sample_rate * overlap_factor,
fft_size=input_length,
ref_scale=10.0 ** (-compensation / 20.0) * 2, # not actually using this as a reference scale value but avoiding needing to use a separate add operation to apply the unit change -- this expression is the inverse of what logpwrfft does internally
frame_rate=self.__frame_rate,
avg_alpha=1.0,
average=False)
# It would make slightly more sense to use unsigned chars, but blocks.float_to_uchar does not support vlen.
self.__fft_converter = blocks.float_to_char(vlen=self.__freq_resolution, scale=1.0)
self.__scope_sink = MessageDistributorSink(
itemsize=self.__time_length * gr.sizeof_gr_complex,
context=self.__context,
migrate=self.__scope_sink,
notify=self.__update_interested)
self.__scope_chunker = blocks.stream_to_vector_decimator(
item_size=gr.sizeof_gr_complex,
sample_rate=sample_rate,
vec_rate=self.__frame_rate, # TODO doesn't need to be coupled
vec_len=self.__time_length)
def __connect(self):
self.__context.lock()
try:
self.disconnect_all()
self.connect(
self,
self.__gate,
self.__overlapper,
self.__logpwrfft)
if self.__after_fft is not None:
self.connect(self.__logpwrfft, self.__after_fft)
self.connect(self.__after_fft, self.__fft_converter, self.__fft_sink)
self.connect((self.__after_fft, 1), blocks.null_sink(gr.sizeof_float * self.__freq_resolution))
else:
self.connect(self.__logpwrfft, self.__fft_converter, self.__fft_sink)
if self.__enable_scope:
self.connect(
self.__gate,
self.__scope_chunker,
self.__scope_sink)
finally:
self.__context.unlock()
# non-exported
def get_interested_cell(self):
return self.__interested_cell
def __update_interested(self):
self.__interested_cell.set_internal(not self.__paused and (
self.__fft_sink.get_subscription_count() > 0 or
self.__scope_sink.get_subscription_count() > 0))
@exported_value()
def get_signal_type(self):
return self.__signal_type
# non-exported
def set_signal_type(self, value):
# TODO: don't rebuild if the rate did not change and the spectrum-sidedness of the type did not change
assert self.__signal_type.compatible_items(value)
self.__signal_type = value
self.__rebuild()
self.__connect()
# non-exported
def set_input_center_freq(self, value):
self.__input_center_freq = float(value)
@exported_value(type=Range([(2, 4096)], logarithmic=True, integer=True))
def get_freq_resolution(self):
return self.__freq_resolution
@setter
def set_freq_resolution(self, freq_resolution):
self.__freq_resolution = freq_resolution
self.__rebuild()
self.__connect()
@exported_value(type=Range([(1, 4096)], logarithmic=True, integer=True))
def get_time_length(self):
return self.__time_length
@setter
def set_time_length(self, value):
self.__time_length = value
self.__rebuild()
self.__connect()
@exported_value(type=Range([(1, _maximum_fft_rate)], logarithmic=True, integer=False))
def get_frame_rate(self):
return self.__frame_rate
@setter
def set_frame_rate(self, value):
self.__frame_rate = value
self.__logpwrfft.set_vec_rate(value)
@exported_value(type=bool)
def get_paused(self):
return self.__paused
@setter
def set_paused(self, value):
self.__paused = value
self.__gate.set_enabled(not value)
self.__update_interested()
# exported via state_def
def get_fft_info(self):
return (self.__input_center_freq, self.__signal_type.get_sample_rate(), self.__power_offset)
def get_fft_distributor(self):
return self.__fft_sink
# exported via state_def
def get_scope_info(self):
return (self.__signal_type.get_sample_rate(),)
def get_scope_distributor(self):
return self.__scope_sink
| gpl-3.0 | 8,295,159,403,019,445,000 | 35.115385 | 319 | 0.608407 | false |
brownharryb/erpnext | erpnext/hr/doctype/loan/loan.py | 7 | 8460 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, math, json
import erpnext
from frappe import _
from frappe.utils import flt, rounded, add_months, nowdate
from erpnext.controllers.accounts_controller import AccountsController
class Loan(AccountsController):
def validate(self):
check_repayment_method(self.repayment_method, self.loan_amount, self.monthly_repayment_amount, self.repayment_periods)
if not self.company:
self.company = erpnext.get_default_company()
if not self.posting_date:
self.posting_date = nowdate()
if self.loan_type and not self.rate_of_interest:
self.rate_of_interest = frappe.db.get_value("Loan Type", self.loan_type, "rate_of_interest")
if self.repayment_method == "Repay Over Number of Periods":
self.monthly_repayment_amount = get_monthly_repayment_amount(self.repayment_method, self.loan_amount, self.rate_of_interest, self.repayment_periods)
if self.status == "Repaid/Closed":
self.total_amount_paid = self.total_payment
if self.status == 'Disbursed' and self.repayment_start_date < self.disbursement_date:
frappe.throw(_("Repayment Start Date cannot be before Disbursement Date."))
if self.status == "Disbursed":
self.make_repayment_schedule()
self.set_repayment_period()
self.calculate_totals()
def make_jv_entry(self):
self.check_permission('write')
journal_entry = frappe.new_doc('Journal Entry')
journal_entry.voucher_type = 'Bank Entry'
journal_entry.user_remark = _('Against Loan: {0}').format(self.name)
journal_entry.company = self.company
journal_entry.posting_date = nowdate()
account_amt_list = []
account_amt_list.append({
"account": self.loan_account,
"party_type": self.applicant_type,
"party": self.applicant,
"debit_in_account_currency": self.loan_amount,
"reference_type": "Loan",
"reference_name": self.name,
})
account_amt_list.append({
"account": self.payment_account,
"credit_in_account_currency": self.loan_amount,
"reference_type": "Loan",
"reference_name": self.name,
})
journal_entry.set("accounts", account_amt_list)
return journal_entry.as_dict()
def make_repayment_schedule(self):
self.repayment_schedule = []
payment_date = self.repayment_start_date
balance_amount = self.loan_amount
while(balance_amount > 0):
interest_amount = rounded(balance_amount * flt(self.rate_of_interest) / (12*100))
principal_amount = self.monthly_repayment_amount - interest_amount
balance_amount = rounded(balance_amount + interest_amount - self.monthly_repayment_amount)
if balance_amount < 0:
principal_amount += balance_amount
balance_amount = 0.0
total_payment = principal_amount + interest_amount
self.append("repayment_schedule", {
"payment_date": payment_date,
"principal_amount": principal_amount,
"interest_amount": interest_amount,
"total_payment": total_payment,
"balance_loan_amount": balance_amount
})
next_payment_date = add_months(payment_date, 1)
payment_date = next_payment_date
def set_repayment_period(self):
if self.repayment_method == "Repay Fixed Amount per Period":
repayment_periods = len(self.repayment_schedule)
self.repayment_periods = repayment_periods
def calculate_totals(self):
self.total_payment = 0
self.total_interest_payable = 0
self.total_amount_paid = 0
for data in self.repayment_schedule:
self.total_payment += data.total_payment
self.total_interest_payable +=data.interest_amount
if data.paid:
self.total_amount_paid += data.total_payment
def update_total_amount_paid(doc):
total_amount_paid = 0
for data in doc.repayment_schedule:
if data.paid:
total_amount_paid += data.total_payment
frappe.db.set_value("Loan", doc.name, "total_amount_paid", total_amount_paid)
def update_disbursement_status(doc):
disbursement = frappe.db.sql("""select posting_date, ifnull(sum(credit_in_account_currency), 0) as disbursed_amount
from `tabGL Entry` where account = %s and against_voucher_type = 'Loan' and against_voucher = %s""",
(doc.payment_account, doc.name), as_dict=1)[0]
if disbursement.disbursed_amount == doc.loan_amount:
frappe.db.set_value("Loan", doc.name , "status", "Disbursed")
if disbursement.disbursed_amount == 0:
frappe.db.set_value("Loan", doc.name , "status", "Sanctioned")
if disbursement.disbursed_amount > doc.loan_amount:
frappe.throw(_("Disbursed Amount cannot be greater than Loan Amount {0}").format(doc.loan_amount))
if disbursement.disbursed_amount > 0:
frappe.db.set_value("Loan", doc.name , "disbursement_date", disbursement.posting_date)
frappe.db.set_value("Loan", doc.name , "repayment_start_date", disbursement.posting_date)
def check_repayment_method(repayment_method, loan_amount, monthly_repayment_amount, repayment_periods):
if repayment_method == "Repay Over Number of Periods" and not repayment_periods:
frappe.throw(_("Please enter Repayment Periods"))
if repayment_method == "Repay Fixed Amount per Period":
if not monthly_repayment_amount:
frappe.throw(_("Please enter repayment Amount"))
if monthly_repayment_amount > loan_amount:
frappe.throw(_("Monthly Repayment Amount cannot be greater than Loan Amount"))
def get_monthly_repayment_amount(repayment_method, loan_amount, rate_of_interest, repayment_periods):
if rate_of_interest:
monthly_interest_rate = flt(rate_of_interest) / (12 *100)
monthly_repayment_amount = math.ceil((loan_amount * monthly_interest_rate *
(1 + monthly_interest_rate)**repayment_periods) \
/ ((1 + monthly_interest_rate)**repayment_periods - 1))
else:
monthly_repayment_amount = math.ceil(flt(loan_amount) / repayment_periods)
return monthly_repayment_amount
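# Illustrative sketch (not part of the original module): a rough EMI check for
# a hypothetical loan of 1,200,000 at 10% yearly interest over 24 monthly
# periods; the figures are assumptions, not ERPNext defaults.
def _example_monthly_repayment():
    return get_monthly_repayment_amount(
        "Repay Over Number of Periods", 1200000, 10, 24)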
@frappe.whitelist()
def get_loan_application(loan_application):
loan = frappe.get_doc("Loan Application", loan_application)
if loan:
return loan.as_dict()
@frappe.whitelist()
def make_repayment_entry(payment_rows, loan, company, loan_account, applicant_type, applicant, \
payment_account=None, interest_income_account=None):
if isinstance(payment_rows, frappe.string_types):
payment_rows_list = json.loads(payment_rows)
else:
frappe.throw(_("No repayments available for Journal Entry"))
if payment_rows_list:
row_name = list(set(d["name"] for d in payment_rows_list))
else:
frappe.throw(_("No repayments selected for Journal Entry"))
total_payment = 0
principal_amount = 0
interest_amount = 0
for d in payment_rows_list:
total_payment += d["total_payment"]
principal_amount += d["principal_amount"]
interest_amount += d["interest_amount"]
journal_entry = frappe.new_doc('Journal Entry')
journal_entry.voucher_type = 'Bank Entry'
journal_entry.user_remark = _('Against Loan: {0}').format(loan)
journal_entry.company = company
journal_entry.posting_date = nowdate()
journal_entry.paid_loan = json.dumps(row_name)
account_amt_list = []
account_amt_list.append({
"account": payment_account,
"debit_in_account_currency": total_payment,
"reference_type": "Loan",
"reference_name": loan,
})
account_amt_list.append({
"account": loan_account,
"credit_in_account_currency": principal_amount,
"party_type": applicant_type,
"party": applicant,
"reference_type": "Loan",
"reference_name": loan,
})
account_amt_list.append({
"account": interest_income_account,
"credit_in_account_currency": interest_amount,
"reference_type": "Loan",
"reference_name": loan,
})
journal_entry.set("accounts", account_amt_list)
return journal_entry.as_dict()
@frappe.whitelist()
def make_jv_entry(loan, company, loan_account, applicant_type, applicant, loan_amount,payment_account=None):
journal_entry = frappe.new_doc('Journal Entry')
journal_entry.voucher_type = 'Bank Entry'
journal_entry.user_remark = _('Against Loan: {0}').format(loan)
journal_entry.company = company
journal_entry.posting_date = nowdate()
account_amt_list = []
account_amt_list.append({
"account": loan_account,
"debit_in_account_currency": loan_amount,
"party_type": applicant_type,
"party": applicant,
"reference_type": "Loan",
"reference_name": loan,
})
account_amt_list.append({
"account": payment_account,
"credit_in_account_currency": loan_amount,
"reference_type": "Loan",
"reference_name": loan,
})
journal_entry.set("accounts", account_amt_list)
return journal_entry.as_dict() | gpl-3.0 | -408,841,644,022,723,100 | 36.604444 | 151 | 0.724232 | false |
daniilidis-group/spherical-cnn | spherical_cnn/spherical.py | 1 | 10142 | """ Spherical harmonics transforms/inverses and spherical convolution. """
import functools
import tensorflow as tf
import numpy as np
from scipy.special import sph_harm
from .util import safe_cast
from . import util
from . import tfnp_compatibility as tfnp
from .tfnp_compatibility import istf
# cache outputs; 2050 > 32*64
@functools.lru_cache(maxsize=2050, typed=False)
def sph_harm_lm(l, m, n):
""" Wrapper around scipy.special.sph_harm. Return spherical harmonic of degree l and order m. """
phi, theta = util.sph_sample(n)
phi, theta = np.meshgrid(phi, theta)
f = sph_harm(m, l, theta, phi)
return f
def sph_harm_all(n, as_tfvar=False, real=False):
""" Compute spherical harmonics for an n x n input (degree up to n // 2)
Args:
n (int): input dimensions; order will be n // 2
as_tfvar (bool): if True, return as list of tensorflow Variables.
real (bool): if True, return real harmonics
"""
harmonics = []
for l in range(n // 2):
if real:
minl = 0
else:
minl = -l
row = []
for m in range(minl, l+1):
row.append(sph_harm_lm(l, m, n))
harmonics.append(row)
if as_tfvar:
return tf.cast(tf.constant(sph_harm_to_shtools(harmonics)),
'complex64')
else:
return harmonics
def DHaj(n, mode='DH'):
""" Sampling weights. """
# Driscoll and Healy sampling weights (on the phi dimension)
# note: weights depend on the chosen grid, given by sph_sample
if mode == 'DH':
gridfun = lambda j: np.pi*j/n
elif mode == 'ours':
gridfun = lambda j: np.pi*(2*j+1)/2/n
else:
raise NotImplementedError()
l = np.arange(0, n/2)
a = [(2*np.sqrt(2)/n *
np.sin(gridfun(j)) *
(1/(2*l+1) * np.sin((2*l+1)*gridfun(j))).sum())
for j in range(n)]
return a
def sph_harm_transform(f, mode='DH', harmonics=None):
""" Project spherical function into the spherical harmonics basis. """
assert f.shape[0] == f.shape[1]
if isinstance(f, tf.Tensor):
sumfun = tf.reduce_sum
conjfun = lambda x: tf.conj(x)
n = f.shape[0].value
else:
sumfun = np.sum
conjfun = np.conj
n = f.shape[0]
assert np.log2(n).is_integer()
if harmonics is None:
harmonics = sph_harm_all(n)
a = DHaj(n, mode)
f = f*np.array(a)[np.newaxis, :]
real = is_real_sft(harmonics)
coeffs = []
for l in range(n // 2):
row = []
minl = 0 if real else -l
for m in range(minl, l+1):
            # WARNING: results are off by this factor when using the driscoll1994computing formulas
factor = 2*np.sqrt(np.pi)
row.append(sumfun(factor * np.sqrt(2*np.pi)/n * f * conjfun(harmonics[l][m-minl])))
coeffs.append(row)
return coeffs
def sph_harm_inverse(c, harmonics=None):
""" Inverse spherical harmonics transform. """
n = 2*len(c)
real = is_real_sft(c)
dtype = 'float32' if real else c[1][1].dtype
if harmonics is None:
harmonics = sph_harm_all(n, real=real)
if isinstance(c[0][0], tf.Tensor):
f = tf.zeros((n, n), dtype=dtype)
else:
f = np.zeros((n, n), dtype=dtype)
for l in range(n // 2):
lenm = l+1 if real else 2*l+1
for m in range(lenm):
if real:
# leverage symmetry of coefficients and harmonics
factor = 1 if m == 0 else 2
f += factor*(tfnp.real(c[l][m]) * tfnp.real(harmonics[l][m]) -
tfnp.imag(c[l][m]) * tfnp.imag(harmonics[l][m]))
else:
f += c[l][m] * harmonics[l][m]
return f
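# Illustrative sketch (not part of the original module): forward/inverse round
# trip on a random grid function; n=16 is an assumption, and the reconstruction
# matches the input only up to the n//2 band limit.
def _example_sht_roundtrip(n=16):
    f = np.random.randn(n, n)
    coeffs = sph_harm_transform(f)
    return sph_harm_inverse(coeffs)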
def sph_harm_transform_batch(f, method=None, *args, **kwargs):
return sph_harm_transform_batch_naive(f, *args, **kwargs)
def sph_harm_inverse_batch(c, method=None, *args, **kwargs):
return sph_harm_inverse_batch_naive(c, *args, **kwargs)
def sph_harm_transform_batch_naive(f, harmonics=None, m0_only=False):
""" Spherical harmonics batch-transform.
Args:
f (n, l, l, c)-array : functions are on l x l grid
harmonics (2, l/2, l/2, l, l)-array:
m0_only (bool): return only coefficients with order 0;
only them are needed when computing convolutions
Returns:
coeffs ((n, 2, l/2, l/2, c)-array):
"""
shapef = tfnp.shape(f)
n, l = shapef[:2]
assert shapef[2] == l
if harmonics is None:
harmonics = sph_harm_to_shtools(sph_harm_all(l))
shapeh = tfnp.shape(harmonics)
assert shapeh[1:] == (l//2, l//2, l, l)
assert shapeh[0] in [1, 2]
aj = np.array(DHaj(l))
# returns m=0 only; useful to expand kernel in spherical convolution
if m0_only:
harmonics = harmonics[slice(0, 1), :, slice(0, 1), ...]
na = np.newaxis
coeffs = tfnp.transpose(2*np.sqrt(2)*np.pi/l *
tfnp.dot(f * aj[na, na, :, na],
tfnp.conj(harmonics),
[[1, 2], [3, 4]]),
(0, 2, 3, 4, 1))
return coeffs
def sph_harm_inverse_batch_naive(c, harmonics=None):
""" Spherical harmonics batch inverse transform.
Args:
c ((n, 2, l/2, l/2, c)-array): sph harm coefficients; max degree is l/2
harmonics (2, l/2, l/2, l, l)-array:
Returns:
recons ((n, l, l, c)-array):
"""
shapec = tfnp.shape(c)
l = 2*shapec[2]
assert shapec[3] == l//2
if harmonics is None:
harmonics = sph_harm_to_shtools(sph_harm_all(l))
shapeh = tfnp.shape(harmonics)
assert shapeh[1:] == (l//2, l//2, l, l)
assert shapeh[0] in [1, 2]
real = True if shapeh[0] == 1 else False
na = np.newaxis
if real:
# using m, -m symmetry:
# c^{-m}Y^{-m} + c^mY^m = 2(Re(c^{m})Re(Y^m) - Im(c^{m})Im(Y^m))
# that does not apply to c_0 so we compensate by dividing it by two
factor = np.ones(tfnp.shape(c)[1:])[np.newaxis, ...]
factor[..., 0, :] = factor[..., 0, :]/2
c = c * factor
# c[..., 0, :] = c[..., 0, :]/2
recons = tfnp.transpose(2*(tfnp.dot(tfnp.real(c), tfnp.real(harmonics),
[[1, 2, 3], [0, 1, 2]]) -
tfnp.dot(tfnp.imag(c), tfnp.imag(harmonics),
[[1, 2, 3], [0, 1, 2]])),
(0, 2, 3, 1))
else:
recons = tfnp.transpose(tfnp.dot(c, harmonics,
[[1, 2, 3], [0, 1, 2]]),
(0, 2, 3, 1))
return recons
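# Illustrative sketch (not part of the original module): the shapes moved
# around by the batched transforms; batch size, resolution and channel count
# below are assumptions.
def _example_batch_shapes(n=2, l=16, c=3):
    f = np.random.randn(n, l, l, c)
    coeffs = sph_harm_transform_batch_naive(f)     # (n, 2, l/2, l/2, c)
    recons = sph_harm_inverse_batch_naive(coeffs)  # (n, l, l, c)
    return coeffs, recons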
def sph_conv(f, g, harmonics=None):
""" Spherical convolution f * g. """
stackfun = tf.stack if isinstance(f, tf.Tensor) else np.array
cf, cg = [sph_harm_transform(x, harmonics=harmonics) for x in [f, g]]
cfg = [2*np.pi*np.sqrt(4*np.pi / (2*l+1)) * stackfun(c1) * c2[l]
for l, (c1, c2) in enumerate(zip(cf, cg))]
return sph_harm_inverse(cfg)
def sph_conv_batch(f, g,
harmonics_or_legendre=None,
method=None,
spectral_pool=0,
harmonics_or_legendre_low=None):
""" CNN-like batch spherical convolution.
Args:
f (n, l, l, c)-array: input feature map. n entries, c channels
g (c, l, l, d)-array: convolution kernels
harmonics_or_legendre (): spherical harmonics or legendre polynomials to expand f and g
method (str): see sph_harm_transform_batch
spectral_pool (int): if > 0 run spectral pooling before ISHT
(bandwidth is reduced by a factor of 2**spectral_pool)
        harmonics_or_legendre_low (): low frequency harmonics or legendre to be used when spectral_pool > 0
Returns:
fg (n, l, l, d)-array
"""
shapef, shapeg = [tfnp.shape(x) for x in [f, g]]
spectral_filter = True if len(shapeg) == 5 else False
spectral_input = True if len(shapef) == 5 else False
n = shapef[2]
if spectral_input:
n *= 2
if not spectral_input:
cf = sph_harm_transform_batch(f, method, harmonics_or_legendre, m0_only=False)
else:
cf = f
if not spectral_filter:
cg = sph_harm_transform_batch(g, method, harmonics_or_legendre, m0_only=True)
else:
cg = g
shapecf, shapecg = [tfnp.shape(x) for x in [cf, cg]]
assert shapecf[4] == shapecg[0]
assert shapecf[2] == shapecg[2]
na = np.newaxis
# per degree factor
factor = (2*np.pi*np.sqrt(4*np.pi / (2*np.arange(n/2)+1)))[na, na, :, na, na, na]
cg = tfnp.transpose(cg, (1, 2, 3, 0, 4))[na, ...]
cf = cf[..., na]
if istf(cg) and istf(cf):
cg, cf = safe_cast(cg, cf)
cfg = factor * cf * cg
else:
cfg = factor * cf * cg
if spectral_pool > 0:
cfg = cfg[:, :, :n//(2**(spectral_pool+1)), :n//(2**(spectral_pool+1)), ...]
hol = harmonics_or_legendre_low
else:
hol = harmonics_or_legendre
# sum over channels
cfg = tfnp.sum(cfg, axis=-2)
return sph_harm_inverse_batch(cfg, method, hol)
def is_real_sft(h_or_c):
""" Detect if list of lists of harmonics or coefficients assumes real inputs (m>0) """
if istf(h_or_c):
d = tfnp.shape(h_or_c[1])[0]
else:
d = len(h_or_c[1])
isreal = True if d == 2 else False
return isreal
def sph_harm_to_shtools(c):
""" Convert our list format for the sph harm coefficients/harmonics to pyshtools (2, n, n) format. """
n = len(c)
real = is_real_sft(c)
dim1 = 1 if real else 2
out = np.zeros((dim1, n, n, *c[0][0].shape)) + 0j
for l, cc in enumerate(c):
cc = np.array(cc)
if not real:
m_minus = cc[:l][::-1]
m_plus = cc[l:]
else:
m_minus = np.array([])
m_plus = cc
# we get warnings here when using reals
if m_minus.size > 0:
out[1, l, 1:l+1, ...] = m_minus
out[0, l, :l+1, ...] = m_plus
return out
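# --- Illustrative usage sketch (not part of the original module) ---
# Stacking the list-of-lists harmonics into the pyshtools-style layout used by
# the batch routines above; per the assertions in sph_harm_inverse_batch_naive,
# the result has shape (1 or 2, l/2, l/2, l, l) depending on whether the basis
# is real or complex.
def _example_sph_harm_to_shtools():
    l = 8
    harmonics_list = sph_harm_all(l)            # list-of-lists format
    stacked = sph_harm_to_shtools(harmonics_list)
    return stacked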
| mit | -3,424,267,444,063,836,000 | 29.548193 | 109 | 0.540722 | false |
openstack/murano | murano/policy/model_policy_enforcer.py | 1 | 6097 | # Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
try:
# integration with congress is optional
import congressclient.v1.client as congress_client
except ImportError:
congress_client = None
from oslo_log import log as logging
from murano.common import auth_utils
from murano.common.i18n import _
from murano.policy import congress_rules
from murano.policy.modify.actions import action_manager as am
LOG = logging.getLogger(__name__)
class ValidationError(Exception):
"""Raised for validation errors."""
pass
class ModelPolicyEnforcer(object):
"""Policy Enforcer Implementation using Congress client
Converts murano model to list of congress data rules.
We ask congress using simulation api of congress rest client
to resolve "murano_system:predeploy_errors(env_id, obj_id, msg)"
table along with congress data rules to return validation results.
"""
def __init__(self, execution_session, action_manager=None):
self._execution_session = execution_session
self._action_manager = action_manager or am.ModifyActionManager()
self._client = None
def _create_client(self):
if not congress_client:
# congress client was not imported
raise ImportError("Import congresscliet error")
return congress_client.Client(
**auth_utils.get_session_client_parameters(
service_type='policy',
execution_session=self._execution_session))
@property
def client(self):
if self._client is None:
self._client = self._create_client()
return self._client
def modify(self, obj, package_loader=None):
"""Modifies model using Congress rule engine.
@type obj: object model
@param obj: Representation of model starting on
environment level (['Objects'])
        @type package_loader: murano.dsl.package_loader.MuranoPackageLoader
        @param package_loader: Optional. Used for evaluating parent class types
@raises ValidationError in case validation was not successful
"""
model = obj.to_dictionary()
LOG.debug('Modifying model')
LOG.debug(model)
env_id = model['?']['id']
result = self._execute_simulation(package_loader, env_id, model,
'predeploy_modify(eid, oid, action)')
raw_actions = result["result"]
if raw_actions:
actions = self._parse_simulation_result('predeploy_modify',
env_id, raw_actions)
for action in actions:
self._action_manager.apply_action(obj, action)
def validate(self, model, package_loader=None):
"""Validate model using Congress rule engine.
@type model: dict
@param model: Dictionary representation of model starting on
environment level (['Objects'])
@type package_loader: murano.dsl.package_loader.MuranoPackageLoader
@param package_loader: Optional. Used for evaluating parent class types
@raises ValidationError in case validation was not successful
"""
if model is None:
return
env_id = model['?']['id']
validation_result = self._execute_simulation(
package_loader, env_id, model,
'predeploy_errors(eid, oid, msg)')
if validation_result["result"]:
messages = self._parse_simulation_result(
'predeploy_errors', env_id,
validation_result["result"])
if messages:
result_str = "\n ".join(map(str, messages))
msg = _("Murano object model validation failed: {0}").format(
"\n " + result_str)
LOG.error(msg)
raise ValidationError(msg)
else:
LOG.info('Model valid')
def _execute_simulation(self, package_loader, env_id, model, query):
rules = congress_rules.CongressRulesManager().convert(
model, package_loader, self._execution_session.project_id)
rules_str = list(map(str, rules))
# cleanup of data populated by murano driver
rules_str.insert(0, 'deleteEnv("{0}")'.format(env_id))
rules_line = " ".join(rules_str)
LOG.debug('Congress rules: \n {rules} '
.format(rules='\n '.join(rules_str)))
validation_result = self.client.execute_policy_action(
"murano_system",
"simulate",
False,
False,
{'query': query,
'action_policy': 'murano_action',
'sequence': rules_line})
return validation_result
@staticmethod
def _parse_simulation_result(query, env_id, results):
"""Transforms the list of results
Transforms a list of strings in format:
['predeploy_errors("env_id_1", "obj_id_1", "message1")',
'predeploy_errors("env_id_2", "obj_id_2", "message2")']
        to a list containing only the messages for the provided
        env_id (e.g. 'env_id_1'):
            ['message1']
"""
messages = []
        regexp = query + r'\("([^"]*)",\s*"([^"]*)",\s*"(.*)"\)'
for result in results:
match = re.search(regexp, result)
if match:
if env_id in match.group(1):
messages.append(match.group(3))
return messages
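# --- Illustrative usage sketch (not part of the original module) ---
# How raw Congress simulation rows are reduced to plain messages for a single
# environment; the rows below are made-up sample data.
def _example_parse_simulation_result():
    rows = ['predeploy_errors("env-1", "obj-1", "first message")',
            'predeploy_errors("env-2", "obj-2", "other message")']
    return ModelPolicyEnforcer._parse_simulation_result(
        'predeploy_errors', 'env-1', rows)  # -> ['first message']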
| apache-2.0 | 7,315,240,430,330,293,000 | 33.84 | 79 | 0.607512 | false |