#!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file contains helper methods used in creating a release.
import re
import sys
from subprocess import Popen, PIPE
try:
from jira.client import JIRA # noqa: F401
# Old versions have JIRAError in exceptions package, new (0.5+) in utils.
try:
from jira.exceptions import JIRAError
except ImportError:
from jira.utils import JIRAError
except ImportError:
print("This tool requires the jira-python library")
print("Install using 'sudo pip3 install jira'")
sys.exit(-1)
try:
from github import Github # noqa: F401
from github import GithubException
except ImportError:
print("This tool requires the PyGithub library")
print("Install using 'sudo pip install PyGithub'")
sys.exit(-1)
# Contributors list file name
contributors_file_name = "contributors.txt"
# Prompt the user to answer yes or no until they do so
def yesOrNoPrompt(msg):
    response = input("%s [y/n]: " % msg)
    while response != "y" and response != "n":
        response = input("%s [y/n]: " % msg)
    return response == "y"
# Utility functions for running git commands (written with Git 1.8.5)
def run_cmd(cmd):
return Popen(cmd, stdout=PIPE).communicate()[0].decode("utf8")
def run_cmd_error(cmd):
return Popen(cmd, stdout=PIPE, stderr=PIPE).communicate()[1].decode("utf8")
def get_date(commit_hash):
return run_cmd(["git", "show", "--quiet", "--pretty=format:%cd", commit_hash])
def tag_exists(tag):
stderr = run_cmd_error(["git", "show", tag])
return "error" not in stderr
# A type-safe representation of a commit
class Commit:
def __init__(self, _hash, author, title, pr_number=None):
self._hash = _hash
self.author = author
self.title = title
self.pr_number = pr_number
def get_hash(self):
return self._hash
def get_author(self):
return self.author
def get_title(self):
return self.title
def get_pr_number(self):
return self.pr_number
def __str__(self):
closes_pr = "(Closes #%s)" % self.pr_number if self.pr_number else ""
return "%s %s %s %s" % (self._hash, self.author, self.title, closes_pr)
# Return all commits that belong to the specified tag.
#
# Under the hood, this runs a `git log` on that tag and parses the fields
# from the command output to construct a list of Commit objects. Note that
# because certain fields reside in the commit description and cannot be parsed
# through the GitHub API itself, we need to do some intelligent regex parsing
# to extract those fields.
#
# This is written using Git 1.8.5.
def get_commits(tag):
commit_start_marker = "|=== COMMIT START MARKER ===|"
commit_end_marker = "|=== COMMIT END MARKER ===|"
field_end_marker = "|=== COMMIT FIELD END MARKER ===|"
log_format =\
commit_start_marker + "%h" +\
field_end_marker + "%an" +\
field_end_marker + "%s" +\
commit_end_marker + "%b"
output = run_cmd(["git", "log", "--quiet", "--pretty=format:" + log_format, tag])
commits = []
raw_commits = [c for c in output.split(commit_start_marker) if c]
for commit in raw_commits:
if commit.count(commit_end_marker) != 1:
print("Commit end marker not found in commit: ")
for line in commit.split("\n"):
print(line)
sys.exit(1)
# Separate commit digest from the body
# From the digest we extract the hash, author and the title
# From the body, we extract the PR number and the github username
[commit_digest, commit_body] = commit.split(commit_end_marker)
if commit_digest.count(field_end_marker) != 2:
sys.exit("Unexpected format in commit: %s" % commit_digest)
[_hash, author, title] = commit_digest.split(field_end_marker)
        # The PR number and github username are in the commit message
        # itself and cannot be accessed through any GitHub API
pr_number = None
match = re.search("Closes #([0-9]+) from ([^/\\s]+)/", commit_body)
if match:
[pr_number, github_username] = match.groups()
# If the author name is not valid, use the github
# username so we can translate it properly later
if not is_valid_author(author):
author = github_username
author = author.strip()
commit = Commit(_hash, author, title, pr_number)
commits.append(commit)
return commits
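# Example (hypothetical values): a squashed merge commit whose body contains a line like
#   "Closes #12345 from someuser/fix-branch and squashes the following commits: ..."
# is parsed by get_commits above into pr_number = "12345" and github_username = "someuser".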
# Maintain a mapping for translating issue types to contributions in the release notes
# This serves an additional function of warning the user against unknown issue types
# Note: This list is partially derived from this link:
# https://issues.apache.org/jira/plugins/servlet/project-config/SPARK/issuetypes
# Keep these in lower case
known_issue_types = {
"bug": "bug fixes",
"build": "build fixes",
"dependency upgrade": "build fixes",
"improvement": "improvements",
"new feature": "new features",
"documentation": "documentation",
"test": "test",
"task": "improvement",
"sub-task": "improvement"
}
# Maintain a mapping for translating component names when creating the release notes
# This serves an additional function of warning the user against unknown components
# Note: This list is largely derived from this link:
# https://issues.apache.org/jira/plugins/servlet/project-config/SPARK/components
CORE_COMPONENT = "Core"
known_components = {
"block manager": CORE_COMPONENT,
"build": CORE_COMPONENT,
"deploy": CORE_COMPONENT,
"documentation": CORE_COMPONENT,
"examples": CORE_COMPONENT,
"graphx": "GraphX",
"input/output": CORE_COMPONENT,
"java api": "Java API",
"k8s": "Kubernetes",
"kubernetes": "Kubernetes",
"mesos": "Mesos",
"ml": "MLlib",
"mllib": "MLlib",
"project infra": "Project Infra",
"pyspark": "PySpark",
"shuffle": "Shuffle",
"spark core": CORE_COMPONENT,
"spark shell": CORE_COMPONENT,
"sql": "SQL",
"streaming": "Streaming",
"web ui": "Web UI",
"windows": "Windows",
"yarn": "YARN"
}
# Translate issue types using a format appropriate for writing contributions
# If an unknown issue type is encountered, warn the user
def translate_issue_type(issue_type, issue_id, warnings):
issue_type = issue_type.lower()
if issue_type in known_issue_types:
return known_issue_types[issue_type]
else:
warnings.append("Unknown issue type \"%s\" (see %s)" % (issue_type, issue_id))
return issue_type
# Translate component names using a format appropriate for writing contributions
# If an unknown component is encountered, warn the user
def translate_component(component, commit_hash, warnings):
component = component.lower()
if component in known_components:
return known_components[component]
else:
warnings.append("Unknown component \"%s\" (see %s)" % (component, commit_hash))
return component
# Parse components in the commit message
# The returned components are already filtered and translated
def find_components(commit, commit_hash):
components = re.findall(r"\[\w*\]", commit.lower())
components = [translate_component(c, commit_hash, [])
for c in components if c in known_components]
return components
# Join a list of strings in a human-readable manner
# e.g. ["Juice"] -> "Juice"
# e.g. ["Juice", "baby"] -> "Juice and baby"
# e.g. ["Juice", "baby", "moon"] -> "Juice, baby, and moon"
def nice_join(str_list):
str_list = list(str_list) # sometimes it's a set
if not str_list:
return ""
elif len(str_list) == 1:
return next(iter(str_list))
elif len(str_list) == 2:
return " and ".join(str_list)
else:
return ", ".join(str_list[:-1]) + ", and " + str_list[-1]
# Return the full name of the specified user on GitHub
# If the user doesn't exist, return None
def get_github_name(author, github_client):
if github_client:
try:
return github_client.get_user(author).name
except GithubException as e:
# If this is not a "not found" exception
if e.status != 404:
raise e
return None
# Return the full name of the specified user on JIRA
# If the user doesn't exist, return None
def get_jira_name(author, jira_client):
if jira_client:
try:
return jira_client.user(author).displayName
except JIRAError as e:
# If this is not a "not found" exception
if e.status_code != 404:
raise e
return None
# Return whether the given name is in the form <First Name><space><Last Name>
def is_valid_author(author):
if not author:
return False
return " " in author and not re.findall("[0-9]", author)
# Capitalize the first letter of each word in the given author name
def capitalize_author(author):
if not author:
return None
words = author.split(" ")
words = [w[0].capitalize() + w[1:] for w in words if w]
return " ".join(words)
#!/usr/bin/python
# Disable some pylint messages
# pylint: disable=C0103,R0201,R0904,W0511
# C0103 : Invalid name "%s" (should match %s)
# W0212 : Access to a protected member %s of a client class
# R0201 : Method could be a function
# R0904 : Too many public methods
# W0511 : TODO/FIXME/XXX
"""
Quaternion definitions and useful utilities.
"""
import math
import unittest
from Vector import Vector
#from Matrix import Matrix
########################################################################
# Quaternion
class Quaternion:
"""Representation of a quaternion, defined as:
s + ai + bj + ck
or
[s,v]
where s,a,b,c are scalars, v is a vector,
and i, j, k are defined such that:
i^2 = j^2 = k^2 = ijk = -1
ij = k, jk = i, ki = j
ji = -k, kj = -i, ik = -j
"""
def __init__(self, s, a, b, c):
self.mPrintSpec = '%f'
self.mScalar = s
self.mVector = Vector(a, b, c)
@staticmethod
def fromScalarVector(scalar, vector):
"""Define a quaternion from a scalar and a vector."""
# TODO: Refactor for performance.
return Quaternion(scalar, vector[0], vector[1], vector[2])
def clone(self):
v = self.mVector[:]
return Quaternion(self.mScalar, v[0], v[1], v[2])
def __str__(self):
return '[ %s, %s ]' % (self.mPrintSpec % self.mScalar, self.mVector)
def str2(self):
"""Alternate way to represent a Quaternion as a string."""
signs = [ ('+' if f >= 0 else '-') for f in self.mVector ]
vals = [ abs(f) for f in self.mVector ]
return '%s %s %si %s %sj %s %sk' % (self.mScalar,
signs[0],
vals[0],
signs[1],
vals[1],
signs[2],
vals[2])
def __eq__(self, q):
'Equality operator.'
return self.mScalar == q.mScalar and self.mVector == q.mVector
def __ne__(self, q):
'Not equals'
return not self.__eq__(q)
def compare(self, seq):
"""Compare the quaternion to a sequence assumed to be in
the form [ s, a, b, c ]."""
return (len(seq) == 4 and
self.mScalar == seq[0] and self.mVector[0] == seq[1] and
self.mVector[1] == seq[2] and self.mVector[2] == seq[3])
def __add__(self, q):
'Return self + q'
return Quaternion(self.mScalar + q.mScalar,
self.mVector[0] + q.mVector[0],
self.mVector[1] + q.mVector[1],
self.mVector[2] + q.mVector[2])
def __sub__(self, q):
'Return self - q'
return Quaternion(self.mScalar - q.mScalar,
self.mVector[0] - q.mVector[0],
self.mVector[1] - q.mVector[1],
self.mVector[2] - q.mVector[2])
def scale(self, s):
'Scale this quaternion by scalar s in-place.'
self.mScalar = self.mScalar * float(s)
self.mVector.scale(s)
def mults(self, s):
'Return self * scalar as a new Quaternion.'
r = Quaternion.fromScalarVector(self.mScalar, self.mVector)
r.scale(s)
return r
def mul1(self, q):
"""Multiplication Algorithm 1:
This is a very nice definition of the quaternion multiplication
operator, but it is terribly inefficient."""
s = self.mScalar * q.mScalar - self.mVector.dot(q.mVector)
v = q.mVector.mults(self.mScalar) + \
self.mVector.mults(q.mScalar) + \
self.mVector.cross(q.mVector)
return Quaternion.fromScalarVector(s, v)
def mul2(self, q):
"""Multiplication Algorithm 2: This is a much more efficient
        implementation of quaternion multiplication. It is over 3x faster than
mul1."""
s = (self.mScalar * q.mScalar - self.mVector[0] * q.mVector[0] -
self.mVector[1] * q.mVector[1] - self.mVector[2] * q.mVector[2])
a = (self.mScalar * q.mVector[0] + self.mVector[0] * q.mScalar +
self.mVector[1] * q.mVector[2] - self.mVector[2] * q.mVector[1])
b = (self.mScalar * q.mVector[1] - self.mVector[0] * q.mVector[2] +
self.mVector[1] * q.mScalar + self.mVector[2] * q.mVector[0])
c = (self.mScalar * q.mVector[2] + self.mVector[0] * q.mVector[1] -
self.mVector[1] * q.mVector[0] + self.mVector[2] * q.mScalar)
return Quaternion(s, a, b, c)
def mulq(self, q):
"Multiply two quaternions and return a new quaternion product."
s = (self.mScalar * q.mScalar - self.mVector[0] * q.mVector[0] -
self.mVector[1] * q.mVector[1] - self.mVector[2] * q.mVector[2])
a = (self.mScalar * q.mVector[0] + self.mVector[0] * q.mScalar +
self.mVector[1] * q.mVector[2] - self.mVector[2] * q.mVector[1])
b = (self.mScalar * q.mVector[1] - self.mVector[0] * q.mVector[2] +
self.mVector[1] * q.mScalar + self.mVector[2] * q.mVector[0])
c = (self.mScalar * q.mVector[2] + self.mVector[0] * q.mVector[1] -
self.mVector[1] * q.mVector[0] + self.mVector[2] * q.mScalar)
return Quaternion(s, a, b, c)
def conj(self):
'return the conjugate of a quaternion.'
return Quaternion(self.mScalar, -self.mVector[0],
-self.mVector[1], -self.mVector[2])
def norm(self):
'return the norm of a quaternion.'
return math.sqrt(sum([x*x for x in self.mVector])
+ self.mScalar * self.mScalar)
def normalize(self):
'reset the quaternion so that it has norm = 1'
n_reciprocal = 1.0 / self.norm()
self.mScalar = self.mScalar * n_reciprocal
self.mVector.scale(n_reciprocal)
def inverse(self):
"""Invert the quaternion and return the inverse.
inverse = conjugate / (norm^2)
"""
n = self.norm()
c = self.conj()
d = 1.0 / (n * n)
c.scale(d)
return c
def invert(self):
'Invert in place.'
n = self.norm()
d = 1.0 / (n * n)
for i in range(0, 3) :
self.mVector[i] *= -d
self.mScalar *= d
@staticmethod
def forRotation(axis, angle):
"""
Return the quaternion which represents a rotation about
the provided axis (vector) by angle (in radians).
"""
if round(axis.norm(),6) != 1.0:
raise ValueError('rotation axis must be a unit vector!')
half_angle = angle * 0.5
c = math.cos(half_angle)
s = math.sin(half_angle)
return Quaternion.fromScalarVector(c, axis.mults(s))
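# Usage sketch (not part of the original class): rotate a Vector with a unit quaternion,
# e.g. one produced by Quaternion.forRotation. For a unit quaternion the inverse equals
# the conjugate, so the rotated vector is the vector part of q * [0, v] * conj(q).
# For example, rotating Vector(1, 0, 0) by pi/2 about the z axis Vector(0, 0, 1)
# should give approximately Vector(0, 1, 0).
def rotateVector(q, v):
    'Rotate Vector v by the unit quaternion q; returns the rotated Vector.'
    p = Quaternion.fromScalarVector(0.0, v)
    return q.mulq(p).mulq(q.conj()).mVector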
########################################################################
# Unit tests for Quaternions
class QuaternionTest(unittest.TestCase):
'Unit tests for Quaternions'
def setUp(self):
''
pass
def tearDown(self):
''
pass
def testEquals(self):
'Test equality operator.'
q1 = Quaternion(1, 2, 3, 4)
q2 = Quaternion(1, 2, 3, 4)
assert q1 == q2
assert not (q1 != q2)
q3 = Quaternion.fromScalarVector(1, Vector(2, 3, 4))
assert q2 == q3
def testCompare(self):
'Test comparison.'
q = Quaternion(1, 2, 3, 4)
assert q.compare([1, 2, 3, 4])
assert not q.compare([1, 2, 3, 4, 5])
assert not q.compare([0, 2, 3, 4])
def testAdd(self):
'Test quaternion addition'
q1 = Quaternion(1, 2, 3, 4)
q2 = Quaternion(5, 6, 7, 8)
assert q1 + q2 == Quaternion(6, 8, 10, 12)
qa = Quaternion(2, -2, 3, -4)
qb = Quaternion(1, -2, 5, -6)
assert qa + qb == Quaternion(3, -4, 8, -10)
assert qa - qb == Quaternion(1, 0, -2, 2)
def testSub(self):
'Test quaternion subtraction.'
q1 = Quaternion(1, 2, 3, 4)
q2 = Quaternion(5, 6, 7, 8)
assert (q2 - q1).compare([4, 4, 4, 4])
def testScale(self):
'Test quaternion scaling.'
q1 = Quaternion(1, 2, 3, 4)
q2 = q1.mults(3)
assert q1.compare([1, 2, 3, 4])
assert q2.compare([3, 6, 9, 12])
q2.scale(5)
assert q2.compare([15, 30, 45, 60])
def testMul(self):
'Test Quaternion multiplication'
q1 = Quaternion(-2, 0, 0, 0)
q2 = Quaternion(5, 0, 0, 0)
assert(q1.mul1(q2).compare([-10, 0, 0, 0]))
assert(q1.mul1(q2) == q1.mul2(q2))
q1 = Quaternion(1, 2, 3, 4)
q2 = Quaternion(4, 3, 2, 1)
assert(q1.mul1(q2) == Quaternion(-12, 6, 24, 12))
assert(q1.mul1(q2) == q1.mul2(q2))
qa = Quaternion(1, 2, 3, 4)
qb = Quaternion(2, 3, 4, 5)
assert qa.mul1(qb).compare([-36, 6, 12, 12])
assert qa.mul2(qb).compare([-36, 6, 12, 12])
qa = Quaternion(2, -2, 3, -4)
qb = Quaternion(1, -2, 5, -6)
assert qb.mulq(qa).compare([-41, -8, 17, -12])
assert qa.mulq(qb).compare([-41, -4, 9, -20])
def testMul2(self):
'Verify that Quaternion obeys the basic laws of quaternions.'
neg1 = Quaternion(-1, 0, 0, 0)
i = Quaternion(0, 1, 0, 0)
j = Quaternion(0, 0, 1, 0)
k = Quaternion(0, 0, 0, 1)
negi = i.mults(-1)
negj = j.mults(-1)
negk = k.mults(-1)
assert(i.mul1(i) == neg1) # i^2 == -1
assert(j.mul1(j) == neg1) # j^2 == -1
assert(k.mul1(k) == neg1) # k^2 == -1
assert(i.mul1(j).mul1(k) == neg1) # ijk == -1
assert(i.mul1(j) == k) # ij == k
assert(j.mul1(k) == i) # jk == i
assert(k.mul1(i) == j) # ki == j
assert(j.mul1(i) == negk) # ji == -k
assert(k.mul1(j) == negi) # kj == -i
assert(i.mul1(k) == negj) # ik == -j
def testMul3(self):
'Verify that Quaternion obeys the basic laws of quaternions.'
neg1 = Quaternion(-1, 0, 0, 0)
i = Quaternion(0, 1, 0, 0)
j = Quaternion(0, 0, 1, 0)
k = Quaternion(0, 0, 0, 1)
negi = i.mults(-1)
negj = j.mults(-1)
negk = k.mults(-1)
assert(i.mul2(i) == neg1) # i^2 == -1
assert(j.mul2(j) == neg1) # j^2 == -1
assert(k.mul2(k) == neg1) # k^2 == -1
assert(i.mul2(j).mul2(k) == neg1) # ijk == -1
assert(i.mul2(j) == k) # ij == k
assert(j.mul2(k) == i) # jk == i
assert(k.mul2(i) == j) # ki == j
assert(j.mul2(i) == negk) # ji == -k
assert(k.mul2(j) == negi) # kj == -i
assert(i.mul2(k) == negj) # ik == -j
def testMulq4(self):
'Test Quaternion multiplication'
q1 = Quaternion(-2, 0, 0, 0)
q2 = Quaternion(5, 0, 0, 0)
assert(q1.mulq(q2).compare([-10, 0, 0, 0]))
q1 = Quaternion(1, 2, 3, 4)
q2 = Quaternion(4, 3, 2, 1)
assert(q1.mulq(q2) == Quaternion(-12, 6, 24, 12))
qa = Quaternion(1, 2, 3, 4)
qb = Quaternion(2, 3, 4, 5)
assert qa.mulq(qb).compare([-36, 6, 12, 12])
neg1 = Quaternion(-1, 0, 0, 0)
i = Quaternion(0, 1, 0, 0)
j = Quaternion(0, 0, 1, 0)
k = Quaternion(0, 0, 0, 1)
negi = i.mults(-1)
negj = j.mults(-1)
negk = k.mults(-1)
assert(i.mulq(i) == neg1) # i^2 == -1
assert(j.mulq(j) == neg1) # j^2 == -1
assert(k.mulq(k) == neg1) # k^2 == -1
assert(i.mulq(j).mulq(k) == neg1) # ijk == -1
assert(i.mulq(j) == k) # ij == k
assert(j.mulq(k) == i) # jk == i
assert(k.mulq(i) == j) # ki == j
assert(j.mulq(i) == negk) # ji == -k
assert(k.mulq(j) == negi) # kj == -i
assert(i.mulq(k) == negj) # ik == -j
def testPrint(self):
'Test printing functionality'
q = Quaternion(1, 2, 3, 4)
assert q.__str__() == '[ 1.000000, [ 2.000000, 3.000000, 4.000000 ] ]'
def testConjugate(self):
'Test conjugate operation.'
q1 = Quaternion(1, 2, 3, 4)
assert q1.conj().compare([1, -2, -3, -4])
q2 = q1.conj()
assert q1.mulq(q2).compare([30, 0, 0, 0])
def testNorm(self):
'Test norm function'
q1 = Quaternion(1, 4, 4, -4)
assert q1.norm() == 7
q2 = Quaternion(1, 4, 4, -4)
q2.normalize()
assert q2.norm() == 1
def testInvert(self):
'Test Quaternion inversion.'
q1 = Quaternion(1, 2, 3, 4)
q2 = q1.inverse()
assert q1 != q2
q1.invert()
assert q1 == q2
assert q1.compare([1.0/30.0, -2.0/30.0, -3.0/30.0, -4.0/30.0])
def testAlternateRepresentation(self):
'Test the alternate representation of the quaternion.'
q = Quaternion(3, -4, 5, -7)
s = q.str2()
assert s == '3 - 4.0i + 5.0j - 7.0k', s
def testRotationalQuaternion(self):
'Test the quaternion representation of a rotation.'
axis = Vector(1, 1, 1).normalize()
angle = 2.0 # radians!
q1 = Quaternion.forRotation(axis, angle)
vv = math.sin(1.0) / (math.sqrt(3.0))
cc = math.cos(1.0)
q2 = Quaternion(cc, vv, vv, vv)
assert q1.__str__() == q2.__str__(), '%s %s' % (q1, q2)
hitError = False
axis = axis.mults(1.2)
try:
q1 = Quaternion.forRotation(axis, angle)
        except ValueError as e:
            assert str(e) == 'rotation axis must be a unit vector!'
hitError = True
assert hitError
def testClone(self):
q1 = Quaternion(1, 2, 3, 4)
q2 = q1.clone()
assert q1 == q2
q2.mScalar = 7
assert q1 != q2
q2 = q1.clone()
assert q1 == q2
q2.mVector[1] = -3
assert q1 != q2
#!/usr/bin/env python
#
# Copyright (c) 2015 Pavel Lazar pavel.lazar (at) gmail.com
#
# The Software is provided WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED.
#####################################################################
import json
import capabilities
import re
from collections import OrderedDict
from configuration_builder_exceptions import OpenBoxBlockConfigurationError
class FieldType:
BOOLEAN = 'boolean'
ARRAY = 'array'
INTEGER = 'integer'
NUMBER = 'number'
NULL = 'null'
OBJECT = 'object'
STRING = 'string'
MAC_ADDRESS = 'mac_address'
IPV4_ADDRESS = 'ipv4_address'
MATCH_PATTERNS = 'match_patterns'
COMPOUND_MATCHES = 'compound_matches'
IPV4_TRANSLATION_RULES = 'ipv4_translator_rules'
class ConfigField(object):
_SUPPORTED_MATCH_FIELDS = set(capabilities.SUPPORTED_MATCH_FIELDS)
_TRANSLATION_RULES_REGEX = [re.compile(r'(drop|discard)'),
re.compile(r'pass \d+'),
re.compile(r'keep \d+ \d+'),
re.compile(
r"pattern ((?:[0-9]{1,3}\.){3}[0-9]{1,3}|-) [0-9-#?]+ ((?:[0-9]{1,3}\.){3}[0-9]{1,3}|-) [0-9-#?]+ \d+ \d+")]
_MAC_ADDRESS = re.compile(r"^([0-9A-Fa-f]{2}:){5}([0-9A-Fa-f]{2})$")
_IPV4_ADDRESS = re.compile(
r"(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)")
def __init__(self, name, required, type, description=None):
self.name = name
self.required = required
self.type = type
self.description = description or ''
@classmethod
def from_dict(cls, config):
name = config['name']
required = bool(config['required'])
type = config['type']
descr = config.get('description', None)
if type not in FieldType.__dict__.values():
raise ValueError("unknown type %s for field" % type)
return cls(name, required, type, descr)
def validate_value_type(self, value):
if self.type == FieldType.NULL:
return value is None
elif self.type == FieldType.BOOLEAN:
return isinstance(value, bool)
elif self.type == FieldType.ARRAY:
return isinstance(value, list)
elif self.type == FieldType.INTEGER:
return isinstance(value, (int, long))
elif self.type == FieldType.NUMBER:
return isinstance(value, (int, long, float))
elif self.type == FieldType.STRING:
return isinstance(value, basestring)
elif self.type == FieldType.OBJECT:
return isinstance(value, dict)
elif self.type == FieldType.MATCH_PATTERNS:
return (isinstance(value, (tuple, list)) and
all(self._is_valid_match_pattern(pattern) for pattern in value))
elif self.type == FieldType.IPV4_TRANSLATION_RULES:
return (isinstance(value, (tuple, list)) and
all(self._is_valid_ipv4_translation_rule(input_spec) for input_spec in value))
elif self.type == FieldType.MAC_ADDRESS:
return isinstance(value, str) and self._is_valid_mac_address(value)
elif self.type == FieldType.IPV4_ADDRESS:
return isinstance(value, str) and self._is_valid_ipv4_address(value)
elif self.type == FieldType.COMPOUND_MATCHES:
return (isinstance(value, (tuple, list)) and
all(self._is_valid_compound_match(match) for match in value))
def _is_valid_match_pattern(self, pattern):
return isinstance(pattern, dict) and all(field in self._SUPPORTED_MATCH_FIELDS for field in pattern)
def _is_valid_ipv4_translation_rule(self, rule):
for regex in self._TRANSLATION_RULES_REGEX:
if regex.match(rule):
return True
return False
def to_dict(self):
result = OrderedDict()
result['name'] = self.name
result['required'] = self.required
result['type'] = self.type
result['description'] = self.description
return result
def _is_valid_mac_address(self, value):
return self._MAC_ADDRESS.match(value) is not None
def _is_valid_ipv4_address(self, value):
return self._IPV4_ADDRESS.match(value) is not None
def _is_valid_compound_match(self, match):
try:
return (match['type'] == 'HeaderPayloadMatch' and
self._is_valid_match_pattern(match['header_match']) and
self._is_valid_payload_match(match['payload_match']))
except KeyError:
return False
def _is_valid_payload_match(self, match):
return (isinstance(match, (tuple, list)) and
all(self._is_valid_payload_pattern(pattern) for pattern in match))
def _is_valid_payload_pattern(self, pattern):
try:
return (pattern['type'] == 'PayloadPattern' and
isinstance(pattern['pattern'], (str, unicode)))
except KeyError:
return False
class HandlerField(object):
def __init__(self, name, type, description=None):
self.name = name
self.type = type
self.description = description or ''
def to_dict(self):
result = OrderedDict()
result['name'] = self.name
result['type'] = self.type
result['description'] = self.description
return result
@classmethod
def from_dict(cls, config):
name = config['name']
type = config['type']
descr = config.get('description', None)
if type not in FieldType.__dict__.values():
raise ValueError("unknown type %s for field" % type)
return cls(name, type, descr)
class OpenBoxBlockMeta(type):
def __init__(cls, name, bases, dct):
if not hasattr(cls, "blocks_registry"):
# this is the base class. Create an empty registry
cls.blocks_registry = {}
else:
# this is the derived class. Add cls to the registry
cls.blocks_registry[name] = cls
super(OpenBoxBlockMeta, cls).__init__(name, bases, dct)
class OpenBoxBlock(object):
"""
The base class for all blocks
"""
__metaclass__ = OpenBoxBlockMeta
__fields__ = []
__read_handlers__ = []
__write_handlers__ = []
def __init__(self, name, **kwargs):
self.name = name
for field in self.__fields__:
try:
value = kwargs[field.name]
if not field.validate_value_type(value):
raise TypeError("Field '{field}' is not a valid '{rtype}'".format(field=field.name,
rtype=field.type))
setattr(self, field.name, value)
except KeyError:
if field.required:
raise ValueError("Required field '{field}' not given".format(field=field.name))
@classmethod
def from_dict(cls, config):
"""
Create an instance of an OpenBox Block from the blocks configuration dict
:param config: The block's configuration
:type config: dict
:return: An instance of a specific type
:rtype: OpenBoxBlock
"""
        block_type = config.pop('type', None)
if block_type is None:
raise OpenBoxBlockConfigurationError("No block type is given in the block's configuration")
# noinspection PyUnresolvedReferences
clazz = cls.blocks_registry.get(block_type)
if clazz is None:
raise OpenBoxBlockConfigurationError("Unknown block type %s" % block_type)
        name = config.pop('name', None)
if name is None:
raise OpenBoxBlockConfigurationError("A block must have an instance name")
config = config.pop('config')
return clazz(name, **config)
@classmethod
def to_dict_schema(cls):
schema = OrderedDict()
schema['type'] = cls.__name__
schema['configuration'] = [field.to_dict() for field in cls.__fields__]
schema['read_handlers'] = [field.to_dict() for field in cls.__read_handlers__]
schema['write_handlers'] = [field.to_dict() for field in cls.__write_handlers__]
return schema
@classmethod
def to_json_schema(cls, **kwargs):
return json.dumps(cls.to_dict_schema(), **kwargs)
def to_dict(self):
result = OrderedDict()
result['type'] = self.__class__.__name__
result['name'] = self.name
config = dict()
for field in self.__fields__:
value = getattr(self, field.name, None)
if value is not None:
config[field.name] = value
result['config'] = config
return result
def to_json(self, **kwargs):
return json.dumps(self.to_dict(), **kwargs)
def __str__(self):
return self.to_json()
@property
def type(self):
return self.__class__.__name__
def __eq__(self, other):
if not isinstance(self, other.__class__):
return False
return self.name == other.name and all(
getattr(self, field.name, None) == getattr(other, field.name, None) for field in self.__fields__)
def __ne__(self, other):
return not self.__eq__(other)
def build_open_box_block(name, config_fields=None, read_handlers=None, write_handlers=None):
"""
Create an OpenBoxBlock class based on the arguments it receives.
:param string name: The class's name
:param list(ConfigField) config_fields: The configuration fields
:param list(HandlerField) read_handlers: The read handlers
    :param list(HandlerField) write_handlers: The write handlers
:return: An OpenBoxBlock class
:rtype: OpenBoxBlock
"""
config_fields = config_fields or []
read_handlers = read_handlers or []
write_handlers = write_handlers or []
if not all(isinstance(field, ConfigField) for field in config_fields):
raise TypeError("All config fields must be of type ConfigField")
if not all(isinstance(field, HandlerField) for field in read_handlers):
raise TypeError("All read handlers must be of type HandlerField")
if not all(isinstance(field, HandlerField) for field in write_handlers):
raise TypeError("All write handlers must be of type HandlerField")
args = dict(__fields__=config_fields, __read_handlers__=read_handlers, __write_handlers__=write_handlers)
return OpenBoxBlockMeta(name, (OpenBoxBlock,), args)
def build_open_box_block_from_dict(block):
name = block['name']
config_fields = [ConfigField.from_dict(cfg) for cfg in block.get('config_fields', [])]
    read_handlers = [HandlerField.from_dict(handler) for handler in block.get('read_handlers', [])]
    write_handlers = [HandlerField.from_dict(handler) for handler in block.get('write_handlers', [])]
return build_open_box_block(name, config_fields, read_handlers, write_handlers)
def build_open_box_from_json(json_block):
return build_open_box_block_from_dict(json.loads(json_block))
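# Usage sketch (illustrative only): instantiate a registered block from a configuration
# dict and serialize it back. The block type, instance name and 'eth0' device name are
# hypothetical example values; the FromDevice block itself is defined further down in
# this module and registered by the metaclass at import time.
def _example_block_round_trip():
    config = {'type': 'FromDevice',
              'name': 'from_device_1',
              'config': {'devname': 'eth0', 'promisc': True}}
    block = OpenBoxBlock.from_dict(config)
    return block.to_dict()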
FromDevice = build_open_box_block('FromDevice',
config_fields=[
ConfigField('devname', True, FieldType.STRING),
ConfigField('sniffer', False, FieldType.BOOLEAN),
ConfigField('promisc', False, FieldType.BOOLEAN),
ConfigField('snaplen', False, FieldType.INTEGER),
],
read_handlers=[
HandlerField('count', FieldType.INTEGER),
HandlerField('byte_count', FieldType.INTEGER),
HandlerField('rate', FieldType.NUMBER),
HandlerField('byte_rate', FieldType.INTEGER),
HandlerField('drops', FieldType.STRING),
],
write_handlers=[
HandlerField('reset_counts', FieldType.NULL)
])
FromDump = build_open_box_block('FromDump',
config_fields=[
ConfigField('filename', True, FieldType.STRING),
ConfigField('timing', False, FieldType.BOOLEAN),
ConfigField('active', False, FieldType.BOOLEAN),
],
read_handlers=[
HandlerField('count', FieldType.INTEGER),
HandlerField('byte_count', FieldType.INTEGER),
HandlerField('rate', FieldType.NUMBER),
HandlerField('byte_rate', FieldType.INTEGER),
HandlerField('drops', FieldType.STRING),
],
write_handlers=[
HandlerField('reset_counts', FieldType.NULL),
HandlerField('active', FieldType.BOOLEAN)
])
Discard = build_open_box_block('Discard',
config_fields=[
],
read_handlers=[
HandlerField('count', FieldType.INTEGER),
HandlerField('byte_count', FieldType.INTEGER),
HandlerField('rate', FieldType.NUMBER),
HandlerField('byte_rate', FieldType.NUMBER),
HandlerField('drops', FieldType.STRING),
],
write_handlers=[
HandlerField('reset_counts', FieldType.NULL),
HandlerField('active', FieldType.BOOLEAN)
])
ToDevice = build_open_box_block('ToDevice',
config_fields=[
ConfigField('devname', True, FieldType.STRING),
])
ToDump = build_open_box_block('ToDump',
config_fields=[
ConfigField('filename', True, FieldType.STRING),
])
Log = build_open_box_block('Log',
config_fields=[
ConfigField('message', True, FieldType.STRING),
ConfigField('severity', False, FieldType.INTEGER),
ConfigField('attach_packet', False, FieldType.BOOLEAN),
ConfigField('packet_size', False, FieldType.INTEGER),
],
read_handlers=[
],
write_handlers=[
])
Alert = build_open_box_block('Alert',
config_fields=[
ConfigField('message', True, FieldType.STRING),
ConfigField('severity', False, FieldType.INTEGER),
ConfigField('attach_packet', False, FieldType.BOOLEAN),
ConfigField('packet_size', False, FieldType.INTEGER),
],
read_handlers=[
],
write_handlers=[
])
ContentClassifier = build_open_box_block('ContentClassifier',
config_fields=[
ConfigField('pattern', True, FieldType.ARRAY)
],
read_handlers=[
HandlerField('count', FieldType.INTEGER),
HandlerField('byte_count', FieldType.INTEGER),
HandlerField('rate', FieldType.NUMBER),
HandlerField('byte_rate', FieldType.NUMBER),
],
write_handlers=[
HandlerField('reset_counts', FieldType.NULL)
])
HeaderClassifier = build_open_box_block('HeaderClassifier',
config_fields=[
ConfigField('match', True, FieldType.MATCH_PATTERNS),
ConfigField('allow_vlan', False, FieldType.BOOLEAN),
],
read_handlers=[
HandlerField('count', FieldType.INTEGER),
HandlerField('byte_count', FieldType.INTEGER),
HandlerField('rate', FieldType.NUMBER),
HandlerField('byte_rate', FieldType.NUMBER),
],
write_handlers=[
HandlerField('reset_counts', FieldType.NULL)
])
RegexMatcher = build_open_box_block('RegexMatcher',
config_fields=[
ConfigField('pattern', True, FieldType.ARRAY),
ConfigField('payload_only', False, FieldType.BOOLEAN),
ConfigField('match_all', False, FieldType.BOOLEAN)
],
read_handlers=[
HandlerField('count', FieldType.INTEGER),
HandlerField('byte_count', FieldType.INTEGER),
HandlerField('rate', FieldType.NUMBER),
HandlerField('byte_rate', FieldType.NUMBER),
HandlerField('payload_only', FieldType.BOOLEAN),
HandlerField('match_all', FieldType.BOOLEAN),
],
write_handlers=[
HandlerField('reset_counts', FieldType.NULL),
HandlerField('payload_only', FieldType.BOOLEAN),
HandlerField('match_all', FieldType.BOOLEAN),
]
)
RegexClassifier = build_open_box_block('RegexClassifier',
config_fields=[
ConfigField('pattern', True, FieldType.ARRAY),
ConfigField('payload_only', False, FieldType.BOOLEAN),
ConfigField('max_regex_memory', False, FieldType.INTEGER),
],
read_handlers=[
HandlerField('count', FieldType.INTEGER),
HandlerField('byte_count', FieldType.INTEGER),
HandlerField('rate', FieldType.NUMBER),
HandlerField('byte_rate', FieldType.NUMBER),
HandlerField('payload_only', FieldType.BOOLEAN),
],
write_handlers=[
HandlerField('reset_counts', FieldType.NULL),
HandlerField('payload_only', FieldType.BOOLEAN),
]
)
VlanDecapsulate = build_open_box_block('VlanDecapsulate')
VlanEncapsulate = build_open_box_block('VlanEncapsulate',
config_fields=[
ConfigField('vlan_vid', True, FieldType.INTEGER),
ConfigField('vlan_dei', False, FieldType.INTEGER),
ConfigField('vlan_pcp', False, FieldType.INTEGER),
ConfigField('ethertype', False, FieldType.INTEGER),
],
read_handlers=[
HandlerField('vlan_vid', FieldType.INTEGER),
HandlerField('vlan_dei', FieldType.INTEGER),
HandlerField('vlan_pcp', FieldType.INTEGER),
HandlerField('vlan_tci', FieldType.INTEGER),
HandlerField('ethertype', FieldType.INTEGER),
],
write_handlers=[
HandlerField('vlan_vid', FieldType.INTEGER),
HandlerField('vlan_dei', FieldType.INTEGER),
HandlerField('vlan_pcp', FieldType.INTEGER),
HandlerField('vlan_tci', FieldType.INTEGER),
HandlerField('ethertype', FieldType.INTEGER),
])
DecIpTtl = build_open_box_block('DecIpTtl',
config_fields=[
ConfigField('active', False, FieldType.BOOLEAN),
],
read_handlers=[
HandlerField('count', FieldType.INTEGER),
HandlerField('byte_count', FieldType.INTEGER),
HandlerField('rate', FieldType.NUMBER),
HandlerField('byte_rate', FieldType.NUMBER),
HandlerField('active', FieldType.BOOLEAN),
],
write_handlers=[
HandlerField('reset_counts', FieldType.NULL),
HandlerField('active', FieldType.BOOLEAN),
])
Ipv4AddressTranslator = build_open_box_block('Ipv4AddressTranslator',
config_fields=[
ConfigField('input_spec', True, FieldType.IPV4_TRANSLATION_RULES),
ConfigField('tcp_done_timeout', False, FieldType.INTEGER),
ConfigField('tcp_nodata_timeout', False, FieldType.INTEGER),
ConfigField('tcp_guarantee', False, FieldType.INTEGER),
ConfigField('udp_timeout', False, FieldType.INTEGER),
ConfigField('udp_streaming_timeout', False, FieldType.INTEGER),
ConfigField('udp_guarantee', False, FieldType.INTEGER),
ConfigField('reap_interval', False, FieldType.INTEGER),
ConfigField('mapping_capacity', False, FieldType.INTEGER)
],
read_handlers=[
HandlerField('mapping_count', FieldType.INTEGER),
HandlerField('mapping_failures', FieldType.INTEGER),
HandlerField('length', FieldType.INTEGER),
HandlerField('capacity', FieldType.INTEGER),
HandlerField('tcp_mapping', FieldType.STRING),
HandlerField('udp_mapping', FieldType.STRING),
],
write_handlers=[
HandlerField('capacity', FieldType.INTEGER)
])
Queue = build_open_box_block('Queue',
config_fields=[
ConfigField('capacity', False, FieldType.INTEGER),
],
read_handlers=[
HandlerField('length', FieldType.INTEGER),
HandlerField('highwater_length', FieldType.INTEGER),
HandlerField('drops', FieldType.INTEGER),
HandlerField('capacity', FieldType.INTEGER),
],
write_handlers=[
HandlerField('reset_counts', FieldType.INTEGER),
HandlerField('reset', FieldType.INTEGER)
]
)
NetworkDirectionSwap = build_open_box_block('NetworkDirectionSwap',
config_fields=[
ConfigField('ethernet', False, FieldType.BOOLEAN),
ConfigField('ipv4', False, FieldType.BOOLEAN),
ConfigField('ipv6', False, FieldType.BOOLEAN),
ConfigField('tcp', False, FieldType.BOOLEAN),
ConfigField('udp', False, FieldType.BOOLEAN),
],
read_handlers=[
HandlerField('ethernet', FieldType.BOOLEAN),
HandlerField('ipv4', FieldType.BOOLEAN),
HandlerField('ipv6', FieldType.BOOLEAN),
HandlerField('tcp', FieldType.BOOLEAN),
HandlerField('udp', FieldType.BOOLEAN),
],
write_handlers=[
HandlerField('ethernet', FieldType.BOOLEAN),
HandlerField('ipv4', FieldType.BOOLEAN),
HandlerField('ipv6', FieldType.BOOLEAN),
HandlerField('tcp', FieldType.BOOLEAN),
HandlerField('udp', FieldType.BOOLEAN),
])
NetworkHeaderFieldsRewriter = build_open_box_block('NetworkHeaderFieldsRewriter',
config_fields=[
ConfigField('eth_src', False, FieldType.MAC_ADDRESS),
ConfigField('eth_dst', False, FieldType.MAC_ADDRESS),
ConfigField('eth_type', False, FieldType.INTEGER),
ConfigField('ipv4_proto', False, FieldType.INTEGER),
ConfigField('ipv4_dscp', False, FieldType.INTEGER),
ConfigField('ipv4_ecn', False, FieldType.INTEGER),
ConfigField('ipv4_ttl', False, FieldType.INTEGER),
ConfigField('ipv4_src', False, FieldType.IPV4_ADDRESS),
ConfigField('ipv4_dst', False, FieldType.IPV4_ADDRESS),
ConfigField('tcp_src', False, FieldType.INTEGER),
ConfigField('tcp_dst', False, FieldType.INTEGER),
ConfigField('udp_src', False, FieldType.INTEGER),
ConfigField('udp_dst', False, FieldType.INTEGER),
],
read_handlers=[
HandlerField('eth_src', FieldType.MAC_ADDRESS),
HandlerField('eth_dst', FieldType.MAC_ADDRESS),
HandlerField('eth_type', FieldType.INTEGER),
HandlerField('ipv4_proto', FieldType.INTEGER),
HandlerField('ipv4_dscp', FieldType.INTEGER),
HandlerField('ipv4_ecn', FieldType.INTEGER),
HandlerField('ipv4_ttl', FieldType.INTEGER),
HandlerField('ipv4_src', FieldType.IPV4_ADDRESS),
HandlerField('ipv4_dst', FieldType.IPV4_ADDRESS),
HandlerField('tcp_src', FieldType.INTEGER),
HandlerField('tcp_dst', FieldType.INTEGER),
HandlerField('udp_src', FieldType.INTEGER),
HandlerField('udp_dst', FieldType.INTEGER)
],
write_handlers=[
HandlerField('eth_src', FieldType.MAC_ADDRESS),
HandlerField('eth_dst', FieldType.MAC_ADDRESS),
HandlerField('eth_type', FieldType.INTEGER),
HandlerField('ipv4_proto', FieldType.INTEGER),
HandlerField('ipv4_dscp', FieldType.INTEGER),
HandlerField('ipv4_ecn', FieldType.INTEGER),
HandlerField('ipv4_ttl', FieldType.INTEGER),
HandlerField('ipv4_src', FieldType.IPV4_ADDRESS),
HandlerField('ipv4_dst', FieldType.IPV4_ADDRESS),
HandlerField('tcp_src', FieldType.INTEGER),
HandlerField('tcp_dst', FieldType.INTEGER),
HandlerField('udp_src', FieldType.INTEGER),
HandlerField('udp_dst', FieldType.INTEGER)
])
HeaderPayloadClassifier = build_open_box_block('HeaderPayloadClassifier',
config_fields=[
ConfigField('match', True, FieldType.COMPOUND_MATCHES),
ConfigField('allow_vlan', False, FieldType.BOOLEAN),
],
read_handlers=[
HandlerField('count', FieldType.INTEGER),
HandlerField('byte_count', FieldType.INTEGER),
HandlerField('rate', FieldType.NUMBER),
HandlerField('byte_rate', FieldType.NUMBER),
],
write_handlers=[
HandlerField('reset_counts', FieldType.NULL)
])
SetTimestamp = build_open_box_block('SetTimestamp',
config_fields=[
ConfigField('timestamp', False, FieldType.STRING),
])
SetTimestampDelta = build_open_box_block('SetTimestampDelta',
config_fields=[
ConfigField('type', False, FieldType.STRING)
],
read_handlers=[
HandlerField('first', FieldType.STRING)
],
write_handlers=[
HandlerField('reset', FieldType.NULL)
])
StringClassifier = build_open_box_block('StringClassifier',
config_fields=[
ConfigField('pattern', True, FieldType.ARRAY),
],
read_handlers=[
HandlerField('count', FieldType.INTEGER),
HandlerField('byte_count', FieldType.INTEGER),
HandlerField('rate', FieldType.NUMBER),
HandlerField('byte_rate', FieldType.NUMBER),
],
write_handlers=[
HandlerField('reset_counts', FieldType.NULL),
]
)
if __name__ == '__main__':
blocks = [block.to_dict_schema() for block in OpenBoxBlock.blocks_registry.values()]
with open('blocks.json', 'wb') as f:
f.write(json.dumps(blocks, indent=2))
#!/usr/bin/python
#The MIT License (MIT)
#
#Copyright (c) 2015 Stephen P. Smith
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import time, math
import RPi.GPIO as GPIO
#import numpy
class max31865(object):
"""Reading Temperature from the MAX31865 with GPIO using
the Raspberry Pi. Any pins can be used.
Numpy can be used to completely solve the Callendar-Van Dusen equation
but it slows the temp reading down. I commented it out in the code.
    Both the quadratic formula derived from the Callendar-Van Dusen equation
    (ignoring the 3rd and 4th degree terms of the polynomial) and the straight
    line approximation are calculated; the quadratic result is the more accurate.
    """
def __init__(self, csPin, misoPin, mosiPin, clkPin, RefRest, ConfigReg):
self.csPin = csPin
self.misoPin = misoPin
self.mosiPin = mosiPin
self.clkPin = clkPin
self.RefRest = RefRest
self.ConfigReg = ConfigReg
self.setupGPIO()
def setupGPIO(self):
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.csPin, GPIO.OUT)
GPIO.setup(self.misoPin, GPIO.IN)
GPIO.setup(self.mosiPin, GPIO.OUT)
GPIO.setup(self.clkPin, GPIO.OUT)
GPIO.output(self.csPin, GPIO.HIGH)
GPIO.output(self.clkPin, GPIO.LOW)
GPIO.output(self.mosiPin, GPIO.LOW)
def readTemp(self):
#
# b10000000 = 0x80
# 0x8x to specify 'write register value'
# 0xx0 to specify 'configuration register'
#
#
# Config Register
# ---------------
# bit 7: Vbias -> 1 (ON), 0 (OFF)
        # bit 6: Conversion Mode -> 0 (MANUAL), 1 (AUTO) !!don't change the notch frequency when auto
# bit5: 1-shot ->1 (ON)
# bit4: 3-wire select -> 1 (3 wires config), 0 (2 or 4 wires)
# bits 3-2: fault detection cycle -> 0 (none)
# bit 1: fault status clear -> 1 (clear any fault)
        # bit 0: 50/60 Hz filter select -> 0 (60Hz - Faster conversion), 1 (50Hz)
#
# 0b10110010 = 0xB2 (Manual conversion, 3 wires at 60Hz)
# 0b10100010 = 0xA2 (Manual conversion, 2 or 4 wires at 60Hz)
# 0b11010010 = 0xD2 (Continuous auto conversion, 3 wires at 60 Hz)
# 0b11000010 = 0xC2 (Continuous auto conversion, 2 or 4 wires at 60 Hz)
#
#one shot
self.writeRegister(0, self.ConfigReg)
# conversion time is less than 100ms
time.sleep(.1) #give it 100ms for conversion
# read all registers
out = self.readRegisters(0,8)
conf_reg = out[0]
#print "config register byte: %x" % conf_reg
[rtd_msb, rtd_lsb] = [out[1], out[2]]
rtd_ADC_Code = (( rtd_msb << 8 ) | rtd_lsb ) >> 1
temp_C = self.calcPT100Temp(rtd_ADC_Code)
[hft_msb, hft_lsb] = [out[3], out[4]]
hft = (( hft_msb << 8 ) | hft_lsb ) >> 1
#print "high fault threshold: %d" % hft
[lft_msb, lft_lsb] = [out[5], out[6]]
lft = (( lft_msb << 8 ) | lft_lsb ) >> 1
#print "low fault threshold: %d" % lft
status = out[7]
#
# 10 Mohm resistor is on breakout board to help
# detect cable faults
# bit 7: RTD High Threshold / cable fault open
# bit 6: RTD Low Threshold / cable fault short
# bit 5: REFIN- > 0.85 x VBias -> must be requested
# bit 4: REFIN- < 0.85 x VBias (FORCE- open) -> must be requested
# bit 3: RTDIN- < 0.85 x VBias (FORCE- open) -> must be requested
# bit 2: Overvoltage / undervoltage fault
# bits 1,0 don't care
#print "Status byte: %x" % status
if ((status & 0x80) == 1):
raise FaultError("High threshold limit (Cable fault/open)")
if ((status & 0x40) == 1):
raise FaultError("Low threshold limit (Cable fault/short)")
if ((status & 0x04) == 1):
raise FaultError("Overvoltage or Undervoltage Error")
return temp_C
def writeRegister(self, regNum, dataByte):
GPIO.output(self.csPin, GPIO.LOW)
# 0x8x to specify 'write register value'
addressByte = 0x80 | regNum;
# first byte is address byte
self.sendByte(addressByte)
# the rest are data bytes
self.sendByte(dataByte)
GPIO.output(self.csPin, GPIO.HIGH)
def readRegisters(self, regNumStart, numRegisters):
out = []
GPIO.output(self.csPin, GPIO.LOW)
# 0x to specify 'read register value'
self.sendByte(regNumStart)
for byte in range(numRegisters):
data = self.recvByte()
out.append(data)
GPIO.output(self.csPin, GPIO.HIGH)
return out
def sendByte(self,byte):
for bit in range(8):
GPIO.output(self.clkPin, GPIO.HIGH)
if (byte & 0x80):
GPIO.output(self.mosiPin, GPIO.HIGH)
else:
GPIO.output(self.mosiPin, GPIO.LOW)
byte <<= 1
GPIO.output(self.clkPin, GPIO.LOW)
def recvByte(self):
byte = 0x00
for bit in range(8):
GPIO.output(self.clkPin, GPIO.HIGH)
byte <<= 1
if GPIO.input(self.misoPin):
byte |= 0x1
GPIO.output(self.clkPin, GPIO.LOW)
return byte
def calcPT100Temp(self, RTD_ADC_Code):
R_REF = self.RefRest # Reference Resistor was 400.0
Res0 = 100; # Resistance at 0 degC for 400ohm R_Ref
a = .00390830
b = -.000000577500
# c = -4.18301e-12 # for -200 <= T <= 0 (degC)
#c = -0.00000000000418301
# c = 0 # for 0 <= T <= 850 (degC)
#print "RTD ADC Code: %d" % RTD_ADC_Code
Res_RTD = (RTD_ADC_Code * R_REF) / 32768.0 # PT100 Resistance
#print "PT100 Resistance: %f ohms" % Res_RTD
#
# Callendar-Van Dusen equation
# Res_RTD = Res0 * (1 + a*T + b*T**2 + c*(T-100)*T**3)
# Res_RTD = Res0 + a*Res0*T + b*Res0*T**2 # c = 0
# (c*Res0)T**4 - (c*Res0)*100*T**3
# + (b*Res0)*T**2 + (a*Res0)*T + (Res0 - Res_RTD) = 0
#
# quadratic formula:
# for 0 <= T <= 850 (degC)
temp_C = -(a*Res0) + math.sqrt(a*a*Res0*Res0 - 4*(b*Res0)*(Res0 - Res_RTD))
temp_C = temp_C / (2*(b*Res0))
temp_C_line = (RTD_ADC_Code/32.0) - 256.0
# removing numpy.roots will greatly speed things up
#temp_C_numpy = numpy.roots([c*Res0, -c*Res0*100, b*Res0, a*Res0, (Res0 - Res_RTD)])
#temp_C_numpy = abs(temp_C_numpy[-1])
#print "Straight Line Approx. Temp: %f degC" % temp_C_line
#print "Callendar-Van Dusen Temp (degC > 0): %f degC" % temp_C
#print "Solving Full Callendar-Van Dusen using numpy: %f" % temp_C_numpy
if (temp_C < 0): #use straight line approximation if less than 0
# Can also use python lib numpy to solve cubic
# Should never get here in this application
temp_C = (RTD_ADC_Code/32) - 256
#print temp_C
return temp_C
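# Sanity check for calcPT100Temp (illustrative numbers only): with the reference resistor
# at 400.0 ohm, an ADC code of 8192 gives Res_RTD = 8192 * 400.0 / 32768 = 100.0 ohm,
# i.e. the PT100 resistance at 0 degC, so both the quadratic formula and the straight-line
# approximation (8192/32.0 - 256.0) return roughly 0 degC.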
class FaultError(Exception):
pass
if __name__ == "__main__":
    csPin = 8
    misoPin = 9
    mosiPin = 10
    clkPin = 11
    refRes = 400.0    # reference resistor on the breakout board (see calcPT100Temp)
    configReg = 0xB2  # one-shot, 3-wire RTD, 60Hz filter (see readTemp comments)
    sensor = max31865(csPin, misoPin, mosiPin, clkPin, refRes, configReg)
    tempC = sensor.readTemp()
    print(tempC)
    GPIO.cleanup()
#
# Copyright 2015 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
#!/usr/bin/env python
usage = '''
Write extra flags to outfile for DM based on the bot name:
$ python dm_flags.py outfile Test-Ubuntu-GCC-GCE-CPU-AVX2-x86-Debug
Or run self-tests:
$ python dm_flags.py test
'''
import inspect
import json
import os
import sys
def lineno():
caller = inspect.stack()[1] # Up one level to our caller.
return inspect.getframeinfo(caller[0]).lineno
cov_start = lineno()+1 # We care about coverage starting just past this def.
def get_args(bot):
args = []
configs = ['565', '8888', 'gpu']
if 'Android' not in bot:
configs.extend(('upright-matrix-8888', 'upright-matrix-gpu'))
args.extend('--matrix 0 1 1 0'.split(' '))
if '-GCE-' in bot:
configs.append('sp-8888')
if 'TegraK1' in bot or 'GTX550Ti' in bot or 'GTX660' in bot or 'GT610' in bot:
if 'Android' in bot:
configs.append('nvprmsaa4')
else:
configs.append('nvprmsaa16')
# The S4 crashes and the NP produces a long error stream when we run with
# MSAA. The Tegra2 and Tegra3 just don't support it.
if ('GalaxyS4' not in bot and
'NexusPlayer' not in bot and
'Tegra3' not in bot and
'iOS' not in bot):
if 'Android' in bot:
configs.append('msaa4')
else:
configs.append('msaa16')
# Runs out of memory on Android bots and Daisy. Everyone else seems fine.
if 'Android' not in bot and 'Daisy' not in bot:
configs.append('pdf')
# NP is running out of RAM when we run all these modes. skia:3255
if 'NexusPlayer' not in bot:
configs.extend(mode + '-8888' for mode in
['serialize', 'tiles_rt', 'pipe'])
if 'ANGLE' in bot:
configs.append('angle')
args.append('--config')
args.extend(configs)
# Run tests and gms everywhere,
# and image decoding tests everywhere except GPU bots.
# TODO: remove skp from default --src list?
if 'GPU' in bot:
args.extend('--src tests gm'.split(' '))
else:
args.extend('--src tests gm image'.split(' '))
if 'GalaxyS' in bot:
args.extend(('--threads', '0'))
blacklist = []
# This image is too large to be a texture for many GPUs.
blacklist.extend('gpu _ _ PANO_20121023_214540.jpg'.split(' '))
blacklist.extend('msaa _ _ PANO_20121023_214540.jpg'.split(' '))
# Several of the newest version bmps fail on SkImageDecoder
blacklist.extend('_ image decode pal8os2v2.bmp'.split(' '))
blacklist.extend('_ image decode pal8v4.bmp'.split(' '))
blacklist.extend('_ image decode pal8v5.bmp'.split(' '))
blacklist.extend('_ image decode rgb16-565.bmp'.split(' '))
blacklist.extend('_ image decode rgb16-565pal.bmp'.split(' '))
blacklist.extend('_ image decode rgb32-111110.bmp'.split(' '))
blacklist.extend('_ image decode rgb32bf.bmp'.split(' '))
blacklist.extend('_ image decode rgba32.bmp'.split(' '))
blacklist.extend('_ image decode rgba32abf.bmp'.split(' '))
blacklist.extend('_ image decode rgb24largepal.bmp'.split(' '))
blacklist.extend('_ image decode pal8os2v2-16.bmp'.split(' '))
blacklist.extend('_ image decode pal8oversizepal.bmp'.split(' '))
blacklist.extend('_ image decode pal4rletrns.bmp'.split(' '))
blacklist.extend('_ image decode pal8rletrns.bmp'.split(' '))
blacklist.extend('_ image decode 4bpp-pixeldata-cropped.bmp'.split(' '))
blacklist.extend('_ image decode 8bpp-pixeldata-cropped.bmp'.split(' '))
blacklist.extend('_ image decode 24bpp-pixeldata-cropped.bmp'.split(' '))
blacklist.extend('_ image decode 32bpp-pixeldata-cropped.bmp'.split(' '))
blacklist.extend('_ image subset rgb24largepal.bmp'.split(' '))
blacklist.extend('_ image subset pal8os2v2-16.bmp'.split(' '))
blacklist.extend('_ image subset pal8oversizepal.bmp'.split(' '))
blacklist.extend('_ image subset 4bpp-pixeldata-cropped.bmp'.split(' '))
blacklist.extend('_ image subset 8bpp-pixeldata-cropped.bmp'.split(' '))
blacklist.extend('_ image subset 24bpp-pixeldata-cropped.bmp'.split(' '))
blacklist.extend('_ image subset 32bpp-pixeldata-cropped.bmp'.split(' '))
# New ico files that fail on SkImageDecoder
blacklist.extend('_ image decode Hopstarter-Mac-Folders-Apple.ico'.split(' '))
# Leon doesn't care about this, so why run it?
if 'Win' in bot:
blacklist.extend('_ image decode _'.split(' '))
blacklist.extend('_ image subset _'.split(' '))
# Certain gm's on win7 gpu and pdf are never finishing and keeping the test
# running forever
if 'Win7' in bot:
blacklist.extend('msaa16 gm _ colorwheelnative'.split(' '))
blacklist.extend('pdf gm _ fontmgr_iter_factory'.split(' '))
if 'Valgrind' in bot:
# PDF + .webp -> jumps depending on uninitialized memory. skia:3505
blacklist.extend('pdf _ _ .webp'.split(' '))
# These take 18+ hours to run.
blacklist.extend('pdf gm _ fontmgr_iter'.split(' '))
blacklist.extend('pdf _ _ PANO_20121023_214540.jpg'.split(' '))
blacklist.extend('pdf skp _ worldjournal'.split(' '))
blacklist.extend('pdf skp _ desk_baidu.skp'.split(' '))
blacklist.extend('pdf skp _ desk_wikipedia.skp'.split(' '))
if 'iOS' in bot:
blacklist.extend('gpu skp _ _ msaa skp _ _'.split(' '))
blacklist.extend('gpu image decode _ msaa image decode _'.split(' '))
blacklist.extend('gpu image subset _ msaa image subset _'.split(' '))
blacklist.extend('msaa16 gm _ tilemodesProcess'.split(' '))
if blacklist:
args.append('--blacklist')
args.extend(blacklist)
match = []
if 'Valgrind' in bot: # skia:3021
match.append('~Threaded')
if 'TSAN' in bot: # skia:3562
match.append('~Math')
if 'GalaxyS3' in bot: # skia:1699
match.append('~WritePixels')
# skia:3249: these images flakily don't decode on Android.
if 'Android' in bot:
match.append('~tabl_mozilla_0')
match.append('~desk_yahoonews_0')
if 'NexusPlayer' in bot:
match.append('~ResourceCache')
if 'iOS' in bot:
match.append('~WritePixels')
if match:
args.append('--match')
args.extend(match)
return args
cov_end = lineno() # Don't care about code coverage past here.
def self_test():
import coverage # This way the bots don't need coverage.py to be installed.
args = {}
cases = [
'Pretend-iOS-Bot',
'Test-Android-GCC-Nexus9-GPU-TegraK1-Arm64-Debug',
'Test-Android-GCC-GalaxyS3-GPU-Mali400-Arm7-Debug',
'Test-Android-GCC-GalaxyS4-GPU-SGX544-Arm7-Release',
'Test-Android-GCC-Nexus7-GPU-Tegra3-Arm7-Release',
'Test-Android-GCC-NexusPlayer-CPU-SSSE3-x86-Release',
'Test-Ubuntu-GCC-ShuttleA-GPU-GTX550Ti-x86_64-Release-Valgrind',
'Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-TSAN',
'Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-Valgrind',
'Test-Win7-MSVC-ShuttleA-GPU-HD2000-x86-Debug-ANGLE',
]
cov = coverage.coverage()
cov.start()
for case in cases:
args[case] = get_args(case)
cov.stop()
this_file = os.path.basename(__file__)
_, _, not_run, _ = cov.analysis(this_file)
filtered = [line for line in not_run if line > cov_start and line < cov_end]
if filtered:
print 'Lines not covered by test cases: ', filtered
sys.exit(1)
golden = this_file.replace('.py', '.json')
with open(os.path.join(os.path.dirname(__file__), golden), 'w') as f:
json.dump(args, f, indent=2, sort_keys=True)
if __name__ == '__main__':
if len(sys.argv) == 2 and sys.argv[1] == 'test':
self_test()
sys.exit(0)
if len(sys.argv) != 3:
print usage
sys.exit(1)
with open(sys.argv[1], 'w') as out:
json.dump(get_args(sys.argv[2]), out)
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
# Modifications Copyright 2017 Abigail See
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This file contains code to read the train/eval/test data from file and process it, and read the vocab data from file and process it"""
import glob
import random
import struct
import csv
import six
import click
import codecs
from tensorflow.core.example import example_pb2
# <s> and </s> are used in the data files to segment the abstracts into sentences. They don't receive vocab ids.
SENTENCE_START = '<s>'
SENTENCE_END = '</s>'
PAD_TOKEN = '[PAD]' # This has a vocab id, which is used to pad the encoder input, decoder input and target sequence
UNKNOWN_TOKEN = '[UNK]' # This has a vocab id, which is used to represent out-of-vocabulary words
START_DECODING = '[START]' # This has a vocab id, which is used at the start of every decoder input sequence
STOP_DECODING = '[STOP]' # This has a vocab id, which is used at the end of untruncated target sequences
# Note: none of <s>, </s>, [PAD], [UNK], [START], [STOP] should appear in the vocab file.
class Vocab(object):
"""Vocabulary class for mapping between words and ids (integers)"""
def __init__(self, vocab_file, max_size):
"""Creates a vocab of up to max_size words, reading from the vocab_file. If max_size is 0, reads the entire vocab file.
Args:
vocab_file: path to the vocab file, which is assumed to contain "<word> <frequency>" on each line, sorted with most frequent word first. This code doesn't actually use the frequencies, though.
max_size: integer. The maximum size of the resulting Vocabulary."""
self._word_to_id = {}
self._id_to_word = {}
self._count = 0 # keeps track of total number of words in the Vocab
# [UNK], [PAD], [START] and [STOP] get the ids 0,1,2,3.
for w in [UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]:
self._word_to_id[w] = self._count
self._id_to_word[self._count] = w
self._count += 1
# Read the vocab file and add words up to max_size
with codecs.open(vocab_file, 'r', "utf-8") as vocab_f:
for line in vocab_f:
pieces = line.split()
if len(pieces) != 2:
print('Warning: incorrectly formatted line in vocabulary file: %s\n' % line)
continue
w = pieces[0]
if w in [SENTENCE_START, SENTENCE_END, UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]:
raise Exception('<s>, </s>, [UNK], [PAD], [START] and [STOP] shouldn\'t be in the vocab file, but %s is' % w)
if w in self._word_to_id:
raise Exception('Duplicated word in vocabulary file: %s' % w)
self._word_to_id[w] = self._count
self._id_to_word[self._count] = w
self._count += 1
if max_size != 0 and self._count >= max_size:
print("max_size of vocab was specified as %i; we now have %i words. Stopping reading." % (max_size, self._count))
break
last_word = self._id_to_word[self._count-1]
if six.PY2:
last_word = last_word.encode("utf-8")
print("Finished constructing vocabulary of {} total words. Last word added: {}".format(self._count, last_word))
def word2id(self, word):
"""Returns the id (integer) of a word (string). Returns [UNK] id if word is OOV."""
if word not in self._word_to_id:
return self._word_to_id[UNKNOWN_TOKEN]
return self._word_to_id[word]
def id2word(self, word_id):
"""Returns the word (string) corresponding to an id (integer)."""
if word_id not in self._id_to_word:
raise ValueError('Id not found in vocab: %d' % word_id)
return self._id_to_word[word_id]
def size(self):
"""Returns the total size of the vocabulary"""
return self._count
def write_metadata(self, fpath):
"""Writes metadata file for Tensorboard word embedding visualizer as described here:
https://www.tensorflow.org/get_started/embedding_viz
Args:
fpath: place to write the metadata file
"""
print("Writing word embedding metadata file to %s..." % (fpath))
with codecs.open(fpath, "w", "utf-8") as f:
fieldnames = ['word']
writer = csv.DictWriter(f, delimiter="\t", fieldnames=fieldnames)
for i in range(self.size()):
word = self._id_to_word[i]
f.write(word+"\n")
def example_generator(data_path, single_pass):
"""Generates tf.Examples from data files.
Binary data format: <length><blob>. <length> represents the byte size
of <blob>. <blob> is serialized tf.Example proto. The tf.Example contains
the tokenized article text and summary.
Args:
data_path:
Path to tf.Example data files. Can include wildcards, e.g. if you have several training data chunk files train_001.bin, train_002.bin, etc, then pass data_path=train_* to access them all.
single_pass:
Boolean. If True, go through the dataset exactly once, generating examples in the order they appear, then return. Otherwise, generate random examples indefinitely.
Yields:
Deserialized tf.Example.
"""
while True:
filelist = glob.glob(data_path) # get the list of datafiles
assert filelist, ('Error: Empty filelist at %s' % data_path) # check filelist isn't empty
if single_pass:
filelist = sorted(filelist)
else:
random.shuffle(filelist)
for f in filelist:
reader = open(f, 'rb')
while True:
len_bytes = reader.read(8)
if not len_bytes: break # finished reading this file
str_len = struct.unpack('q', len_bytes)[0]
example_str = struct.unpack('%ds' % str_len, reader.read(str_len))[0]
yield example_pb2.Example.FromString(example_str)
if single_pass:
print("example_generator completed reading all datafiles. No more data.")
break
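# Hedged sketch of how a datafile in the <length><blob> format read above could be
# produced (the output path and article/abstract strings are hypothetical; the real
# preprocessing script lives outside this module):
#
#   tf_example = example_pb2.Example()
#   tf_example.features.feature['article'].bytes_list.value.extend([b'some article text'])
#   tf_example.features.feature['abstract'].bytes_list.value.extend([b'<s> a summary . </s>'])
#   tf_example_str = tf_example.SerializeToString()
#   with open('train_000.bin', 'wb') as writer:
#       writer.write(struct.pack('q', len(tf_example_str)))
#       writer.write(struct.pack('%ds' % len(tf_example_str), tf_example_str))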
def article2ids(article_words, vocab):
"""Map the article words to their ids. Also return a list of OOVs in the article.
Args:
article_words: list of words (strings)
vocab: Vocabulary object
Returns:
ids:
A list of word ids (integers); OOVs are represented by their temporary article OOV number. If the vocabulary size is 50k and the article has 3 OOVs, then these temporary OOV numbers will be 50000, 50001, 50002.
oovs:
A list of the OOV words in the article (strings), in the order corresponding to their temporary article OOV numbers."""
ids = []
oovs = []
unk_id = vocab.word2id(UNKNOWN_TOKEN)
for w in article_words:
i = vocab.word2id(w)
if i == unk_id: # If w is OOV
if w not in oovs: # Add to list of OOVs
oovs.append(w)
oov_num = oovs.index(w) # This is 0 for the first article OOV, 1 for the second article OOV...
ids.append(vocab.size() + oov_num) # This is e.g. 50000 for the first article OOV, 50001 for the second...
else:
ids.append(i)
return ids, oovs
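# Worked example (hypothetical words and ids; assumes vocab.size() == 50000 as in the
# docstring above):
#
#   article2ids(['the', 'kangaroo', 'ate', 'kangaroo', 'quokka'], vocab)
#   # if 'kangaroo' and 'quokka' are OOV, this returns something like
#   #   ids  = [37, 50000, 215, 50000, 50001]
#   #   oovs = ['kangaroo', 'quokka']
#   # repeated OOVs reuse the same temporary id.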
def abstract2ids(abstract_words, vocab, article_oovs):
"""Map the abstract words to their ids. In-article OOVs are mapped to their temporary OOV numbers.
Args:
abstract_words: list of words (strings)
vocab: Vocabulary object
article_oovs: list of in-article OOV words (strings), in the order corresponding to their temporary article OOV numbers
Returns:
ids: List of ids (integers). In-article OOV words are mapped to their temporary OOV numbers. Out-of-article OOV words are mapped to the UNK token id."""
ids = []
unk_id = vocab.word2id(UNKNOWN_TOKEN)
for w in abstract_words:
i = vocab.word2id(w)
if i == unk_id: # If w is an OOV word
if w in article_oovs: # If w is an in-article OOV
vocab_idx = vocab.size() + article_oovs.index(w) # Map to its temporary article OOV number
ids.append(vocab_idx)
else: # If w is an out-of-article OOV
ids.append(unk_id) # Map to the UNK token id
else:
ids.append(i)
return ids
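# Worked example (continuing the hypothetical article above, vocab.size() == 50000):
#
#   abstract2ids(['the', 'kangaroo', 'saw', 'platypus'], vocab, ['kangaroo', 'quokka'])
#   # 'kangaroo' is an in-article OOV     -> 50000
#   # 'platypus' is an out-of-article OOV -> the [UNK] id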
def outputids2words(id_list, vocab, article_oovs):
"""Maps output ids to words, including mapping in-article OOVs from their temporary ids to the original OOV string (applicable in pointer-generator mode).
Args:
id_list: list of ids (integers)
vocab: Vocabulary object
article_oovs: list of OOV words (strings) in the order corresponding to their temporary article OOV ids (that have been assigned in pointer-generator mode), or None (in baseline mode)
Returns:
words: list of words (strings)
"""
words = []
for i in id_list:
try:
w = vocab.id2word(i) # might be [UNK]
except ValueError as e: # w is OOV
assert article_oovs is not None, "Error: model produced a word ID that isn't in the vocabulary. This should not happen in baseline (no pointer-generator) mode"
article_oov_idx = i - vocab.size()
try:
w = article_oovs[article_oov_idx]
except IndexError: # i doesn't correspond to an article oov
raise ValueError('Error: model produced word ID %i which corresponds to article OOV %i but this example only has %i article OOVs' % (i, article_oov_idx, len(article_oovs)))
words.append(w)
return words
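# Hedged round-trip sketch (pointer-generator mode, same hypothetical vocab as above):
#
#   outputids2words([37, 50000, 0], vocab, article_oovs=['kangaroo', 'quokka'])
#   # -> ['the', 'kangaroo', '[UNK]'], assuming id 37 maps to 'the' in this vocab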
def abstract2sents(abstract):
"""Splits abstract text from datafile into list of sentences.
Args:
abstract: string containing <s> and </s> tags for starts and ends of sentences
Returns:
sents: List of sentence strings (no tags)"""
cur = 0
sents = []
while True:
try:
start_p = abstract.index(SENTENCE_START, cur)
end_p = abstract.index(SENTENCE_END, start_p + 1)
cur = end_p + len(SENTENCE_END)
sents.append(abstract[start_p+len(SENTENCE_START):end_p])
except ValueError as e: # no more sentences
return sents
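# Example (hypothetical abstract string; note the surrounding whitespace is kept):
#
#   abstract2sents('<s> first sentence . </s> <s> second sentence . </s>')
#   # -> [' first sentence . ', ' second sentence . ']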
def show_art_oovs(article, vocab):
"""Returns the article string, highlighting the OOVs by placing __underscores__ around them"""
unk_token = vocab.word2id(UNKNOWN_TOKEN)
words = article.split(' ')
words = [("__%s__" % w) if vocab.word2id(w)==unk_token else w for w in words]
out_str = ' '.join(words)
return out_str
def show_abs_oovs(abstract, vocab, article_oovs):
"""Returns the abstract string, highlighting the article OOVs with __underscores__.
If a list of article_oovs is provided, non-article OOVs are differentiated like !!__this__!!.
Args:
abstract: string
vocab: Vocabulary object
article_oovs: list of words (strings), or None (in baseline mode)
"""
unk_token = vocab.word2id(UNKNOWN_TOKEN)
words = abstract.split(' ')
new_words = []
for w in words:
if vocab.word2id(w) == unk_token: # w is oov
if article_oovs is None: # baseline mode
new_words.append("__%s__" % w)
else: # pointer-generator mode
if w in article_oovs:
new_words.append("__%s__" % w)
else:
new_words.append("!!__%s__!!" % w)
else: # w is in-vocab word
new_words.append(w)
out_str = ' '.join(new_words)
return out_str
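# Example (hypothetical inputs, pointer-generator mode; 'kangaroo' and 'platypus' are
# assumed to be OOV for the vocab):
#
#   show_abs_oovs('the kangaroo met a platypus', vocab, article_oovs=['kangaroo'])
#   # -> 'the __kangaroo__ met a !!__platypus__!!'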
|
|
# Copyright 2015 - Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Solum Worker handler."""
from solum.common import exception
from solum.conductor import api as conductor_api
from solum.deployer import api as deployer_api
from solum import objects
from solum.objects import assembly
from solum.objects import image
from solum.openstack.common import log as logging
from solum.worker.app_handlers import default as docker_handler
LOG = logging.getLogger(__name__)
ASSEMBLY_STATES = assembly.States
IMAGE_STATES = image.States
def job_update_notification(ctxt, build_id, status=None, description=None,
created_image_id=None, docker_image_name=None,
assembly_id=None):
"""send a status update to the conductor."""
conductor_api.API(context=ctxt).build_job_update(build_id, status,
description,
created_image_id,
docker_image_name,
assembly_id)
def update_assembly_status(ctxt, assembly_id, status):
if assembly_id is None:
return
data = {'status': status}
conductor_api.API(context=ctxt).update_assembly(assembly_id, data)
def update_lp_status(ctxt, image_id, status, external_ref=None,
docker_image_name=None):
if image_id is None:
return
LOG.debug('Updating languagepack %s status to %s and external_ref to %s'
% (image_id, status, external_ref))
conductor_api.API(context=ctxt).update_image(image_id, status,
external_ref,
docker_image_name)
def validate_lp(ctxt, lp_id, assembly_id):
try:
image = objects.registry.Image.get_lp_by_name_or_uuid(
ctxt, lp_id, include_operators_lp=True)
except exception.ObjectNotFound:
LOG.error('LP not found with id %s, assembly id: %s' %
(lp_id, assembly_id))
return
if (not image or not image.project_id or not image.status or
not image.external_ref or not image.docker_image_name or
image.status.lower() != 'ready'):
LOG.warning("Error building due to language pack %s not ready."
" assembly ID: %s" % (lp_id, assembly_id))
return
return image
class Handler(object):
def echo(self, ctxt, message):
LOG.debug("%s" % message)
def launch_workflow(self, ctxt, build_id, git_info, ports, name,
base_image_id, source_format, image_format,
assembly_id, workflow, test_cmd, run_cmd):
try:
assem = objects.registry.Assembly.get_by_id(ctxt, assembly_id)
except exception.ObjectNotFound:
return
enable_unittest = enable_build = enable_deploy = False
for step in workflow:
if step == 'unittest':
enable_unittest = True
elif step == 'build':
enable_build = True
elif step == 'deploy':
enable_deploy = True
du_image_loc = None
du_image_name = None
with docker_handler.DockerHandler(ctxt, assem, 'custom',
'swift') as lp_handler:
if enable_unittest:
if self._do_unittest(ctxt, lp_handler, build_id, git_info,
name, base_image_id, source_format,
image_format, assembly_id, test_cmd) != 0:
return
if enable_build:
du_image_loc, du_image_name = self._do_build(
ctxt, lp_handler, build_id, git_info, name, base_image_id,
source_format, image_format, assembly_id, run_cmd)
if enable_deploy and du_image_loc and du_image_name:
self._do_deploy(ctxt, assembly_id, ports, du_image_loc,
du_image_name)
def build(self, ctxt, build_id, git_info, name, base_image_id,
source_format, image_format, assembly_id, run_cmd):
try:
assem = objects.registry.Assembly.get_by_id(ctxt, assembly_id)
except exception.ObjectNotFound:
return
with docker_handler.DockerHandler(ctxt, assem, 'custom',
'swift') as lp_handler:
self._do_build(ctxt, lp_handler, build_id, git_info, name,
base_image_id, source_format, image_format,
assembly_id, run_cmd)
def unittest(self, ctxt, build_id, git_info, name, base_image_id,
source_format, image_format, assembly_id, test_cmd):
try:
assem = objects.registry.Assembly.get_by_id(ctxt, assembly_id)
except exception.ObjectNotFound:
return
with docker_handler.DockerHandler(ctxt, assem, 'custom',
'swift') as lp_handler:
self._do_unittest(ctxt, lp_handler, build_id, git_info, name,
base_image_id, source_format, image_format,
assembly_id, test_cmd)
def _do_deploy(self, ctxt, assembly_id, ports, du_image_loc,
du_image_name):
deployer_api.API(context=ctxt).deploy(assembly_id=assembly_id,
image_loc=du_image_loc,
image_name=du_image_name,
ports=ports)
def _do_build(self, ctxt, lp_handler, build_id, git_info, name,
base_image_id, source_format, image_format, assembly_id,
run_cmd):
lp = validate_lp(ctxt, base_image_id, assembly_id)
if not lp:
update_assembly_status(ctxt, assembly_id, ASSEMBLY_STATES.ERROR)
job_update_notification(ctxt, build_id, IMAGE_STATES.ERROR,
description='language pack not ready',
assembly_id=assembly_id)
return
# Check if the assembly is deleted or being deleted
try:
assem = objects.registry.Assembly.get_by_id(ctxt, assembly_id)
if assem.status == ASSEMBLY_STATES.DELETING:
LOG.debug('Assembly %s is being deleted, skipping next stages'
% assembly_id)
return
except exception.ObjectNotFound:
LOG.debug('Assembly %s was deleted, skipping building.' %
assembly_id)
return
update_assembly_status(ctxt, assembly_id, ASSEMBLY_STATES.BUILDING)
image_info = lp_handler.build_app(name, git_info, lp.external_ref,
lp.docker_image_name, run_cmd)
if not image_info:
job_update_notification(ctxt, build_id, IMAGE_STATES.ERROR,
description='image not created',
assembly_id=assembly_id)
update_assembly_status(ctxt, assembly_id, ASSEMBLY_STATES.ERROR)
return
else:
job_update_notification(ctxt, build_id, IMAGE_STATES.READY,
description='built successfully',
created_image_id=image_info[0],
docker_image_name=image_info[1],
assembly_id=assembly_id)
update_assembly_status(ctxt, assembly_id, ASSEMBLY_STATES.BUILT)
return (image_info[0], image_info[1])
def _do_unittest(self, ctxt, lp_handler, build_id, git_info, name,
base_image_id, source_format, image_format, assembly_id,
test_cmd):
if test_cmd is None:
LOG.debug("Unit test command is None; skipping unittests.")
return 0
lp = validate_lp(ctxt, base_image_id, assembly_id)
if not lp:
update_assembly_status(ctxt, assembly_id, ASSEMBLY_STATES.ERROR)
return -1
# Check if the assembly is deleted or being deleted
try:
assem = objects.registry.Assembly.get_by_id(ctxt, assembly_id)
if assem.status == ASSEMBLY_STATES.DELETING:
LOG.debug('Assembly %s is being deleted, skipping next stages'
% assembly_id)
return -1
except exception.ObjectNotFound:
LOG.debug('Assembly %s was deleted, skipping unittesting.' %
assembly_id)
return -1
update_assembly_status(ctxt, assembly_id, ASSEMBLY_STATES.UNIT_TESTING)
result = lp_handler.unittest_app(git_info, lp.external_ref,
lp.docker_image_name, test_cmd)
if result == 0:
status = ASSEMBLY_STATES.UNIT_TESTING_PASSED
elif result > 0:
status = ASSEMBLY_STATES.UNIT_TESTING_FAILED
else:
status = ASSEMBLY_STATES.ERROR
update_assembly_status(ctxt, assembly_id, status)
return result
def build_lp(self, ctxt, image_id, git_info, name, source_format,
image_format, artifact_type):
try:
lp = objects.registry.Image.get_by_id(ctxt, image_id)
except exception.ObjectNotFound:
LOG.error('Image object not found with id %s' % image_id)
return
update_lp_status(ctxt, image_id, IMAGE_STATES.BUILDING)
lp.type = 'languagepack'
image_info = None
with docker_handler.DockerHandler(ctxt, lp, 'custom', 'swift') as lph:
image_info = lph.build_lp(name, git_info)
if image_info:
status = IMAGE_STATES.READY
update_lp_status(ctxt, image_id, status, image_info[0],
image_info[1])
else:
status = IMAGE_STATES.ERROR
update_lp_status(ctxt, image_id, status)
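# Hedged illustration (not part of the original handler): the per-step flags computed at
# the top of Handler.launch_workflow() are equivalent to simple membership tests on the
# workflow list, e.g. for a hypothetical request:
#
#   workflow = ['unittest', 'build', 'deploy']
#   enable_unittest = 'unittest' in workflow   # True
#   enable_build = 'build' in workflow         # True
#   enable_deploy = 'deploy' in workflow       # True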
|
|
from __future__ import print_function, division, absolute_import
from collections import namedtuple
import sys
from llvmlite.ir import Value
from llvmlite.llvmpy.core import Constant, Type, Builder
from . import (_dynfunc, cgutils, config, funcdesc, generators, ir, types,
typing, utils)
from .errors import LoweringError
class Environment(_dynfunc.Environment):
__slots__ = ()
@classmethod
def from_fndesc(cls, fndesc):
mod = fndesc.lookup_module()
return cls(mod.__dict__)
def __reduce__(self):
return _rebuild_env, (self.globals['__name__'], self.consts)
def _rebuild_env(modname, consts):
from . import serialize
mod = serialize._rebuild_module(modname)
env = Environment(mod.__dict__)
env.consts[:] = consts
return env
_VarArgItem = namedtuple("_VarArgItem", ("vararg", "index"))
class BaseLower(object):
"""
Lower IR to LLVM
"""
# If true, then the LLVM module can't be cached across process calls
has_dynamic_globals = False
def __init__(self, context, library, fndesc, interp):
self.context = context
self.library = library
self.fndesc = fndesc
self.blocks = utils.SortedMap(utils.iteritems(interp.blocks))
self.interp = interp
self.call_conv = context.call_conv
self.generator_info = self.interp.generator_info
# Initialize LLVM
self.module = self.library.create_ir_module(self.fndesc.unique_name)
# Python execution environment (will be available to the compiled
# function).
self.env = Environment.from_fndesc(self.fndesc)
# Internal states
self.blkmap = {}
self.varmap = {}
self.firstblk = min(self.blocks.keys())
self.loc = -1
# Subclass initialization
self.init()
def init(self):
pass
def init_pyapi(self):
"""
Init the Python API and Environment Manager for the function being
lowered.
"""
if self.pyapi is not None:
return
self.pyapi = self.context.get_python_api(self.builder)
# Store environment argument for later use
self.envarg = self.call_conv.get_env_argument(self.function)
# Sanity check
with cgutils.if_unlikely(self.builder,
cgutils.is_null(self.builder, self.envarg)):
self.pyapi.err_set_string(
"PyExc_SystemError",
"Numba internal error: object mode function called "
"without an environment")
self.call_conv.return_exc(self.builder)
self.env_body = self.context.get_env_body(self.builder, self.envarg)
self.pyapi.emit_environment_sentry(self.envarg)
self.env_manager = self.pyapi.get_env_manager(self.env, self.env_body,
self.envarg)
def pre_lower(self):
"""
Called before lowering all blocks.
"""
# A given Lower object can be used for several LL functions
# (for generators) and it's important to use a new API and
# EnvironmentManager.
self.pyapi = None
def post_lower(self):
"""
Called after all blocks are lowered
"""
def pre_block(self, block):
"""
Called before lowering a block.
"""
def return_exception(self, exc_class, exc_args=None):
self.call_conv.return_user_exc(self.builder, exc_class, exc_args)
def lower(self):
if self.generator_info is None:
self.genlower = None
self.lower_normal_function(self.fndesc)
else:
self.genlower = self.GeneratorLower(self)
self.gentype = self.genlower.gentype
self.genlower.lower_init_func(self)
self.genlower.lower_next_func(self)
if self.gentype.has_finalizer:
self.genlower.lower_finalize_func(self)
if config.DUMP_LLVM:
print(("LLVM DUMP %s" % self.fndesc).center(80, '-'))
print(self.module)
print('=' * 80)
# Run target specific post lowering transformation
self.context.post_lowering(self.module, self.library)
# Materialize LLVM Module
self.library.add_ir_module(self.module)
def extract_function_arguments(self):
rawfnargs = self.call_conv.get_arguments(self.function)
arginfo = self.context.get_arg_packer(self.fndesc.argtypes)
self.fnargs = arginfo.from_arguments(self.builder, rawfnargs)
return self.fnargs
def lower_normal_function(self, fndesc):
"""
Lower non-generator *fndesc*.
"""
self.setup_function(fndesc)
# Init argument values
self.extract_function_arguments()
entry_block_tail = self.lower_function_body()
# Close tail of entry block
self.builder.position_at_end(entry_block_tail)
self.builder.branch(self.blkmap[self.firstblk])
def lower_function_body(self):
"""
Lower the current function's body, and return the entry block.
"""
# Init Python blocks
for offset in self.blocks:
bname = "B%s" % offset
self.blkmap[offset] = self.function.append_basic_block(bname)
self.pre_lower()
# pre_lower() may have changed the current basic block
entry_block_tail = self.builder.basic_block
self.debug_print("# function begin: {0}".format(
self.fndesc.unique_name))
# Lower all blocks
for offset, block in self.blocks.items():
bb = self.blkmap[offset]
self.builder.position_at_end(bb)
self.lower_block(block)
self.post_lower()
return entry_block_tail
def lower_block(self, block):
"""
Lower the given block.
"""
self.pre_block(block)
for inst in block.body:
self.loc = inst.loc
try:
self.lower_inst(inst)
except LoweringError:
raise
except Exception as e:
msg = "Internal error:\n%s: %s" % (type(e).__name__, e)
raise LoweringError(msg, inst.loc)
def create_cpython_wrapper(self, release_gil=False):
"""
Create CPython wrapper(s) around this function (or generator).
"""
if self.genlower:
self.context.create_cpython_wrapper(self.library,
self.genlower.gendesc,
self.env, self.call_helper,
release_gil=release_gil)
self.context.create_cpython_wrapper(self.library, self.fndesc,
self.env, self.call_helper,
release_gil=release_gil)
def setup_function(self, fndesc):
# Setup function
self.function = self.context.declare_function(self.module, fndesc)
self.entry_block = self.function.append_basic_block('entry')
self.builder = Builder.new(self.entry_block)
self.call_helper = self.call_conv.init_call_helper(self.builder)
def typeof(self, varname):
return self.fndesc.typemap[varname]
def debug_print(self, msg):
if config.DEBUG_JIT:
self.context.debug_print(self.builder, "DEBUGJIT: {0}".format(msg))
class Lower(BaseLower):
GeneratorLower = generators.GeneratorLower
def lower_inst(self, inst):
self.debug_print(str(inst))
if isinstance(inst, ir.Assign):
ty = self.typeof(inst.target.name)
val = self.lower_assign(ty, inst)
self.storevar(val, inst.target.name)
elif isinstance(inst, ir.Branch):
cond = self.loadvar(inst.cond.name)
tr = self.blkmap[inst.truebr]
fl = self.blkmap[inst.falsebr]
condty = self.typeof(inst.cond.name)
pred = self.context.cast(self.builder, cond, condty, types.boolean)
assert pred.type == Type.int(1), ("cond is not i1: %s" % pred.type)
self.builder.cbranch(pred, tr, fl)
elif isinstance(inst, ir.Jump):
target = self.blkmap[inst.target]
self.builder.branch(target)
elif isinstance(inst, ir.Return):
if self.generator_info:
# StopIteration
self.genlower.return_from_generator(self)
return
val = self.loadvar(inst.value.name)
oty = self.typeof(inst.value.name)
ty = self.fndesc.restype
if isinstance(ty, types.Optional):
# If returning an optional type
self.call_conv.return_optional_value(self.builder, ty, oty, val)
return
if ty != oty:
val = self.context.cast(self.builder, val, oty, ty)
retval = self.context.get_return_value(self.builder, ty, val)
self.call_conv.return_value(self.builder, retval)
elif isinstance(inst, ir.SetItem):
target = self.loadvar(inst.target.name)
value = self.loadvar(inst.value.name)
index = self.loadvar(inst.index.name)
targetty = self.typeof(inst.target.name)
valuety = self.typeof(inst.value.name)
indexty = self.typeof(inst.index.name)
signature = self.fndesc.calltypes[inst]
assert signature is not None
impl = self.context.get_function('setitem', signature)
# Convert argument to match
if isinstance(targetty, types.Optional):
target = self.context.cast(self.builder, target, targetty,
targetty.type)
else:
assert targetty == signature.args[0]
index = self.context.cast(self.builder, index, indexty,
signature.args[1])
value = self.context.cast(self.builder, value, valuety,
signature.args[2])
return impl(self.builder, (target, index, value))
elif isinstance(inst, ir.DelItem):
target = self.loadvar(inst.target.name)
index = self.loadvar(inst.index.name)
targetty = self.typeof(inst.target.name)
indexty = self.typeof(inst.index.name)
signature = self.fndesc.calltypes[inst]
assert signature is not None
impl = self.context.get_function('delitem', signature)
assert targetty == signature.args[0]
index = self.context.cast(self.builder, index, indexty,
signature.args[1])
return impl(self.builder, (target, index))
elif isinstance(inst, ir.Del):
try:
# XXX: incorrect Del injection?
val = self.loadvar(inst.value)
except KeyError:
pass
else:
self.decref(self.typeof(inst.value), val)
self._delete_variable(inst.value)
elif isinstance(inst, ir.SetAttr):
target = self.loadvar(inst.target.name)
value = self.loadvar(inst.value.name)
signature = self.fndesc.calltypes[inst]
targetty = self.typeof(inst.target.name)
valuety = self.typeof(inst.value.name)
assert signature is not None
assert signature.args[0] == targetty
impl = self.context.get_setattr(inst.attr, signature)
# Convert argument to match
value = self.context.cast(self.builder, value, valuety,
signature.args[1])
return impl(self.builder, (target, value))
elif isinstance(inst, ir.Raise):
self.lower_raise(inst)
else:
raise NotImplementedError(type(inst))
def lower_raise(self, inst):
if inst.exception is None:
# Reraise
self.return_exception(None)
else:
exctype = self.typeof(inst.exception.name)
if isinstance(exctype, types.ExceptionInstance):
# raise <instance> => find the instantiation site
excdef = self.interp.get_definition(inst.exception)
if (not isinstance(excdef, ir.Expr) or excdef.op != 'call'
or excdef.kws):
raise NotImplementedError("unsupported kind of raising")
# Try to infer the args tuple
args = tuple(self.interp.get_definition(arg).infer_constant()
for arg in excdef.args)
elif isinstance(exctype, types.ExceptionClass):
args = None
else:
raise NotImplementedError("cannot raise value of type %s"
% (exctype,))
self.return_exception(exctype.exc_class, args)
def lower_assign(self, ty, inst):
value = inst.value
# In nopython mode, closure vars are frozen like globals
if isinstance(value, (ir.Const, ir.Global, ir.FreeVar)):
if isinstance(ty, types.ExternalFunctionPointer):
res = self.context.get_constant_generic(self.builder, ty,
value.value)
self.has_dynamic_globals = True
elif isinstance(ty, types.Dummy):
res = self.context.get_dummy_value()
elif isinstance(ty, types.Array):
res = self.context.make_constant_array(self.builder, ty,
value.value)
else:
res = self.context.get_constant_generic(self.builder, ty,
value.value)
self.incref(ty, res)
return res
elif isinstance(value, ir.Expr):
return self.lower_expr(ty, value)
elif isinstance(value, ir.Var):
val = self.loadvar(value.name)
oty = self.typeof(value.name)
res = self.context.cast(self.builder, val, oty, ty)
self.incref(ty, res)
return res
elif isinstance(value, ir.Arg):
res = self.fnargs[value.index]
self.incref(ty, res)
return res
elif isinstance(value, ir.Yield):
res = self.lower_yield(ty, value)
self.incref(ty, res)
return res
raise NotImplementedError(type(value), value)
def lower_yield(self, retty, inst):
yp = self.generator_info.yield_points[inst.index]
assert yp.inst is inst
y = generators.LowerYield(self, yp, yp.live_vars)
y.lower_yield_suspend()
# Yield to caller
val = self.loadvar(inst.value.name)
typ = self.typeof(inst.value.name)
val = self.context.cast(self.builder, val, typ, self.gentype.yield_type)
self.call_conv.return_value(self.builder, val)
# Resumption point
y.lower_yield_resume()
# None is returned by the yield expression
return self.context.get_constant_generic(self.builder, retty, None)
def lower_binop(self, resty, expr, op):
lhs = expr.lhs
rhs = expr.rhs
lty = self.typeof(lhs.name)
rty = self.typeof(rhs.name)
lhs = self.loadvar(lhs.name)
rhs = self.loadvar(rhs.name)
# Get function
signature = self.fndesc.calltypes[expr]
impl = self.context.get_function(op, signature)
# Convert argument to match
lhs = self.context.cast(self.builder, lhs, lty, signature.args[0])
rhs = self.context.cast(self.builder, rhs, rty, signature.args[1])
res = impl(self.builder, (lhs, rhs))
return self.context.cast(self.builder, res,
signature.return_type, resty)
def _cast_var(self, var, ty):
"""
Cast a Numba IR variable to the given Numba type, returning a
low-level value.
"""
if isinstance(var, _VarArgItem):
varty = self.typeof(var.vararg.name)[var.index]
val = self.builder.extract_value(self.loadvar(var.vararg.name),
var.index)
else:
varty = self.typeof(var.name)
val = self.loadvar(var.name)
return self.context.cast(self.builder, val, varty, ty)
def lower_call(self, resty, expr):
signature = self.fndesc.calltypes[expr]
if isinstance(signature.return_type, types.Phantom):
return self.context.get_dummy_value()
if isinstance(expr.func, ir.Intrinsic):
fnty = expr.func.name
argvals = expr.func.args
else:
fnty = self.typeof(expr.func.name)
pos_args = expr.args
if expr.vararg:
# Inject *args from function call
# The lowering will be done in _cast_var() above.
tp_vararg = self.typeof(expr.vararg.name)
assert isinstance(tp_vararg, types.BaseTuple)
pos_args = pos_args + [_VarArgItem(expr.vararg, i)
for i in range(len(tp_vararg))]
# Fold keyword arguments and resolve default argument values
pysig = signature.pysig
if pysig is None:
if expr.kws:
raise NotImplementedError("unsupported keyword arguments "
"when calling %s" % (fnty,))
argvals = [self._cast_var(var, sigty)
for var, sigty in zip(pos_args, signature.args)]
else:
def normal_handler(index, param, var):
return self._cast_var(var, signature.args[index])
def default_handler(index, param, default):
return self.context.get_constant_generic(
self.builder, signature.args[index], default)
def stararg_handler(index, param, vars):
values = [self._cast_var(var, sigty)
for var, sigty in
zip(vars, signature.args[index])]
return cgutils.make_anonymous_struct(self.builder, values)
argvals = typing.fold_arguments(pysig,
pos_args, dict(expr.kws),
normal_handler,
default_handler,
stararg_handler)
if isinstance(fnty, types.ExternalFunction):
# Handle a named external function
self.debug_print("# external function")
fndesc = funcdesc.ExternalFunctionDescriptor(
fnty.symbol, fnty.sig.return_type, fnty.sig.args)
func = self.context.declare_external_function(self.builder.module,
fndesc)
res = self.context.call_external_function(
self.builder, func, fndesc.argtypes, argvals)
elif isinstance(fnty, types.NumbaFunction):
# Handle a compiled Numba function
self.debug_print("# calling numba function")
res = self.context.call_internal(self.builder, fnty.fndesc,
fnty.sig, argvals)
elif isinstance(fnty, types.ExternalFunctionPointer):
self.debug_print("# calling external function pointer")
# Handle a C function pointer
pointer = self.loadvar(expr.func.name)
# If the external function pointer uses libpython
if fnty.requires_gil:
self.init_pyapi()
# Acquire the GIL
gil_state = self.pyapi.gil_ensure()
# Make PyObjects
newargvals = []
pyvals = []
for exptyp, gottyp, aval in zip(fnty.sig.args, signature.args,
argvals):
# Adjust argument values to pyobjects
if exptyp == types.ffi_forced_object:
self.incref(gottyp, aval)
obj = self.pyapi.from_native_value(gottyp, aval,
self.env_manager)
newargvals.append(obj)
pyvals.append(obj)
else:
newargvals.append(aval)
# Call external function
res = self.context.call_function_pointer(self.builder, pointer,
newargvals, fnty.cconv)
# Release PyObjects
for obj in pyvals:
self.pyapi.decref(obj)
# Release the GIL
self.pyapi.gil_release(gil_state)
# If the external function pointer does NOT use libpython
else:
res = self.context.call_function_pointer(self.builder, pointer,
argvals, fnty.cconv)
else:
# Normal function resolution (for Numba-compiled functions)
self.debug_print("# calling normal function: {0}".format(fnty))
impl = self.context.get_function(fnty, signature)
if signature.recvr:
# The "self" object is passed as the function object
# for bound functions
the_self = self.loadvar(expr.func.name)
# Prepend the self reference
argvals = [the_self] + list(argvals)
res = impl(self.builder, argvals)
libs = getattr(impl, "libs", ())
for lib in libs:
self.library.add_linking_library(lib)
return self.context.cast(self.builder, res, signature.return_type,
resty)
def lower_expr(self, resty, expr):
if expr.op == 'binop':
return self.lower_binop(resty, expr, expr.fn)
elif expr.op == 'inplace_binop':
lty = self.typeof(expr.lhs.name)
if lty.mutable:
return self.lower_binop(resty, expr, expr.fn)
else:
# inplace operators on non-mutable types reuse the same
# definition as the corresponding copying operators.
return self.lower_binop(resty, expr, expr.immutable_fn)
elif expr.op == 'unary':
val = self.loadvar(expr.value.name)
typ = self.typeof(expr.value.name)
# Get function
signature = self.fndesc.calltypes[expr]
impl = self.context.get_function(expr.fn, signature)
# Convert argument to match
val = self.context.cast(self.builder, val, typ, signature.args[0])
res = impl(self.builder, [val])
res = self.context.cast(self.builder, res,
signature.return_type, resty)
return res
elif expr.op == 'call':
res = self.lower_call(resty, expr)
return res
elif expr.op == 'pair_first':
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
res = self.context.pair_first(self.builder, val, ty)
self.incref(resty, res)
return res
elif expr.op == 'pair_second':
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
res = self.context.pair_second(self.builder, val, ty)
self.incref(resty, res)
return res
elif expr.op in ('getiter', 'iternext'):
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
signature = self.fndesc.calltypes[expr]
impl = self.context.get_function(expr.op, signature)
[fty] = signature.args
castval = self.context.cast(self.builder, val, ty, fty)
res = impl(self.builder, (castval,))
res = self.context.cast(self.builder, res, signature.return_type,
resty)
return res
elif expr.op == 'exhaust_iter':
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
# If we have a tuple, we needn't do anything
# (and we can't iterate over the heterogeneous ones).
if isinstance(ty, types.BaseTuple):
assert ty == resty
self.incref(ty, val)
return val
itemty = ty.iterator_type.yield_type
tup = self.context.get_constant_undef(resty)
pairty = types.Pair(itemty, types.boolean)
getiter_sig = typing.signature(ty.iterator_type, ty)
getiter_impl = self.context.get_function('getiter',
getiter_sig)
iternext_sig = typing.signature(pairty, ty.iterator_type)
iternext_impl = self.context.get_function('iternext',
iternext_sig)
iterobj = getiter_impl(self.builder, (val,))
# We call iternext() as many times as desired (`expr.count`).
for i in range(expr.count):
pair = iternext_impl(self.builder, (iterobj,))
is_valid = self.context.pair_second(self.builder,
pair, pairty)
with cgutils.if_unlikely(self.builder,
self.builder.not_(is_valid)):
self.return_exception(ValueError)
item = self.context.pair_first(self.builder,
pair, pairty)
tup = self.builder.insert_value(tup, item, i)
# Call iternext() once more to check that the iterator
# is exhausted.
pair = iternext_impl(self.builder, (iterobj,))
is_valid = self.context.pair_second(self.builder,
pair, pairty)
with cgutils.if_unlikely(self.builder, is_valid):
self.return_exception(ValueError)
self.decref(ty.iterator_type, iterobj)
return tup
elif expr.op == "getattr":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
if isinstance(resty, types.BoundFunction):
# if we are getting out a method, assume we have typed this
# properly and just build a bound function object
res = self.context.get_bound_function(self.builder, val, ty)
self.incref(resty, res)
return res
else:
impl = self.context.get_attribute(val, ty, expr.attr)
if impl is None:
# ignore the attribute
return self.context.get_dummy_value()
else:
res = impl(self.context, self.builder, ty, val, expr.attr)
return res
elif expr.op == "static_getitem":
baseval = self.loadvar(expr.value.name)
indexval = self.context.get_constant(types.intp, expr.index)
if cgutils.is_struct(baseval.type):
# Statically extract the given element from the structure
# (structures aren't dynamically indexable).
res = self.builder.extract_value(baseval, expr.index)
self.incref(resty, res)
return res
else:
# Fall back on the generic getitem() implementation
# for this type.
signature = typing.signature(resty,
self.typeof(expr.value.name),
types.intp)
impl = self.context.get_function("getitem", signature)
argvals = (baseval, indexval)
res = impl(self.builder, argvals)
return self.context.cast(self.builder, res,
signature.return_type, resty)
elif expr.op == "getitem":
baseval = self.loadvar(expr.value.name)
indexval = self.loadvar(expr.index.name)
signature = self.fndesc.calltypes[expr]
impl = self.context.get_function("getitem", signature)
argvals = (baseval, indexval)
argtyps = (self.typeof(expr.value.name),
self.typeof(expr.index.name))
castvals = [self.context.cast(self.builder, av, at, ft)
for av, at, ft in zip(argvals, argtyps,
signature.args)]
res = impl(self.builder, castvals)
return self.context.cast(self.builder, res,
signature.return_type,
resty)
elif expr.op == "build_tuple":
itemvals = [self.loadvar(i.name) for i in expr.items]
itemtys = [self.typeof(i.name) for i in expr.items]
castvals = [self.context.cast(self.builder, val, fromty, toty)
for val, toty, fromty in zip(itemvals, resty, itemtys)]
tup = self.context.make_tuple(self.builder, resty, castvals)
self.incref(resty, tup)
return tup
elif expr.op == "build_list":
itemvals = [self.loadvar(i.name) for i in expr.items]
itemtys = [self.typeof(i.name) for i in expr.items]
castvals = [self.context.cast(self.builder, val, fromty, resty.dtype)
for val, fromty in zip(itemvals, itemtys)]
return self.context.build_list(self.builder, resty, castvals)
elif expr.op == "cast":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
castval = self.context.cast(self.builder, val, ty, resty)
self.incref(resty, castval)
return castval
elif expr.op in self.context.special_ops:
res = self.context.special_ops[expr.op](self, expr)
return res
raise NotImplementedError(expr)
def getvar(self, name):
return self.varmap[name]
def loadvar(self, name):
ptr = self.getvar(name)
return self.builder.load(ptr)
def storevar(self, value, name):
fetype = self.typeof(name)
# Define if not already
if name not in self.varmap:
# If not already defined, allocate it
llty = self.context.get_value_type(fetype)
ptr = self.alloca_lltype(name, llty)
# Remember the pointer
self.varmap[name] = ptr
# Clean up existing value stored in the variable
old = self.loadvar(name)
self.decref(fetype, old)
# Store variable
ptr = self.getvar(name)
if value.type != ptr.type.pointee:
msg = ("Storing {value.type} to ptr of {ptr.type.pointee} ('{name}'). "
"FE type {fetype}").format(value=value, ptr=ptr,
fetype=fetype, name=name)
raise AssertionError(msg)
self.builder.store(value, ptr)
def alloca(self, name, type):
lltype = self.context.get_value_type(type)
return self.alloca_lltype(name, lltype)
def alloca_lltype(self, name, lltype):
return cgutils.alloca_once(self.builder, lltype, name=name, zfill=True)
def incref(self, typ, val):
if not self.context.enable_nrt:
return
self.context.nrt_incref(self.builder, typ, val)
def decref(self, typ, val):
if not self.context.enable_nrt:
return
self.context.nrt_decref(self.builder, typ, val)
def _delete_variable(self, varname):
"""
Zero-fill variable to avoid crashing due to extra ir.Del
"""
storage = self.getvar(varname)
self.builder.store(Constant.null(storage.type.pointee), storage)
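# Hedged usage sketch (not part of this module): the BaseLower/Lower machinery above is
# driven by Numba's compilation pipeline rather than called directly. A user-level
# example that exercises it end to end:
#
#   import numba
#
#   @numba.njit
#   def add(a, b):
#       return a + b
#
#   add(1, 2)  # the first call triggers type inference and lowering of the IR to LLVM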
|
|
"""
@package mi.instrument.harvard.massp.rga.test.test_driver
@file marine-integrations/mi/instrument/harvard/massp/rga/test/test_driver.py
@author Peter Cable
@brief Test cases for rga driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/test_driver
$ bin/test_driver -u [-t testname]
$ bin/test_driver -i [-t testname]
$ bin/test_driver -q [-t testname]
"""
__author__ = 'Peter Cable'
__license__ = 'Apache 2.0'
import struct
import time
import unittest
import ntplib
from nose.plugins.attrib import attr
from mock import Mock, call
from mi.idk.unit_test import InstrumentDriverTestCase, ParameterTestConfigKey
from mi.idk.unit_test import InstrumentDriverUnitTestCase
from mi.idk.unit_test import InstrumentDriverIntegrationTestCase
from mi.idk.unit_test import InstrumentDriverQualificationTestCase
from mi.idk.unit_test import DriverTestMixin
from mi.core.exceptions import InstrumentStateException
from mi.core.exceptions import InstrumentCommandException
from mi.core.exceptions import InstrumentProtocolException
from mi.core.instrument.data_particle import RawDataParticle, CommonDataParticleType
from mi.core.instrument.instrument_driver import DriverConfigKey, ResourceAgentState
from mi.core.instrument.port_agent_client import PortAgentPacket
from mi.instrument.harvard.massp.rga.driver import InstrumentDriver
from mi.instrument.harvard.massp.rga.driver import RGAStatusParticleKey
from mi.instrument.harvard.massp.rga.driver import RGASampleParticleKey
from mi.instrument.harvard.massp.rga.driver import ParameterConstraints
from mi.instrument.harvard.massp.rga.driver import DataParticleType
from mi.instrument.harvard.massp.rga.driver import InstrumentCommand
from mi.instrument.harvard.massp.rga.driver import ProtocolState
from mi.instrument.harvard.massp.rga.driver import ProtocolEvent
from mi.instrument.harvard.massp.rga.driver import Capability
from mi.instrument.harvard.massp.rga.driver import Parameter
from mi.instrument.harvard.massp.rga.driver import Protocol
from mi.instrument.harvard.massp.rga.driver import Prompt
from mi.instrument.harvard.massp.rga.driver import NEWLINE
from mi.core.log import get_logger
log = get_logger()
rga_startup_config = {
DriverConfigKey.PARAMETERS: {
Parameter.EE: 70,
Parameter.IE: 1,
Parameter.VF: 90,
Parameter.NF: 3,
Parameter.SA: 10,
Parameter.MI: 1,
Parameter.MF: 100,
Parameter.FL: 1.0,
Parameter.HV: 0,
}
}
###
# Driver parameters for the tests
###
InstrumentDriverTestCase.initialize(
driver_module='mi.instrument.harvard.massp.rga.driver',
driver_class="InstrumentDriver",
instrument_agent_resource_id='YAQ3KY',
instrument_agent_name='harvard_massp_rga',
instrument_agent_packet_config=DataParticleType(),
driver_startup_config=rga_startup_config
)
#################################### RULES ####################################
# #
# Common capabilities in the base class #
# #
# Instrument specific stuff in the derived class #
# #
# Generator spits out either stubs or comments describing test this here, #
# test that there. #
# #
# Qualification tests are driven through the instrument_agent #
# #
###############################################################################
###
# Driver constant definitions
###
###############################################################################
# DRIVER TEST MIXIN #
# Defines a set of constants and assert methods used for data particle #
# verification #
# #
# In python mixin classes are classes designed such that they wouldn't be #
# able to stand on their own, but are inherited by other classes generally #
# using multiple inheritance. #
# #
# This class defines a configuration structure for testing and common assert #
# methods for validating data particles. #
###############################################################################
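# Hedged, generic illustration of the mixin pattern described above (the names are made
# up and unrelated to this driver):
#
#   class JsonMixin(object):
#       def to_json(self):
#           import json
#           return json.dumps(self.__dict__)
#
#   class Config(JsonMixin):      # gains to_json() through ordinary inheritance
#       def __init__(self, name):
#           self.name = name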
class DriverTestMixinSub(DriverTestMixin):
# Create some short names for the parameter test config
TYPE = ParameterTestConfigKey.TYPE
READONLY = ParameterTestConfigKey.READONLY
STARTUP = ParameterTestConfigKey.STARTUP
DA = ParameterTestConfigKey.DIRECT_ACCESS
VALUE = ParameterTestConfigKey.VALUE
REQUIRED = ParameterTestConfigKey.REQUIRED
DEFAULT = ParameterTestConfigKey.DEFAULT
STATES = ParameterTestConfigKey.STATES
def assert_sample_data_particle(self, data_particle):
"""
Verify a particle is a known particle to this driver and verify the particle is
correct
@param data_particle: Data particle of unknown type produced by the driver
"""
if isinstance(data_particle, RawDataParticle):
self.assert_particle_raw(data_particle)
else:
self.fail("Unknown Particle Detected: %s" % data_particle)
responses = {
'IN0': 0,
'EE70': 0,
'EE?': 70,
'IE1': 0,
'IE?': 1,
'VF90': 0,
'VF?': 90,
'NF?': 3,
'SA?': 10,
'MI?': 1,
'MF?': 100,
'FL1.0': 0,
'FL?': 0.9976,
'AP?': 251,
'ID?': 'FAKEID',
'SC1': '\xba\xdd\xca\xfe' * 252,
'ER?': 0,
'HV?': 0,
'HV0': 0,
}
sample_data = struct.unpack('<252i', responses['SC1'])
_sample_parameters = {
RGASampleParticleKey.SCAN_DATA: {TYPE: tuple, VALUE: sample_data, REQUIRED: True},
}
_status_parameters = {
RGAStatusParticleKey.ID: {TYPE: unicode, VALUE: responses['ID?'], REQUIRED: True},
RGAStatusParticleKey.EE: {TYPE: int, VALUE: responses['EE?'], REQUIRED: True},
RGAStatusParticleKey.IE: {TYPE: int, VALUE: responses['IE?'], REQUIRED: True},
RGAStatusParticleKey.VF: {TYPE: int, VALUE: responses['VF?'], REQUIRED: True},
RGAStatusParticleKey.NF: {TYPE: int, VALUE: responses['NF?'], REQUIRED: True},
RGAStatusParticleKey.ER: {TYPE: int, VALUE: responses['ER?'], REQUIRED: True},
RGAStatusParticleKey.SA: {TYPE: int, VALUE: responses['SA?'], REQUIRED: True},
RGAStatusParticleKey.MI: {TYPE: int, VALUE: responses['MI?'], REQUIRED: True},
RGAStatusParticleKey.MF: {TYPE: int, VALUE: responses['MF?'], REQUIRED: True},
RGAStatusParticleKey.AP: {TYPE: int, VALUE: responses['AP?'], REQUIRED: True},
RGAStatusParticleKey.HV: {TYPE: int, VALUE: responses['HV?'], REQUIRED: True},
RGAStatusParticleKey.FL: {TYPE: float, VALUE: 1.0, REQUIRED: True},
RGAStatusParticleKey.FL_ACTUAL: {TYPE: float, VALUE: responses['FL?'], REQUIRED: True},
}
_driver_parameters = {
# Parameters defined in the IOS
Parameter.ID: {TYPE: str, READONLY: True, DA: False, STARTUP: False},
Parameter.AP: {TYPE: int, READONLY: True, DA: False, STARTUP: False},
Parameter.ER: {TYPE: int, READONLY: True, DA: False, STARTUP: False},
Parameter.EE: {TYPE: int, READONLY: False, DA: False, STARTUP: True},
Parameter.IE: {TYPE: int, READONLY: False, DA: False, STARTUP: True},
Parameter.VF: {TYPE: int, READONLY: False, DA: False, STARTUP: True},
Parameter.NF: {TYPE: int, READONLY: False, DA: False, STARTUP: True},
Parameter.SA: {TYPE: int, READONLY: False, DA: False, STARTUP: True},
Parameter.MI: {TYPE: int, READONLY: False, DA: False, STARTUP: True},
Parameter.MF: {TYPE: int, READONLY: False, DA: False, STARTUP: True},
Parameter.HV: {TYPE: int, READONLY: False, DA: False, STARTUP: True},
Parameter.FL: {TYPE: float, READONLY: False, DA: False, STARTUP: True},
Parameter.FL_ACTUAL: {TYPE: float, READONLY: True, DA: False, STARTUP: True},
Parameter.ERROR_REASON: {TYPE: str, READONLY: True, DA: False, STARTUP: False},
}
_driver_capabilities = {
# capabilities defined in the IOS
Capability.DISCOVER: {STATES: [ProtocolState.UNKNOWN]},
Capability.START_SCAN: {STATES: [ProtocolState.COMMAND]},
Capability.STOP_SCAN: {STATES: [ProtocolState.SCAN]},
Capability.CLEAR: {STATES: [ProtocolState.ERROR]},
}
_capabilities = {
ProtocolState.UNKNOWN: ['DRIVER_EVENT_DISCOVER'],
ProtocolState.COMMAND: ['DRIVER_EVENT_GET',
'DRIVER_EVENT_SET',
'DRIVER_EVENT_START_DIRECT',
'PROTOCOL_EVENT_START_SCAN'],
ProtocolState.SCAN: ['PROTOCOL_EVENT_TAKE_SCAN',
'PROTOCOL_EVENT_STOP_SCAN',
'PROTOCOL_EVENT_TIMEOUT',
'PROTOCOL_EVENT_ERROR'],
ProtocolState.DIRECT_ACCESS: ['DRIVER_EVENT_STOP_DIRECT', 'EXECUTE_DIRECT'],
ProtocolState.ERROR: ['PROTOCOL_EVENT_CLEAR', 'DRIVER_EVENT_GET']
}
def _send_port_agent_packet(self, driver, data):
"""
Send the supplied data to the driver in a port agent packet
@param driver: instrument driver instance
@param data: data to be sent
"""
ts = ntplib.system_to_ntp_time(time.time())
port_agent_packet = PortAgentPacket()
port_agent_packet.attach_data(data)
port_agent_packet.attach_timestamp(ts)
port_agent_packet.pack_header()
# Push the response into the driver
driver._protocol.got_data(port_agent_packet)
# sleep briefly; some state changes happen asynchronously, and this should give those
# threads time to finish
time.sleep(.01)
def my_send(self, driver):
"""
Side effect function generator - will send responses based on input
@param driver Instrument driver instance
@returns side effect function
"""
def inner(data):
"""
Inner function for side effect generator
@param data Data to send
@returns length of response
"""
data = data.replace(NEWLINE, '')
log.trace('my_send data: %r', data)
my_response = self.responses.get(data)
if my_response is not None:
my_response = str(my_response)
log.trace("my_send: data: %r, my_response: %r", data, my_response)
# scans repeat over and over, sleep between them to prevent overloading cpu
if data == 'SC1':
time.sleep(0.9)
self._send_port_agent_packet(driver, my_response + '\n' + NEWLINE)
return len(my_response)
return inner
def assert_rga_sample_particle(self, particle, verify_values=False):
log.debug('assert_rga_sample_particle: %r', particle)
self.assert_data_particle_keys(RGASampleParticleKey, self._sample_parameters)
self.assert_data_particle_header(particle, DataParticleType.RGA_SAMPLE)
self.assert_data_particle_parameters(particle, self._sample_parameters, verify_values)
def assert_rga_status_particle(self, particle, verify_values=False):
log.debug('assert_rga_status_particle: %r', particle)
self.assert_data_particle_keys(RGAStatusParticleKey, self._status_parameters)
self.assert_data_particle_header(particle, DataParticleType.RGA_STATUS)
self.assert_data_particle_parameters(particle, self._status_parameters, verify_values)
###############################################################################
# UNIT TESTS #
# Unit tests test the method calls and parameters using Mock. #
# #
# These tests are especially useful for testing parsers and other data #
# handling. The tests generally focus on small segments of code, like a #
# single function call, but more complex code using Mock objects. However #
# if you find yourself mocking too much maybe it is better as an #
# integration test. #
# #
# Unit tests do not start up external processes like the port agent or #
# driver process. #
###############################################################################
# noinspection PyProtectedMember
@attr('UNIT', group='mi')
class DriverUnitTest(InstrumentDriverUnitTestCase, DriverTestMixinSub):
def setUp(self):
InstrumentDriverUnitTestCase.setUp(self)
def test_connect(self, initial_protocol_state=ProtocolState.COMMAND):
"""
Verify driver can transition to the COMMAND state
"""
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_initialize_driver(driver, initial_protocol_state)
driver._connection.send.side_effect = self.my_send(driver)
driver._protocol.set_init_params(rga_startup_config)
driver._protocol._init_params()
return driver
def test_driver_enums(self):
"""
Verify that all driver enumerations have no duplicate values that might cause confusion. Also
do a little extra validation for the Capabilities
"""
self.assert_enum_has_no_duplicates(DataParticleType())
self.assert_enum_has_no_duplicates(ProtocolState())
self.assert_enum_has_no_duplicates(ProtocolEvent())
self.assert_enum_has_no_duplicates(Parameter())
self.assert_enum_has_no_duplicates(InstrumentCommand())
# Test capabilities for duplicates, then verify that capabilities is a subset of protocol events
self.assert_enum_has_no_duplicates(Capability())
self.assert_enum_complete(Capability(), ProtocolEvent())
def test_capabilities(self):
"""
Verify the FSM reports capabilities as expected. All states defined in this dict must
also be defined in the protocol FSM.
"""
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_capabilities(driver, self._capabilities)
def test_start_scan(self):
"""
Send a start scan event to the driver.
Use the side_effect on send from the mock port agent to simulate instrument responses.
This checks the chunker and particle generation, since the chunker and particles are
dynamic based on instrument parameters.
"""
driver = self.test_connect()
self.clear_data_particle_queue()
driver._protocol._protocol_fsm.on_event(Capability.START_SCAN)
self.assertEqual(driver._protocol.get_current_state(), ProtocolState.SCAN)
particles = []
# loop, because the monkey patched time doesn't reliably sleep long enough...
now = time.time()
while time.time() < (now+5):
time.sleep(1)
for particle_dict in self._data_particle_received:
stream_type = particle_dict.get('stream_name')
self.assertIsNotNone(stream_type)
if stream_type != CommonDataParticleType.RAW:
particles.append((particle_dict, stream_type))
log.debug("Non raw particles: %s ", particles)
self.assertGreaterEqual(len(particles), 1)
for p, stream_name in particles:
if stream_name == DataParticleType.RGA_STATUS:
self.assert_rga_status_particle(p, True)
else:
self.assert_rga_sample_particle(p, True)
def test_sample_missing_data(self):
"""
Send a start scan event to the driver, but don't return enough data. Verify that no
sample particle is produced but the driver starts another scan.
"""
orig_scan = self.responses['SC1']
self.responses['SC1'] = 'this is a bad scan, man!'
driver = self.test_connect()
# side effect for our Mocked on_event
def my_on_event(event):
log.debug('my_on_event: event: %r', event)
driver._protocol._protocol_fsm.on_event_actual(event)
# swap out on_event with a Mock object now
on_event_mock = Mock()
on_event_mock.side_effect = my_on_event
driver._protocol._protocol_fsm.on_event_actual = driver._protocol._protocol_fsm.on_event
driver._protocol._protocol_fsm.on_event = on_event_mock
driver._protocol._protocol_fsm.on_event(Capability.START_SCAN)
self.assertEqual(driver._protocol.get_current_state(), ProtocolState.SCAN)
# clear the particle queue to remove the status particle
self.clear_data_particle_queue()
# sleep a bit
time.sleep(15)
# check for the correct calls
on_event_mock.assert_has_calls([call(Capability.START_SCAN),
call(Capability.TAKE_SCAN),
call(ProtocolEvent.TIMEOUT)])
self.responses['SC1'] = orig_scan
# check there are no particles
self.assertEqual(len(self._data_particle_received), 0)
def test_error_byte(self):
"""
Respond with an error and verify the FSM transitions to an error state.
"""
driver = self.test_connect()
# set up responses to return an error when the filament is enabled
self.responses['FL1.0'] = 1
try:
driver._protocol._protocol_fsm.on_event(Capability.START_SCAN)
self.assertTrue(False, msg='Failed to raise an exception when the error byte was set')
except InstrumentStateException:
# we threw an exception as expected.
pass
finally:
# restore responses so other tests don't fail!
self.responses['FL1.0'] = 0
# make sure we moved to the ERROR state
time.sleep(.1)
self.assertEqual(driver._protocol.get_current_state(), ProtocolState.ERROR)
# clear the error, assert we moved back to COMMAND
driver._protocol._protocol_fsm.on_event(Capability.CLEAR)
self.assertEqual(driver._protocol.get_current_state(), ProtocolState.COMMAND)
def test_protocol_filter_capabilities(self):
"""
This tests driver filter_capabilities.
Iterate through available capabilities, and verify that they can pass successfully through the filter.
Test silly made up capabilities to verify they are blocked by filter.
"""
mock_callback = Mock()
protocol = Protocol(Prompt, NEWLINE, mock_callback)
driver_capabilities = Capability.list()
test_capabilities = Capability.list()
# Add a bogus capability that will be filtered out.
test_capabilities.append("BOGUS_CAPABILITY")
# Verify "BOGUS_CAPABILITY was filtered out
self.assertEquals(sorted(driver_capabilities),
sorted(protocol._filter_capabilities(test_capabilities)))
def test_driver_schema(self):
"""
get the driver schema and verify it is configured properly
"""
driver = InstrumentDriver(self._got_data_event_callback)
self.assert_driver_schema(driver, self._driver_parameters, self._driver_capabilities)
###############################################################################
# INTEGRATION TESTS #
# Integration tests test the direct driver / instrument interaction           #
# by making direct calls via zeromq.                                          #
# - Common Integration tests test the driver through the instrument agent #
# and common for all drivers (minimum requirement for ION ingestion) #
###############################################################################
@attr('INT', group='mi')
class DriverIntegrationTest(InstrumentDriverIntegrationTestCase, DriverTestMixinSub):
def setUp(self):
InstrumentDriverIntegrationTestCase.setUp(self)
def test_take_scan(self):
"""
Start a scan and verify status and sample particles are generated.
"""
self.assert_initialize_driver()
self.assert_driver_command(Capability.START_SCAN)
self.assert_state_change(ProtocolState.SCAN, 5)
self.assert_async_particle_generation(DataParticleType.RGA_STATUS, self.assert_rga_status_particle)
self.assert_async_particle_generation(DataParticleType.RGA_SAMPLE, self.assert_rga_sample_particle, 2, 600)
self.assert_driver_command(Capability.STOP_SCAN)
@unittest.skip("This takes a very long time... Don't run it unless you mean it!")
def test_scan_parameters(self):
"""
Step through a sequence of configuration parameters to test scan timing. Data is in confluence.
"""
self.assert_initialize_driver()
self.assert_set(Parameter.MI, 5, no_get=True)
for mf in range(10, 100, 5):
self.assert_set(Parameter.MF, mf, no_get=True)
for nf in range(1, 8):
self.clear_events()
self.assert_set(Parameter.NF, nf, no_get=True)
self.assert_driver_command(Capability.START_SCAN)
self.assert_state_change(ProtocolState.SCAN, 5)
self.assert_async_particle_generation(DataParticleType.RGA_STATUS, Mock())
self.assert_async_particle_generation(DataParticleType.RGA_SAMPLE, Mock(), 2, 900)
self.assert_driver_command(Capability.STOP_SCAN)
self.assert_state_change(ProtocolState.COMMAND, 5)
# while this is an integration test, it can be run without access to the instrument
def test_get_parameters(self):
"""
Verify we can get all parameters
"""
self.assert_initialize_driver()
startup_params = self.test_config.driver_startup_config[DriverConfigKey.PARAMETERS]
for key, value in startup_params.items():
self.assert_get(key, value)
# while this is an integration test, it can be run without access to the instrument
def test_set_parameters(self):
"""
Verify we can set all parameters
"""
self.assert_initialize_driver()
constraints = ParameterConstraints.dict()
parameters = Parameter.reverse_dict()
startup_params = self.test_config.driver_startup_config[DriverConfigKey.PARAMETERS]
        for key, value in startup_params.items():
if key in parameters and parameters[key] in constraints:
_, minimum, maximum = constraints[parameters[key]]
self.assert_set(key, maximum-1)
else:
self.assert_set(key, value + 1)
self.assert_set_bulk(startup_params)
# while this is an integration test, it can be run without access to the instrument
def test_out_of_range(self):
"""
Verify out of range values raise exceptions
"""
self.assert_initialize_driver()
constraints = ParameterConstraints.dict()
parameters = Parameter.dict()
log.debug(constraints)
for key in constraints:
_, minimum, maximum = constraints[key]
parameter = parameters[key]
self.assert_set_exception(parameter, minimum - 1)
self.assert_set_exception(parameter, maximum + 1)
self.assert_set_exception(parameter, "strings aren't valid here!")
def test_set_bogus_parameter(self):
"""
Verify bogus parameters raise exceptions
"""
self.assert_initialize_driver()
self.assert_set_exception('BOGUS', 'CHEESE')
def test_state_transitions(self):
"""
Verify state transitions
"""
self.assert_initialize_driver()
self.assert_driver_command(Capability.START_SCAN)
self.assert_state_change(ProtocolState.SCAN, 5)
self.assert_driver_command(Capability.STOP_SCAN)
self.assert_state_change(ProtocolState.COMMAND, 5)
# verify the filament is off
self.assert_get(Parameter.FL_ACTUAL, 0.0)
def test_bad_command(self):
"""
Verify bad commands raise exceptions
"""
self.assert_initialize_driver()
self.assert_driver_command_exception('BAD_COMMAND', exception_class=InstrumentCommandException)
def test_incomplete_config(self):
"""
Break our startup config, then verify the driver raises an exception
"""
# grab the old config
startup_params = self.test_config.driver_startup_config[DriverConfigKey.PARAMETERS]
old_value = startup_params[Parameter.EE]
try:
# delete a required parameter
del (startup_params[Parameter.EE])
# re-init to take our broken config
self.init_driver_process_client()
self.assert_initialize_driver()
# request start scan
self.assert_driver_command(Capability.START_SCAN)
self.assertTrue(False, msg='Failed to raise exception on missing parameter')
except Exception as e:
self.assertTrue(self._driver_exception_match(e, InstrumentProtocolException))
finally:
startup_params[Parameter.EE] = old_value
###############################################################################
# QUALIFICATION TESTS #
# Device-specific qualification tests are for final testing of ION           #
# integration. They generally aren't used for instrument debugging and       #
# should be tackled after all unit and integration tests are complete        #
###############################################################################
@attr('QUAL', group='mi')
class DriverQualificationTest(InstrumentDriverQualificationTestCase, DriverTestMixinSub):
def setUp(self):
InstrumentDriverQualificationTestCase.setUp(self)
def test_direct_access_telnet_mode(self):
"""
This test manually tests that the Instrument Driver properly supports
direct access to the physical instrument. (telnet mode)
"""
self.assert_direct_access_start_telnet()
self.assertTrue(self.tcp_client)
self.tcp_client.send_data(InstrumentCommand.ID + '?' + NEWLINE)
self.assertTrue(self.tcp_client.expect('SRSRGA200'))
self.assert_direct_access_stop_telnet()
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 5)
def test_poll(self):
"""
A scan is the closest thing we have to a poll here...
"""
self.assert_enter_command_mode()
self.assert_particle_polled(Capability.START_SCAN,
self.assert_rga_status_particle,
DataParticleType.RGA_STATUS,
timeout=30)
self.assert_particle_async(DataParticleType.RGA_SAMPLE, self.assert_rga_sample_particle, timeout=100)
self.assert_execute_resource(Capability.STOP_SCAN)
self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 5)
def test_get_set_parameters(self):
"""
        Verify that all parameters can be get/set properly; this includes
        ensuring that read-only parameters fail on set.
"""
self.assert_enter_command_mode()
constraints = ParameterConstraints.dict()
parameters = Parameter.reverse_dict()
startup_params = self.test_config.driver_startup_config[DriverConfigKey.PARAMETERS]
for key, value in startup_params.items():
self.assert_get_parameter(key, value)
if key in parameters and parameters[key] in constraints:
_, minimum, maximum = constraints[parameters[key]]
self.assert_set_parameter(key, maximum-1)
else:
self.assert_set_parameter(key, value + 1)
def test_reset(self):
"""
Verify the agent can be reset
Overridden, driver does not have autosample
"""
self.assert_enter_command_mode()
self.assert_reset()
def test_discover(self):
"""
Overridden, driver does not have autosample
"""
# Verify the agent is in command mode
self.assert_enter_command_mode()
# Now reset and try to discover. This will stop the driver which holds the current
# instrument state.
self.assert_reset()
self.assert_discover(ResourceAgentState.COMMAND)
|
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import errno
import itertools
import json
import multiprocessing
import os
import shutil
import subprocess
import sys
import traceback
from shutil import copyfile
BARE_INTERFACE_SEARCH_PATHS = [
"usr/lib/swift",
"System/iOSSupport/usr/lib/swift"
]
DEFAULT_FRAMEWORK_INTERFACE_SEARCH_PATHS = [
"System/Library/Frameworks",
"System/iOSSupport/System/Library/Frameworks"
]
STDLIB_NAME = 'Swift'
MONOTONIC_VERSION = 1
def create_parser():
parser = argparse.ArgumentParser(
description="Builds an SDK's swiftinterfaces into swiftmodules. "
"Always searches usr/lib/swift in addition to whichever "
"framework directories are passed on the command line.",
prog=os.path.basename(__file__),
usage='%(prog)s -o output/ [INTERFACE_SEARCH_DIRS]',
epilog='Environment variables: SDKROOT, SWIFT_EXEC, '
'SWIFT_FORCE_MODULE_LOADING')
parser.add_argument('interface_framework_dirs', nargs='*',
metavar='INTERFACE_SEARCH_DIRS',
help='Relative paths to search for frameworks with '
'interfaces (default: System/Library/Frameworks)')
parser.add_argument('-o', dest='output_dir',
help='Directory to which the output will be emitted '
'(required)')
parser.add_argument('-j', dest='jobs', type=int,
help='The number of parallel jobs to execute '
'(default: # of cores)')
parser.add_argument('-v', dest='verbose', action='store_true',
help='Print command invocations and progress info')
parser.add_argument('-n', dest='dry_run', action='store_true',
help='Dry run: don\'t actually run anything')
parser.add_argument('-sdk', default=os.getenv('SDKROOT'),
help='SDK to find frameworks and interfaces in '
'(default: $SDKROOT)')
parser.add_argument('-F', dest='framework_dirs', metavar='DIR',
action='append', default=[],
help='Add additional framework search paths')
parser.add_argument('-Fsystem', '-iframework',
dest='system_framework_dirs', metavar='DIR',
action='append', default=[],
help='Add additional system framework search paths')
parser.add_argument('-Fsystem-iosmac',
dest='iosmac_system_framework_dirs', metavar='DIR',
action='append', default=[],
help='Add system framework search paths '
'for iOSMac only')
parser.add_argument('-I', dest='include_dirs', metavar='DIR',
action='append', default=[],
help='Add additional header/module search paths')
parser.add_argument('-module-cache-path',
help='Temporary directory to store intermediate info')
parser.add_argument('-log-path',
help='Directory to write stdout/stderr output to')
parser.add_argument('-skip-stdlib', action='store_true',
help='Don\'t build the standard library interface')
parser.add_argument('-disable-modules-validate-system-headers',
action='store_true',
help='Disable modules verification for system headers')
parser.add_argument('-xfails', metavar='PATH',
help='JSON file containing an array of the modules '
'expected to fail')
parser.add_argument('-check-only', action='store_true',
help='Assume the resulting modules will be thrown '
'away (may be faster)')
parser.add_argument('-ignore-non-stdlib-failures', action='store_true',
help='Treat all modules but the stdlib as XFAILed')
parser.add_argument('-debug-crash-compiler', action='store_true',
help='Have the compiler crash (for testing purposes)')
parser.add_argument('-machine-parseable-monotonic-version',
action='store_true',
help='For comparing versions of this tool')
return parser
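# A minimal usage sketch (illustrative only; the script name and paths below
# are hypothetical, not taken from this file):
#
#   SDKROOT=/path/to/iPhoneOS.sdk ./build_sdk_interfaces.py \
#       -o /tmp/prebuilt-modules -v System/Library/Frameworks
#
# When no positional directories are given, the default framework search
# paths defined above are used, and usr/lib/swift is always searched.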
def fatal(msg):
print(msg, file=sys.stderr)
sys.exit(1)
def run_command(args, dry_run):
if dry_run:
return (0, "", "")
proc = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
out, err = proc.communicate()
exitcode = proc.returncode
return (exitcode, out.decode('utf-8'), err.decode('utf-8'))
except KeyboardInterrupt:
proc.terminate()
raise
def make_dirs_if_needed(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
class NegatedSet:
def __init__(self, contents):
self._contents = frozenset(contents)
def __contains__(self, item):
return item not in self._contents
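# Note: a NegatedSet "contains" everything except its members; for example,
# NegatedSet((STDLIB_NAME,)) is used below so that -ignore-non-stdlib-failures
# treats every module other than the stdlib as expected to fail.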
class ModuleFile:
def __init__(self, name, path, is_expected_to_fail):
self.name = name
self.path = path
self.is_expected_to_fail = is_expected_to_fail
def collect_slices(xfails, swiftmodule_dir):
if not os.path.isdir(swiftmodule_dir):
return
module_name, extension = \
os.path.splitext(os.path.basename(swiftmodule_dir))
assert extension == ".swiftmodule"
is_xfail = module_name in xfails
for entry in os.listdir(swiftmodule_dir):
_, extension = os.path.splitext(entry)
if extension == ".swiftinterface":
yield ModuleFile(module_name, os.path.join(swiftmodule_dir, entry),
is_xfail)
def collect_framework_modules(sdk, xfails, sdk_relative_framework_dirs):
for sdk_relative_framework_dir in sdk_relative_framework_dirs:
framework_dir = os.path.join(sdk, sdk_relative_framework_dir)
if not os.access(framework_dir, os.R_OK):
continue
for entry in os.listdir(framework_dir):
path_without_extension, extension = os.path.splitext(entry)
if extension != ".framework":
continue
module_name = os.path.basename(path_without_extension)
swiftmodule = os.path.join(framework_dir, entry, "Modules",
module_name + ".swiftmodule")
if os.access(swiftmodule, os.R_OK):
for x in collect_slices(xfails, swiftmodule):
yield x
def collect_non_framework_modules(sdk, xfails, sdk_relative_search_dirs):
for sdk_relative_search_dir in sdk_relative_search_dirs:
search_dir = os.path.join(sdk, sdk_relative_search_dir)
for dir_path, _, file_names in os.walk(search_dir, followlinks=True):
if os.path.splitext(dir_path)[1] == ".swiftmodule":
for x in collect_slices(xfails, dir_path):
yield x
else:
for interface in file_names:
module_name, extension = os.path.splitext(interface)
if extension == ".swiftinterface":
is_xfail = module_name in xfails
yield ModuleFile(module_name,
os.path.join(dir_path, interface),
is_xfail)
def should_retry_compilation(stderr):
if "has been modified since the module file" in stderr:
return True
if "mismatched umbrella headers in submodule" in stderr:
return True
if "is out of date and needs to be rebuilt: signature mismatch" in stderr:
return True
if "current parser token 'include'" in stderr:
return True
if "current parser token 'import'" in stderr:
return True
return False
def run_with_module_cache_retry(command_args, module_cache_path, dry_run):
"""Hack: runs a command several times, clearing the module cache if we get
an error about header files being modified during the run.
This shouldn't be necessary (the cached PCM files should automatically be
regenerated) but there seems to still be a bug in Clang that we haven't
tracked down yet.
"""
RETRIES = 3
attempts_stderr = ""
for r in range(RETRIES):
status, stdout, stderr = run_command(command_args, dry_run)
if status == 0:
break
if not should_retry_compilation(stderr):
break
if module_cache_path:
shutil.rmtree(module_cache_path, ignore_errors=True)
# If all retries fail, output information for each instance.
attempts_stderr += (
"\n*** Compilation attempt {}/{} failed with modules bugs. "
"Error output:\n".format(r + 1, RETRIES))
attempts_stderr += stderr
stderr = attempts_stderr
return (status, stdout, stderr)
def log_output_to_file(content, module_name, interface_base, label, log_path):
if not log_path:
return
if not content:
return
make_dirs_if_needed(log_path)
log_name = module_name + "-" + interface_base + "-" + label + ".txt"
with open(os.path.join(log_path, log_name), "w") as output_file:
output_file.write(content)
def looks_like_iosmac(interface_base):
return 'ios-macabi' in interface_base
def process_module(module_file):
global args, shared_output_lock
try:
interface_base, _ = \
os.path.splitext(os.path.basename(module_file.path))
swiftc = os.getenv('SWIFT_EXEC',
os.path.join(os.path.dirname(__file__), 'swiftc'))
command_args = [
swiftc, '-frontend',
'-build-module-from-parseable-interface',
'-sdk', args.sdk,
'-prebuilt-module-cache-path', args.output_dir,
]
module_cache_path = ""
if args.module_cache_path:
module_cache_path = os.path.join(args.module_cache_path,
str(os.getpid()))
command_args += ('-module-cache-path', module_cache_path)
if args.debug_crash_compiler:
command_args += ('-debug-crash-immediately',)
if not args.check_only:
command_args += (
'-serialize-parseable-module-interface-dependency-hashes',)
if args.disable_modules_validate_system_headers:
command_args += (
'-disable-modules-validate-system-headers',)
# FIXME: This shouldn't be necessary, but the module name is checked
# before the frontend action is.
if module_file.name == STDLIB_NAME:
command_args += ('-parse-stdlib',)
if looks_like_iosmac(interface_base):
for system_framework_path in args.iosmac_system_framework_dirs:
command_args += ('-Fsystem', system_framework_path)
command_args += ('-Fsystem', os.path.join(args.sdk, "System",
"iOSSupport", "System",
"Library", "Frameworks"))
for include_path in args.include_dirs:
command_args += ('-I', include_path)
for system_framework_path in args.system_framework_dirs:
command_args += ('-Fsystem', system_framework_path)
for framework_path in args.framework_dirs:
command_args += ('-F', framework_path)
command_args += ('-module-name', module_file.name, module_file.path)
output_path = os.path.join(args.output_dir,
module_file.name + ".swiftmodule")
if interface_base != module_file.name:
make_dirs_if_needed(output_path)
output_path = os.path.join(output_path,
interface_base + ".swiftmodule")
command_args += ('-o', output_path)
if args.verbose:
with shared_output_lock:
print("# Starting " + module_file.path)
print(' '.join(command_args))
sys.stdout.flush()
status, stdout, stderr = run_with_module_cache_retry(
command_args, module_cache_path=module_cache_path,
dry_run=args.dry_run)
log_output_to_file(stdout, module_file.name, interface_base, "out",
log_path=args.log_path)
log_output_to_file(stderr, module_file.name, interface_base, "err",
log_path=args.log_path)
return (module_file, status, stdout, stderr)
except BaseException:
# We're catching everything here because we don't want to take down the
# other jobs.
return (module_file, 1, "",
"".join(traceback.format_exception(*sys.exc_info())))
def set_up_child(parent_args, lock):
global args, shared_output_lock
args = parent_args
shared_output_lock = lock
def process_module_files(pool, module_files):
results = pool.imap_unordered(process_module, module_files)
overall_exit_status = 0
for (module_file, exit_status, stdout, stderr) in results:
with shared_output_lock:
if exit_status != 0:
print("# ", end="")
if module_file.is_expected_to_fail:
print("(XFAIL) ", end="")
else:
print("(FAIL) ", end="")
print(module_file.path)
if (not module_file.is_expected_to_fail) or args.verbose:
print(stdout, end="")
print(stderr, end="", file=sys.stderr)
elif module_file.is_expected_to_fail:
print("# (UPASS) " + module_file.path)
elif args.verbose:
print("# (PASS) " + module_file.path)
sys.stdout.flush()
if overall_exit_status == 0 and \
not module_file.is_expected_to_fail:
overall_exit_status = exit_status
return overall_exit_status
def getSDKVersion(sdkroot):
    settingPath = os.path.join(sdkroot, 'SDKSettings.json')
    try:
        with open(settingPath) as json_file:
            data = json.load(json_file)
        return data['Version']
    except (IOError, OSError, KeyError, ValueError):
        fatal("Failed to get SDK version from: " + settingPath)
def copySystemVersionFile(sdkroot, output):
sysInfoPath = os.path.join(sdkroot,
'System/Library/CoreServices/SystemVersion.plist')
destInfoPath = os.path.join(output, 'SystemVersion.plist')
try:
copyfile(sysInfoPath, destInfoPath)
except BaseException as e:
print("cannot copy from " + sysInfoPath + " to " + destInfoPath + ": " + str(e))
def main():
global args, shared_output_lock
parser = create_parser()
args = parser.parse_args()
if args.machine_parseable_monotonic_version:
print(MONOTONIC_VERSION)
sys.exit(0)
if 'SWIFT_FORCE_MODULE_LOADING' not in os.environ:
os.environ['SWIFT_FORCE_MODULE_LOADING'] = 'prefer-serialized'
if not args.output_dir:
fatal("argument -o is required")
if not args.sdk:
fatal("SDKROOT must be set in the environment")
if not os.path.isdir(args.sdk):
fatal("invalid SDK: " + args.sdk)
    # If the given output dir ends with 'prebuilt-modules', append the SDK
    # version number so all modules will be built into the SDK-versioned
    # sub-directory.
if os.path.basename(args.output_dir) == 'prebuilt-modules':
args.output_dir = os.path.join(args.output_dir, getSDKVersion(args.sdk))
xfails = ()
if args.ignore_non_stdlib_failures:
if args.xfails:
print("warning: ignoring -xfails because "
"-ignore-non-stdlib-failures was provided", file=sys.stderr)
xfails = NegatedSet((STDLIB_NAME,))
elif args.xfails:
with open(args.xfails) as xfails_file:
xfails = json.load(xfails_file)
make_dirs_if_needed(args.output_dir)
# Copy a file containing SDK build version into the prebuilt module dir,
# so we can keep track of the SDK version we built from.
copySystemVersionFile(args.sdk, args.output_dir)
shared_output_lock = multiprocessing.Lock()
pool = multiprocessing.Pool(args.jobs, set_up_child,
(args, shared_output_lock))
interface_framework_dirs = (args.interface_framework_dirs or
DEFAULT_FRAMEWORK_INTERFACE_SEARCH_PATHS)
module_files = list(itertools.chain(
collect_non_framework_modules(args.sdk, xfails,
BARE_INTERFACE_SEARCH_PATHS),
collect_framework_modules(args.sdk, xfails, interface_framework_dirs)))
if not args.skip_stdlib:
# Always do the stdlib first, so that we can use it in later steps
stdlib_module_files = (
x for x in module_files if x.name == STDLIB_NAME)
status = process_module_files(pool, stdlib_module_files)
if status != 0:
sys.exit(status)
non_stdlib_module_files = (
x for x in module_files if x.name != STDLIB_NAME)
status = process_module_files(pool, non_stdlib_module_files)
if os.name == 'nt':
import ctypes
Kernel32 = ctypes.cdll.LoadLibrary("Kernel32.dll")
Kernel32.ExitProcess(ctypes.c_ulong(status))
sys.exit(status)
if __name__ == '__main__':
main()
|
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
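# Usage sketch (illustrative; invoke with whatever name this file is saved
# under): pass a single RPC command as the first argument, e.g.
#   python <this_script>.py getinfo
# and the matching branch below prompts for any additional values before
# issuing the call.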
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:46942")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:46942")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
            print access.getwork(data)
except:
            print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os.path
import keyring
import numpy as np
import re
import tarfile
import string
import requests
import warnings
from pkg_resources import resource_filename
from bs4 import BeautifulSoup
import pyvo
from urllib.parse import urljoin
from astropy.table import Table, Column, vstack
from astroquery import log
from astropy.utils import deprecated
from astropy.utils.console import ProgressBar
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy import units as u
from astropy.time import Time
from pyvo.dal.sia2 import SIA_PARAMETERS_DESC
from ..exceptions import LoginError
from ..utils import commons
from ..utils.process_asyncs import async_to_sync
from ..query import QueryWithLogin
from .tapsql import _gen_pos_sql, _gen_str_sql, _gen_numeric_sql,\
_gen_band_list_sql, _gen_datetime_sql, _gen_pol_sql, _gen_pub_sql,\
_gen_science_sql, _gen_spec_res_sql, ALMA_DATE_FORMAT
from . import conf, auth_urls
from astroquery.utils.commons import ASTROPY_LT_4_1
__all__ = ['AlmaClass', 'ALMA_BANDS']
__doctest_skip__ = ['AlmaClass.*']
ALMA_TAP_PATH = 'tap'
ALMA_SIA_PATH = 'sia2'
ALMA_DATALINK_PATH = 'datalink/sync'
# Map from ALMA ObsCore result to ALMA original query result
# The map is provided in order to preserve the column names used in the
# original ALMA query results and keep them backwards compatible.
# key - current column, value - original column name
_OBSCORE_TO_ALMARESULT = {
'proposal_id': 'Project code',
'target_name': 'Source name',
's_ra': 'RA',
's_dec': 'Dec',
'gal_longitude': 'Galactic longitude',
'gal_latitude': 'Galactic latitude',
'band_list': 'Band',
's_region': 'Footprint',
'em_resolution': 'Frequency resolution',
'antenna_arrays': 'Array',
'is_mosaic': 'Mosaic',
't_exptime': 'Integration',
'obs_release_date': 'Release date',
'frequency_support': 'Frequency support',
'velocity_resolution': 'Velocity resolution',
'pol_states': 'Pol products',
't_min': 'Observation date',
'obs_creator_name': 'PI name',
'schedblock_name': 'SB name',
'proposal_authors': 'Proposal authors',
'sensitivity_10kms': 'Line sensitivity (10 km/s)',
'cont_sensitivity_bandwidth': 'Continuum sensitivity',
'pwv': 'PWV',
'group_ous_uid': 'Group ous id',
'member_ous_uid': 'Member ous id',
'asdm_uid': 'Asdm uid',
'obs_title': 'Project title',
'type': 'Project type',
'scan_intent': 'Scan intent',
's_fov': 'Field of view',
'spatial_scale_max': 'Largest angular scale',
'qa2_passed': 'QA2 Status',
# TODO COUNT
'science_keyword': 'Science keyword',
'scientific_category': 'Scientific category'
}
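# ALMA receiver bands mapped to their nominal (lower, upper) frequency ranges.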
ALMA_BANDS = {
'3': (84*u.GHz, 116*u.GHz),
'4': (125*u.GHz, 163*u.GHz),
'5': (163*u.GHz, 211*u.GHz),
'6': (211*u.GHz, 275*u.GHz),
'7': (275*u.GHz, 373*u.GHz),
'8': (385*u.GHz, 500*u.GHz),
'9': (602*u.GHz, 720*u.GHz),
'10': (787*u.GHz, 950*u.GHz)
}
ALMA_FORM_KEYS = {
'Position': {
'Source name (astropy Resolver)': ['source_name_resolver',
'SkyCoord.from_name', _gen_pos_sql],
'Source name (ALMA)': ['source_name_alma', 'target_name', _gen_str_sql],
'RA Dec (Sexagesimal)': ['ra_dec', 's_ra, s_dec', _gen_pos_sql],
'Galactic (Degrees)': ['galactic', 'gal_longitude, gal_latitude',
_gen_pos_sql],
'Angular resolution (arcsec)': ['spatial_resolution',
'spatial_resolution', _gen_numeric_sql],
'Largest angular scale (arcsec)': ['spatial_scale_max',
'spatial_scale_max', _gen_numeric_sql],
'Field of view (arcsec)': ['fov', 's_fov', _gen_numeric_sql]
},
'Energy': {
'Frequency (GHz)': ['frequency', 'frequency', _gen_numeric_sql],
'Bandwidth (Hz)': ['bandwidth', 'bandwidth', _gen_numeric_sql],
'Spectral resolution (KHz)': ['spectral_resolution',
'em_resolution', _gen_spec_res_sql],
'Band': ['band_list', 'band_list', _gen_band_list_sql]
},
'Time': {
'Observation date': ['start_date', 't_min', _gen_datetime_sql],
'Integration time (s)': ['integration_time', 't_exptime',
_gen_numeric_sql]
},
'Polarization': {
'Polarisation type (Single, Dual, Full)': ['polarisation_type',
'pol_states', _gen_pol_sql]
},
'Observation': {
'Line sensitivity (10 km/s) (mJy/beam)': ['line_sensitivity',
'sensitivity_10kms',
_gen_numeric_sql],
'Continuum sensitivity (mJy/beam)': ['continuum_sensitivity',
'cont_sensitivity_bandwidth',
_gen_numeric_sql],
        'Water vapour (mm)': ['water_vapour', 'pwv', _gen_numeric_sql]
},
'Project': {
'Project code': ['project_code', 'proposal_id', _gen_str_sql],
'Project title': ['project_title', 'obs_title', _gen_str_sql],
'PI name': ['pi_name', 'obs_creator_name', _gen_str_sql],
'Proposal authors': ['proposal_authors', 'proposal_authors', _gen_str_sql],
'Project abstract': ['project_abstract', 'proposal_abstract', _gen_str_sql],
'Publication count': ['publication_count', 'NA', _gen_str_sql],
'Science keyword': ['science_keyword', 'science_keyword', _gen_str_sql]
},
'Publication': {
'Bibcode': ['bibcode', 'bib_reference', _gen_str_sql],
'Title': ['pub_title', 'pub_title', _gen_str_sql],
'First author': ['first_author', 'first_author', _gen_str_sql],
'Authors': ['authors', 'authors', _gen_str_sql],
'Abstract': ['pub_abstract', 'pub_abstract', _gen_str_sql],
'Year': ['publication_year', 'pub_year', _gen_numeric_sql]
},
'Options': {
'Public data only': ['public_data', 'data_rights', _gen_pub_sql],
'Science observations only': ['science_observation',
'science_observation', _gen_science_sql]
}
}
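# Each ALMA_FORM_KEYS entry maps a human-readable form label to a triple of
# [payload keyword, ObsCore column(s), SQL-generator function]; _gen_sql below
# uses the second and third elements to build the WHERE clause. For instance,
# a payload such as {'project_code': '2011.0.00131.S'} (value illustrative
# only) becomes a constraint on the proposal_id column via _gen_str_sql.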
def _gen_sql(payload):
sql = 'select * from ivoa.obscore'
where = ''
if payload:
for constraint in payload:
for attrib_category in ALMA_FORM_KEYS.values():
for attrib in attrib_category.values():
if constraint in attrib:
# use the value and the second entry in attrib which
# is the new name of the column
val = payload[constraint]
if constraint == 'em_resolution':
# em_resolution does not require any transformation
attrib_where = _gen_numeric_sql(constraint, val)
else:
attrib_where = attrib[2](attrib[1], val)
if attrib_where:
if where:
where += ' AND '
else:
where = ' WHERE '
where += attrib_where
return sql + where
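# Sketch of the resulting ADQL (illustrative; the exact constraint text comes
# from the _gen_*_sql helpers): an empty payload yields
#   select * from ivoa.obscore
# while a payload with two recognized keywords yields
#   select * from ivoa.obscore WHERE <constraint1> AND <constraint2>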
@async_to_sync
class AlmaClass(QueryWithLogin):
TIMEOUT = conf.timeout
archive_url = conf.archive_url
USERNAME = conf.username
def __init__(self):
# sia service does not need disambiguation but tap does
super(AlmaClass, self).__init__()
self._sia = None
self._tap = None
self._datalink = None
self.sia_url = None
self.tap_url = None
self.datalink_url = None
@property
def datalink(self):
if not self._datalink:
base_url = self._get_dataarchive_url()
if base_url.endswith('/'):
self.datalink_url = base_url + ALMA_DATALINK_PATH
else:
self.datalink_url = base_url + '/' + ALMA_DATALINK_PATH
self._datalink = pyvo.dal.adhoc.DatalinkService(
baseurl=self.datalink_url)
return self._datalink
@property
def sia(self):
if not self._sia:
base_url = self._get_dataarchive_url()
if base_url.endswith('/'):
self.sia_url = base_url + ALMA_SIA_PATH
else:
self.sia_url = base_url + '/' + ALMA_SIA_PATH
self._sia = pyvo.dal.sia2.SIAService(baseurl=self.sia_url)
return self._sia
@property
def tap(self):
if not self._tap:
base_url = self._get_dataarchive_url()
if base_url.endswith('/'):
self.tap_url = base_url + ALMA_TAP_PATH
else:
self.tap_url = base_url + '/' + ALMA_TAP_PATH
self._tap = pyvo.dal.tap.TAPService(baseurl=self.tap_url)
return self._tap
def query_object_async(self, object_name, cache=None, public=True,
science=True, payload=None, **kwargs):
"""
Query the archive for a source name.
Parameters
----------
object_name : str
            The object name. Will be resolved by `astropy.coordinates.SkyCoord.from_name`
cache : deprecated
public : bool
True to return only public datasets, False to return private only,
None to return both
science : bool
True to return only science datasets, False to return only
calibration, None to return both
payload : dict
Dictionary of additional keywords. See `help`.
"""
if payload is not None:
payload['source_name_resolver'] = object_name
else:
payload = {'source_name_resolver': object_name}
return self.query_async(public=public, science=science,
payload=payload, **kwargs)
def query_region_async(self, coordinate, radius, cache=None, public=True,
science=True, payload=None, **kwargs):
"""
        Query the ALMA archive for a region around the given coordinates.
Parameters
----------
        coordinate : str / `astropy.coordinates`
the identifier or coordinates around which to query.
radius : str / `~astropy.units.Quantity`, optional
the radius of the region
cache : Deprecated
Cache the query?
public : bool
True to return only public datasets, False to return private only,
None to return both
science : bool
True to return only science datasets, False to return only
calibration, None to return both
payload : dict
Dictionary of additional keywords. See `help`.
"""
rad = radius
if not isinstance(radius, u.Quantity):
rad = radius*u.deg
obj_coord = commons.parse_coordinates(coordinate).icrs
ra_dec = '{}, {}'.format(obj_coord.to_string(), rad.to(u.deg).value)
if payload is None:
payload = {}
if 'ra_dec' in payload:
payload['ra_dec'] += ' | {}'.format(ra_dec)
else:
payload['ra_dec'] = ra_dec
return self.query_async(public=public, science=science,
payload=payload, **kwargs)
def query_async(self, payload, cache=None, public=True, science=True,
legacy_columns=False, max_retries=None,
get_html_version=None,
get_query_payload=None, **kwargs):
"""
Perform a generic query with user-specified payload
Parameters
----------
payload : dictionary
Please consult the `help` method
cache : deprecated
public : bool
True to return only public datasets, False to return private only,
None to return both
science : bool
True to return only science datasets, False to return only
calibration, None to return both
legacy_columns : bool
True to return the columns from the obsolete ALMA advanced query,
otherwise return the current columns based on ObsCore model.
Returns
-------
Table with results. Columns are those in the ALMA ObsCore model
(see ``help_tap``) unless ``legacy_columns`` argument is set to True.
"""
        local_args = dict(locals().items())
        for arg, value in local_args.items():
            # check if any of the deprecated arguments have been used
            if arg in ('cache', 'max_retries', 'get_html_version') \
                    and value is not None:
                warnings.warn(
                    ("Argument '{}' has been deprecated "
                     "since version 4.0.1 and will be ignored".format(arg)),
                    AstropyDeprecationWarning)
                kwargs.pop(arg, None)
if payload is None:
payload = {}
for arg in kwargs:
value = kwargs[arg]
if 'band_list' == arg and isinstance(value, list):
value = ' '.join([str(_) for _ in value])
if arg in payload:
payload[arg] = '{} {}'.format(payload[arg], value)
else:
payload[arg] = value
if science is not None:
payload['science_observation'] = science
if public is not None:
payload['public_data'] = public
if get_query_payload:
return payload
query = _gen_sql(payload)
result = self.query_tap(query, maxrec=payload.get('maxrec', None))
if result is not None:
result = result.to_table()
else:
# Should not happen
raise RuntimeError('BUG: Unexpected result None')
if legacy_columns:
legacy_result = Table()
            # map the ObsCore columns onto the legacy ALMA column names
for col_name in _OBSCORE_TO_ALMARESULT:
if col_name in result.columns:
if col_name == 't_min':
legacy_result['Observation date'] = \
[Time(_['t_min'], format='mjd').strftime(
ALMA_DATE_FORMAT) for _ in result]
else:
legacy_result[_OBSCORE_TO_ALMARESULT[col_name]] = \
result[col_name]
else:
log.error("Invalid column mapping in OBSCORE_TO_ALMARESULT: "
"{}:{}. Please "
"report this as an Issue."
.format(col_name, _OBSCORE_TO_ALMARESULT[col_name]))
return legacy_result
return result
def query_sia(self, pos=None, band=None, time=None, pol=None,
field_of_view=None, spatial_resolution=None,
spectral_resolving_power=None, exptime=None,
timeres=None, publisher_did=None,
facility=None, collection=None,
instrument=None, data_type=None,
calib_level=None, target_name=None,
res_format=None, maxrec=None,
**kwargs):
"""
Use standard SIA2 attributes to query the ALMA SIA service.
Parameters
----------
_SIA2_PARAMETERS
Returns
-------
Results in `pyvo.dal.SIAResults` format.
result.table in Astropy table format
"""
return self.sia.search(
pos=pos,
band=band,
time=time,
pol=pol,
field_of_view=field_of_view,
spatial_resolution=spatial_resolution,
spectral_resolving_power=spectral_resolving_power,
exptime=exptime,
timeres=timeres,
publisher_did=publisher_did,
facility=facility,
collection=collection,
instrument=instrument,
data_type=data_type,
calib_level=calib_level,
target_name=target_name,
res_format=res_format,
maxrec=maxrec,
**kwargs)
# SIA_PARAMETERS_DESC contains links that Sphinx can't resolve.
for var in ('POLARIZATION_STATES', 'CALIBRATION_LEVELS'):
SIA_PARAMETERS_DESC = SIA_PARAMETERS_DESC.replace(f'`pyvo.dam.obscore.{var}`',
f'pyvo.dam.obscore.{var}')
query_sia.__doc__ = query_sia.__doc__.replace('_SIA2_PARAMETERS', SIA_PARAMETERS_DESC)
def query_tap(self, query, maxrec=None):
"""
Send query to the ALMA TAP. Results in pyvo.dal.TapResult format.
result.table in Astropy table format
Parameters
----------
maxrec : int
maximum number of records to return
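
        Examples
        --------
        Illustrative only (assumes the module-level ``Alma`` instance that
        astroquery normally provides for this class)::

            Alma.query_tap("select top 1 * from ivoa.obscore")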
"""
log.debug('TAP query: {}'.format(query))
return self.tap.search(query, language='ADQL', maxrec=maxrec)
def help_tap(self):
        print('Table to query is "ivoa.obscore".')
        print('For example: "select top 1 * from ivoa.obscore"')
        print('The schema of the table is as follows.\n')
print(' {0:20s} {1:15s} {2:10} {3}'.
format('Name', 'Type', 'Unit', 'Description'))
print('-'*90)
for tb in self.tap.tables.items():
if tb[0] == 'ivoa.obscore':
for col in tb[1].columns:
if col.datatype.content == 'char':
type = 'char({})'.format(col.datatype.arraysize)
else:
type = str(col.datatype.content)
unit = col.unit if col.unit else ''
print(' {0:20s} {1:15s} {2:10} {3}'.
format(col.name, type, unit, col.description))
# update method pydocs
query_region_async.__doc__ = query_region_async.__doc__.replace(
'_SIA2_PARAMETERS', pyvo.dal.sia2.SIA_PARAMETERS_DESC)
query_object_async.__doc__ = query_object_async.__doc__.replace(
'_SIA2_PARAMETERS', pyvo.dal.sia2.SIA_PARAMETERS_DESC)
query_async.__doc__ = query_async.__doc__.replace(
'_SIA2_PARAMETERS', pyvo.dal.sia2.SIA_PARAMETERS_DESC)
def _get_dataarchive_url(self):
"""
If the generic ALMA URL is used, query it to determine which mirror to
access for querying data
"""
if not hasattr(self, 'dataarchive_url'):
if self.archive_url in ('http://almascience.org', 'https://almascience.org'):
response = self._request('GET', self.archive_url,
cache=False)
response.raise_for_status()
# Jan 2017: we have to force https because the archive doesn't
# tell us it needs https.
self.dataarchive_url = response.url.replace(
"/asax/", "").replace("/aq/", "").replace("http://", "https://")
else:
self.dataarchive_url = self.archive_url
elif self.dataarchive_url in ('http://almascience.org',
'https://almascience.org'):
raise ValueError("'dataarchive_url' was set to a disambiguation "
"page that is meant to redirect to a real "
"archive. You should only reach this message "
"if you manually specified Alma.dataarchive_url. "
"If you did so, instead consider setting "
"Alma.archive_url. Otherwise, report an error "
"on github.")
return self.dataarchive_url
@deprecated(since="v0.4.1", alternative="get_data_info")
def stage_data(self, uids, expand_tarfiles=False, return_json=False):
"""
Obtain table of ALMA files
DEPRECATED: Data is no longer staged. This method is deprecated and
kept here for backwards compatibility reasons but it's not fully
compatible with the original implementation.
Parameters
----------
uids : list or str
A list of valid UIDs or a single UID.
UIDs should have the form: 'uid://A002/X391d0b/X7b'
expand_tarfiles : DEPRECATED
return_json : DEPRECATED
Note: The returned astropy table can be easily converted to json
through pandas:
output = StringIO()
stage_data(...).to_pandas().to_json(output)
table_json = output.getvalue()
Returns
-------
data_file_table : Table
A table containing 3 columns: the UID, the file URL (for future
downloading), and the file size
"""
if return_json:
raise AttributeError(
'return_json is deprecated. See method docs for a workaround')
table = Table()
res = self.get_data_info(uids, expand_tarfiles=expand_tarfiles)
p = re.compile(r'.*(uid__.*)\.asdm.*')
if res:
            table['name'] = [url.split('/')[-1] for url in res['access_url']]
table['id'] = [p.search(x).group(1) if 'asdm' in x else 'None'
for x in table['name']]
table['type'] = res['content_type']
table['size'] = res['content_length']
table['permission'] = ['UNKNOWN'] * len(res)
table['mous_uid'] = [uids] * len(res)
table['URL'] = res['access_url']
table['isProprietary'] = res['readable']
return table
def get_data_info(self, uids, expand_tarfiles=False,
with_auxiliary=True, with_rawdata=True):
"""
Return information about the data associated with ALMA uid(s)
Parameters
----------
uids : list or str
A list of valid UIDs or a single UID.
UIDs should have the form: 'uid://A002/X391d0b/X7b'
expand_tarfiles : bool
False to return information on the tarfiles packages containing
the data or True to return information about individual files in
these packages
with_auxiliary : bool
True to include the auxiliary packages, False otherwise
with_rawdata : bool
True to include raw data, False otherwise
Returns
-------
Table with results or None. Table has the following columns: id (UID),
access_url (URL to access data), content_length, content_type (MIME
type), semantics, description (optional), error_message (optional)
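
        Examples
        --------
        Illustrative only (assumes the module-level ``Alma`` instance; the UID
        format is as described above)::

            Alma.get_data_info('uid://A002/X391d0b/X7b', expand_tarfiles=True)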
"""
if uids is None:
raise AttributeError('UIDs required')
if isinstance(uids, (str, bytes)):
uids = [uids]
if not isinstance(uids, (list, tuple, np.ndarray)):
raise TypeError("Datasets must be given as a list of strings.")
# TODO remove this loop and send uids at once when pyvo fixed
result = None
for uid in uids:
res = self.datalink.run_sync(uid)
if res.status[0] != 'OK':
raise Exception('ERROR {}: {}'.format(res.status[0],
res.status[1]))
temp = res.to_table()
if ASTROPY_LT_4_1:
# very annoying
for col in [x for x in temp.colnames
if x not in ['content_length', 'readable']]:
temp[col] = temp[col].astype(str)
result = temp if result is None else vstack([result, temp])
to_delete = []
for index, rr in enumerate(result):
if rr['error_message'] is not None and \
rr['error_message'].strip():
log.warning('Error accessing info about file {}: {}'.
format(rr['access_url'], rr['error_message']))
# delete from results. Good thing to do?
to_delete.append(index)
result.remove_rows(to_delete)
if not with_auxiliary:
result = result[np.core.defchararray.find(
result['semantics'], '#aux') == -1]
if not with_rawdata:
result = result[np.core.defchararray.find(
result['semantics'], '#progenitor') == -1]
        # The primary data delivery type is files packaged in tarballs. However,
        # some types of data also offer an alternative way to retrieve each
        # individual file (semantics='#datalink' and
        # 'content_type=application/x-votable+xml;content=datalink'). These
        # require an extra call to the datalink service to get the list of
        # files.
DATALINK_FILE_TYPE = 'application/x-votable+xml;content=datalink'
DATALINK_SEMANTICS = '#datalink'
if expand_tarfiles:
# identify the tarballs that can be expandable and replace them
# with the list of components
expanded_result = None
to_delete = []
for index, row in enumerate(result):
if DATALINK_SEMANTICS in row['semantics'] and \
row['content_type'] == DATALINK_FILE_TYPE:
# subsequent call to datalink
file_id = row['access_url'].split('ID=')[1]
expanded_tar = self.get_data_info(file_id)
expanded_tar = expanded_tar[
expanded_tar['semantics'] != '#cutout']
if not expanded_result:
expanded_result = expanded_tar
else:
expanded_result = vstack(
[expanded_result, expanded_tar], join_type='exact')
to_delete.append(index)
# cleanup
result.remove_rows(to_delete)
# add the extra rows
if expanded_result:
result = vstack([result, expanded_result], join_type='exact')
else:
result = result[np.logical_or(np.core.defchararray.find(
result['semantics'].astype(str), DATALINK_SEMANTICS) == -1,
result['content_type'].astype(str) != DATALINK_FILE_TYPE)]
return result
def is_proprietary(self, uid):
"""
Given an ALMA UID, query the servers to determine whether it is
proprietary or not.
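
        Illustrative only (assumes the module-level ``Alma`` instance)::

            Alma.is_proprietary('uid://A002/X391d0b/X7b')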
"""
query = "select distinct data_rights from ivoa.obscore where " \
"obs_id='{}'".format(uid)
result = self.query_tap(query)
if result:
tableresult = result.to_table()
if not result or len(tableresult) == 0:
raise AttributeError('{} not found'.format(uid))
if len(tableresult) == 1 and tableresult[0][0] == 'Public':
return False
return True
def _HEADER_data_size(self, files):
"""
Given a list of file URLs, return the data size. This is useful for
assessing how much data you might be downloading!
(This is discouraged by the ALMA archive, as it puts unnecessary load
on their system)
"""
totalsize = 0 * u.B
data_sizes = {}
pb = ProgressBar(len(files))
for index, fileLink in enumerate(files):
response = self._request('HEAD', fileLink, stream=False,
cache=False, timeout=self.TIMEOUT)
filesize = (int(response.headers['content-length']) * u.B).to(u.GB)
totalsize += filesize
data_sizes[fileLink] = filesize
log.debug("File {0}: size {1}".format(fileLink, filesize))
pb.update(index + 1)
response.raise_for_status()
return data_sizes, totalsize.to(u.GB)
def download_files(self, files, savedir=None, cache=True,
continuation=True, skip_unauthorized=True,):
"""
Given a list of file URLs, download them
Note: Given a list with repeated URLs, each will only be downloaded
once, so the return may have a different length than the input list
Parameters
----------
files : list
List of URLs to download
savedir : None or str
The directory to save to. Default is the cache location.
cache : bool
Cache the download?
continuation : bool
Attempt to continue where the download left off (if it was broken)
skip_unauthorized : bool
If you receive "unauthorized" responses for some of the download
requests, skip over them. If this is False, an exception will be
raised.
"""
if self.USERNAME:
auth = self._get_auth_info(self.USERNAME)
else:
auth = None
downloaded_files = []
if savedir is None:
savedir = self.cache_location
for file_link in unique(files):
log.debug("Downloading {0} to {1}".format(file_link, savedir))
try:
check_filename = self._request('HEAD', file_link, auth=auth)
check_filename.raise_for_status()
except requests.HTTPError as ex:
if ex.response.status_code == 401:
if skip_unauthorized:
log.info("Access denied to {url}. Skipping to"
" next file".format(url=file_link))
continue
else:
raise(ex)
try:
filename = re.search("filename=(.*)",
check_filename.headers['Content-Disposition']).groups()[0]
except KeyError:
log.info(f"Unable to find filename for {file_link} "
"(missing Content-Disposition in header). "
"Skipping to next file.")
continue
if savedir is not None:
filename = os.path.join(savedir,
filename)
try:
self._download_file(file_link,
filename,
timeout=self.TIMEOUT,
auth=auth,
cache=cache,
method='GET',
head_safe=False,
continuation=continuation)
downloaded_files.append(filename)
except requests.HTTPError as ex:
if ex.response.status_code == 401:
if skip_unauthorized:
log.info("Access denied to {url}. Skipping to"
" next file".format(url=file_link))
continue
else:
raise(ex)
elif ex.response.status_code == 403:
log.error("Access denied to {url}".format(url=file_link))
if 'dataPortal' in file_link and 'sso' not in file_link:
log.error("The URL may be incorrect. Try using "
"{0} instead of {1}"
.format(file_link.replace('dataPortal/',
'dataPortal/sso/'),
file_link))
raise ex
elif ex.response.status_code == 500:
# empirically, this works the second time most of the time...
self._download_file(file_link,
filename,
timeout=self.TIMEOUT,
auth=auth,
cache=cache,
method='GET',
head_safe=False,
continuation=continuation)
downloaded_files.append(filename)
else:
raise ex
return downloaded_files
def _parse_result(self, response, verbose=False):
"""
Parse a VOtable response
"""
if not verbose:
commons.suppress_vo_warnings()
return response
def retrieve_data_from_uid(self, uids, cache=True):
"""
Stage & Download ALMA data. Will print out the expected file size
before attempting the download.
Parameters
----------
uids : list or str
A list of valid UIDs or a single UID.
UIDs should have the form: 'uid://A002/X391d0b/X7b'
cache : bool
Whether to cache the downloads.
Returns
-------
downloaded_files : list
A list of the downloaded file paths
"""
if isinstance(uids, (str, bytes)):
uids = [uids]
if not isinstance(uids, (list, tuple, np.ndarray)):
raise TypeError("Datasets must be given as a list of strings.")
files = self.get_data_info(uids)
file_urls = files['access_url']
totalsize = files['content_length'].sum()*u.B
# each_size, totalsize = self.data_size(files)
log.info("Downloading files of size {0}...".format(totalsize.to(u.GB)))
# TODO: Add cache=cache keyword here. Currently would have no effect.
downloaded_files = self.download_files(file_urls)
return downloaded_files
def _get_auth_info(self, username, store_password=False,
reenter_password=False):
"""
Get the auth info (user, password) for use in another function
"""
if username is None:
if not self.USERNAME:
raise LoginError("If you do not pass a username to login(), "
"you should configure a default one!")
else:
username = self.USERNAME
if hasattr(self, '_auth_url'):
auth_url = self._auth_url
else:
raise LoginError("Login with .login() to acquire the appropriate"
" login URL")
# Get password from keyring or prompt
password, password_from_keyring = self._get_password(
"astroquery:{0}".format(auth_url), username, reenter=reenter_password)
# When authenticated, save password in keyring if needed
if password_from_keyring is None and store_password:
keyring.set_password("astroquery:{0}".format(auth_url), username, password)
return username, password
def _login(self, username=None, store_password=False,
reenter_password=False, auth_urls=auth_urls):
"""
Login to the ALMA Science Portal.
Parameters
----------
username : str, optional
Username to the ALMA Science Portal. If not given, it should be
specified in the config file.
store_password : bool, optional
Stores the password securely in your keyring. Default is False.
reenter_password : bool, optional
Asks for the password even if it is already stored in the
            keyring. This is the way to overwrite an already stored password
on the keyring. Default is False.
"""
success = False
for auth_url in auth_urls:
# set session cookies (they do not get set otherwise)
cookiesetpage = self._request("GET",
urljoin(self._get_dataarchive_url(),
'rh/forceAuthentication'),
cache=False)
self._login_cookiepage = cookiesetpage
cookiesetpage.raise_for_status()
if (auth_url+'/cas/login' in cookiesetpage.request.url):
# we've hit a target, we're good
success = True
break
if not success:
raise LoginError("Could not log in to any of the known ALMA "
"authorization portals: {0}".format(auth_urls))
# Check if already logged in
loginpage = self._request("GET", "https://{auth_url}/cas/login".format(auth_url=auth_url),
cache=False)
root = BeautifulSoup(loginpage.content, 'html5lib')
if root.find('div', class_='success'):
log.info("Already logged in.")
return True
self._auth_url = auth_url
username, password = self._get_auth_info(username=username,
store_password=store_password,
reenter_password=reenter_password)
# Authenticate
log.info("Authenticating {0} on {1} ...".format(username, auth_url))
# Do not cache pieces of the login process
data = {kw: root.find('input', {'name': kw})['value']
for kw in ('execution', '_eventId')}
data['username'] = username
data['password'] = password
data['submit'] = 'LOGIN'
login_response = self._request("POST", "https://{0}/cas/login".format(auth_url),
params={'service': self._get_dataarchive_url()},
data=data,
cache=False)
# save the login response for debugging purposes
self._login_response = login_response
# do not expose password back to user
del data['password']
# but save the parameters for debug purposes
self._login_parameters = data
authenticated = ('You have successfully logged in' in
login_response.text)
if authenticated:
log.info("Authentication successful!")
self.USERNAME = username
else:
log.exception("Authentication failed!")
return authenticated
def get_cycle0_uid_contents(self, uid):
"""
List the file contents of a UID from Cycle 0. Will raise an error
if the UID is from cycle 1+, since those data have been released in
a different and more consistent format. See
http://almascience.org/documents-and-tools/cycle-2/ALMAQA2Productsv1.01.pdf
for details.
"""
# First, check if UID is in the Cycle 0 listing
if uid in self.cycle0_table['uid']:
cycle0id = self.cycle0_table[
self.cycle0_table['uid'] == uid][0]['ID']
contents = [row['Files']
for row in self._cycle0_tarfile_content
if cycle0id in row['ID']]
return contents
else:
info_url = urljoin(
self._get_dataarchive_url(),
'documents-and-tools/cycle-2/ALMAQA2Productsv1.01.pdf')
raise ValueError("Not a Cycle 0 UID. See {0} for details about "
"cycle 1+ data release formats.".format(info_url))
@property
def _cycle0_tarfile_content(self):
"""
In principle, this is a static file, but we'll retrieve it just in case
"""
if not hasattr(self, '_cycle0_tarfile_content_table'):
url = urljoin(self._get_dataarchive_url(),
'alma-data/archive/cycle-0-tarfile-content')
response = self._request('GET', url, cache=True)
# html.parser is needed because some <tr>'s have form:
# <tr width="blah"> which the default parser does not pick up
root = BeautifulSoup(response.content, 'html.parser')
html_table = root.find('table', class_='grid listing')
data = list(zip(*[(x.findAll('td')[0].text,
x.findAll('td')[1].text)
for x in html_table.findAll('tr')]))
columns = [Column(data=data[0], name='ID'),
Column(data=data[1], name='Files')]
tbl = Table(columns)
assert len(tbl) == 8497
self._cycle0_tarfile_content_table = tbl
else:
tbl = self._cycle0_tarfile_content_table
return tbl
@property
def cycle0_table(self):
"""
Return a table of Cycle 0 Project IDs and associated UIDs.
The table is distributed with astroquery and was provided by Felix
Stoehr.
"""
if not hasattr(self, '_cycle0_table'):
filename = resource_filename(
'astroquery.alma', 'data/cycle0_delivery_asdm_mapping.txt')
self._cycle0_table = Table.read(filename, format='ascii.no_header')
self._cycle0_table.rename_column('col1', 'ID')
self._cycle0_table.rename_column('col2', 'uid')
return self._cycle0_table
def get_files_from_tarballs(self, downloaded_files, regex=r'.*\.fits$',
path='cache_path', verbose=True):
"""
Given a list of successfully downloaded tarballs, extract files
with names matching a specified regular expression. The default
is to extract all FITS files
NOTE: alma now supports direct listing and downloads of tarballs. See
``get_data_info`` and ``download_and_extract_files``
Parameters
----------
downloaded_files : list
A list of downloaded files. These should be paths on your local
machine.
regex : str
A valid regular expression
path : 'cache_path' or str
If 'cache_path', will use the astroquery.Alma cache directory
(``Alma.cache_location``), otherwise will use the specified path.
Note that the subdirectory structure of the tarball will be
maintained.
Returns
-------
filelist : list
A list of the extracted file locations on disk
"""
if path == 'cache_path':
path = self.cache_location
elif not os.path.isdir(path):
raise OSError("Specified an invalid path {0}.".format(path))
fitsre = re.compile(regex)
filelist = []
for fn in downloaded_files:
tf = tarfile.open(fn)
for member in tf.getmembers():
if fitsre.match(member.name):
if verbose:
log.info("Extracting {0} to {1}".format(member.name,
path))
tf.extract(member, path)
filelist.append(os.path.join(path, member.name))
return filelist
def download_and_extract_files(self, urls, delete=True, regex=r'.*\.fits$',
include_asdm=False, path='cache_path',
verbose=True):
"""
Given a list of tarball URLs, it extracts all the FITS files (or
whatever matches the regex)
Parameters
----------
urls : str or list
A single URL or a list of URLs
include_asdm : bool
Only affects cycle 1+ data. If set, the ASDM files will be
downloaded in addition to the script and log files. By default,
though, this file will be downloaded and deleted without extracting
any information: you must change the regex if you want to extract
data from an ASDM tarball
"""
if isinstance(urls, str):
urls = [urls]
if not isinstance(urls, (list, tuple, np.ndarray)):
raise TypeError("Datasets must be given as a list of strings.")
filere = re.compile(regex)
all_files = []
tar_files = []
expanded_files = []
for url in urls:
if url[-4:] != '.tar':
raise ValueError("URLs should be links to tarballs.")
tarfile_name = os.path.split(url)[-1]
if tarfile_name in self._cycle0_tarfile_content['ID']:
# It is a cycle 0 file: need to check if it contains FITS
match = (self._cycle0_tarfile_content['ID'] == tarfile_name)
if not any(re.match(regex, x) for x in
self._cycle0_tarfile_content['Files'][match]):
log.info("No FITS files found in {0}".format(tarfile_name))
continue
else:
if 'asdm' in tarfile_name and not include_asdm:
log.info("ASDM tarballs do not contain FITS files; "
"skipping.")
continue
tar_file = url.split('/')[-1]
files = self.get_data_info(tar_file)
if files:
expanded_files += [x for x in files['access_url'] if
filere.match(x.split('/')[-1])]
else:
tar_files.append(url)
try:
# get the tar files
downloaded = self.download_files(tar_files, savedir=path)
fitsfilelist = self.get_files_from_tarballs(downloaded,
regex=regex, path=path,
verbose=verbose)
if delete:
for tarball_name in downloaded:
log.info("Deleting {0}".format(tarball_name))
os.remove(tarball_name)
all_files += fitsfilelist
# download the other files
all_files += self.download_files(expanded_files, savedir=path)
except requests.ConnectionError as ex:
self.partial_file_list = all_files
log.error("There was an error downloading the file. "
"A partially completed download list is "
"in Alma.partial_file_list")
raise ex
except requests.HTTPError as ex:
if ex.response.status_code == 401:
log.info("Access denied to {url}. Skipping to"
" next file".format(url=url))
else:
raise ex
return all_files
def help(self, cache=True):
"""
Return the valid query parameters
"""
print("\nMost common ALMA query keywords are listed below. These "
"keywords are part of the ALMA ObsCore model, an IVOA standard "
"for metadata representation (3rd column). They were also "
"present in original ALMA Web form and, for backwards "
"compatibility can be accessed with their old names (2nd "
"column).\n"
"More elaborate queries on the ObsCore model "
"are possible with `query_sia` or `query_tap` methods")
print(" {0:33s} {1:35s} {2:35s}".format("Description",
"Original ALMA keyword",
"ObsCore keyword"))
print("-"*103)
for title, section in ALMA_FORM_KEYS.items():
print()
print(title)
for row in section.items():
print(" {0:33s} {1:35s} {2:35s}".format(row[0], row[1][0], row[1][1]))
print('\nExamples of queries:')
print("Alma.query('proposal_id':'2011.0.00131.S'}")
print("Alma.query({'band_list': ['5', '7']}")
print("Alma.query({'source_name_alma': 'GRB021004'})")
print("Alma.query(payload=dict(project_code='2017.1.01355.L', "
"source_name_alma='G008.67'))")
def _json_summary_to_table(self, data, base_url):
"""
Special tool to convert some JSON metadata to a table. Obsolete as of
March 2020; should be removed along with stage_data_prefeb2020.
"""
from ..utils import url_helpers
columns = {'mous_uid': [], 'URL': [], 'size': []}
for entry in data['node_data']:
# de_type can be useful (e.g., MOUS), but it is not necessarily
# specified
# file_name and file_key *must* be specified.
is_file = \
(entry['file_name'] != 'null' and entry['file_key'] != 'null')
if is_file:
# "de_name": "ALMA+uid://A001/X122/X35e",
columns['mous_uid'].append(entry['de_name'][5:])
if entry['file_size'] == 'null':
columns['size'].append(np.nan * u.Gbyte)
else:
columns['size'].append(
(int(entry['file_size']) * u.B).to(u.Gbyte))
# example template for constructing url:
# https://almascience.eso.org/dataPortal/requests/keflavich/940238268/ALMA/
# uid___A002_X9d6f4c_X154/2013.1.00546.S_uid___A002_X9d6f4c_X154.asdm.sdm.tar
# above is WRONG... except for ASDMs, when it's right
# should be:
# 2013.1.00546.S_uid___A002_X9d6f4c_X154.asdm.sdm.tar/2013.1.00546.S_uid___A002_X9d6f4c_X154.asdm.sdm.tar
#
# apparently ASDMs are different from others:
# templates:
# https://almascience.eso.org/dataPortal/requests/keflavich/946895898/ALMA/
# 2013.1.00308.S_uid___A001_X196_X93_001_of_001.tar/2013.1.00308.S_uid___A001_X196_X93_001_of_001.tar
# uid___A002_X9ee74a_X26f0/2013.1.00308.S_uid___A002_X9ee74a_X26f0.asdm.sdm.tar
url = url_helpers.join(base_url,
entry['file_key'],
entry['file_name'])
if 'null' in url:
raise ValueError("The URL {0} was created containing "
"'null', which is invalid.".format(url))
columns['URL'].append(url)
columns['size'] = u.Quantity(columns['size'], u.Gbyte)
tbl = Table([Column(name=k, data=v) for k, v in columns.items()])
return tbl
def get_project_metadata(self, projectid, cache=True):
"""
Get the metadata - specifically, the project abstract - for a given project ID.
"""
if len(projectid) != 14:
raise AttributeError('Wrong length for project ID')
if not projectid[4] == projectid[6] == projectid[12] == '.':
raise AttributeError('Wrong format for project ID')
result = self.query_tap(
"select distinct proposal_abstract from "
"ivoa.obscore where proposal_id='{}'".format(projectid))
if ASTROPY_LT_4_1:
return [result[0]['proposal_abstract'].astype(str)]
else:
return [result[0]['proposal_abstract']]
Alma = AlmaClass()
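# Illustrative usage of get_project_metadata (a sketch only; requires network
# access, and the project code is borrowed from the URL templates above):
#     abstract = Alma.get_project_metadata('2013.1.00308.S')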
def clean_uid(uid):
"""
Return a uid with all unacceptable characters replaced with underscores
"""
if not hasattr(uid, 'replace'):
return clean_uid(str(uid.astype('S')))
try:
return uid.decode('utf-8').replace(u"/", u"_").replace(u":", u"_")
except AttributeError:
return uid.replace("/", "_").replace(":", "_")
def reform_uid(uid):
"""
Convert a uid with underscores to the original format
"""
return uid[:3] + "://" + "/".join(uid[6:].split("_"))
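# Illustrative round trip between clean_uid and reform_uid (the UID value is
# borrowed from the URL templates in _json_summary_to_table above):
#     >>> clean_uid('uid://A002/X9d6f4c/X154')
#     'uid___A002_X9d6f4c_X154'
#     >>> reform_uid('uid___A002_X9d6f4c_X154')
#     'uid://A002/X9d6f4c/X154'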
def unique(seq):
"""
Return unique elements of a list, preserving order
"""
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
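# Illustrative example of the order-preserving de-duplication:
#     >>> unique([3, 1, 3, 2, 1])
#     [3, 1, 2]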
def filter_printable(s):
""" extract printable characters from a string """
return filter(lambda x: x in string.printable, s)
def uid_json_to_table(jdata,
productlist=['ASDM', 'PIPELINE_PRODUCT',
'PIPELINE_PRODUCT_TARFILE',
'PIPELINE_AUXILIARY_TARFILE']):
rows = []
def flatten_jdata(this_jdata, mousID=None):
if isinstance(this_jdata, list):
for item in this_jdata:
if item['type'] in productlist:
item['mous_uid'] = mousID
rows.append(item)
elif len(item['children']) > 0:
if len(item['allMousUids']) == 1:
flatten_jdata(item['children'], item['allMousUids'][0])
else:
flatten_jdata(item['children'])
flatten_jdata(jdata['children'])
keys = rows[-1].keys()
columns = [Column(data=[row[key] for row in rows], name=key)
for key in keys if key not in ('children', 'allMousUids')]
columns = [col.astype(str) if col.dtype.name == 'object' else col for col
in columns]
return Table(columns)
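# Illustrative sketch of the JSON shape uid_json_to_table expects. The 'type',
# 'children' and 'allMousUids' keys are the ones the function reads; the 'name'
# field and the UID values are hypothetical placeholders:
#     jdata = {'children': [
#         {'type': 'PROJECT', 'allMousUids': ['uid://A001/X1/X2'],
#          'children': [
#              {'type': 'ASDM', 'allMousUids': ['uid://A001/X1/X2'],
#               'children': [],
#               'name': 'member.uid___A002_X3_X4.asdm.sdm.tar'}]}]}
#     tbl = uid_json_to_table(jdata)  # one row; columns 'type', 'name', 'mous_uid'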
|
|
import sys
import os
import unittest
import itertools
import time
from array import array
from weakref import proxy
try:
import threading
except ImportError:
threading = None
from test import test_support
from test.test_support import TESTFN, run_unittest
from UserList import UserList
class AutoFileTests(unittest.TestCase):
# file tests for which a test file is automatically set up
def setUp(self):
self.f = open(TESTFN, 'wb')
def tearDown(self):
if self.f:
self.f.close()
os.remove(TESTFN)
def testWeakRefs(self):
# verify weak references
p = proxy(self.f)
p.write('teststring')
self.assertEquals(self.f.tell(), p.tell())
self.f.close()
self.f = None
self.assertRaises(ReferenceError, getattr, p, 'tell')
def testAttributes(self):
# verify expected attributes exist
f = self.f
with test_support.check_py3k_warnings():
softspace = f.softspace
f.name # merely shouldn't blow up
f.mode # ditto
f.closed # ditto
with test_support.check_py3k_warnings():
# verify softspace is writable
f.softspace = softspace # merely shouldn't blow up
# verify the others aren't
for attr in 'name', 'mode', 'closed':
self.assertRaises((AttributeError, TypeError), setattr, f, attr, 'oops')
def testReadinto(self):
# verify readinto
self.f.write('12')
self.f.close()
a = array('c', 'x'*10)
self.f = open(TESTFN, 'rb')
n = self.f.readinto(a)
self.assertEquals('12', a.tostring()[:n])
def testWritelinesUserList(self):
# verify writelines with instance sequence
l = UserList(['1', '2'])
self.f.writelines(l)
self.f.close()
self.f = open(TESTFN, 'rb')
buf = self.f.read()
self.assertEquals(buf, '12')
def testWritelinesIntegers(self):
# verify writelines with integers
self.assertRaises(TypeError, self.f.writelines, [1, 2, 3])
def testWritelinesIntegersUserList(self):
# verify writelines with integers in UserList
l = UserList([1,2,3])
self.assertRaises(TypeError, self.f.writelines, l)
def testWritelinesNonString(self):
# verify writelines with non-string object
class NonString:
pass
self.assertRaises(TypeError, self.f.writelines,
[NonString(), NonString()])
def testRepr(self):
# verify repr works
self.assertTrue(repr(self.f).startswith("<open file '" + TESTFN))
def testErrors(self):
self.f.close()
self.f = open(TESTFN, 'rb')
f = self.f
self.assertEquals(f.name, TESTFN)
self.assertTrue(not f.isatty())
self.assertTrue(not f.closed)
self.assertRaises(TypeError, f.readinto, "")
f.close()
self.assertTrue(f.closed)
def testMethods(self):
methods = ['fileno', 'flush', 'isatty', 'next', 'read', 'readinto',
'readline', 'readlines', 'seek', 'tell', 'truncate',
'write', '__iter__']
deprecated_methods = ['xreadlines']
if sys.platform.startswith('atheos'):
methods.remove('truncate')
# __exit__ should close the file
self.f.__exit__(None, None, None)
self.assertTrue(self.f.closed)
for methodname in methods:
method = getattr(self.f, methodname)
# should raise on closed file
self.assertRaises(ValueError, method)
with test_support.check_py3k_warnings():
for methodname in deprecated_methods:
method = getattr(self.f, methodname)
self.assertRaises(ValueError, method)
self.assertRaises(ValueError, self.f.writelines, [])
# file is closed, __exit__ shouldn't do anything
self.assertEquals(self.f.__exit__(None, None, None), None)
# it must also return None if an exception was given
try:
1 // 0
except:
self.assertEquals(self.f.__exit__(*sys.exc_info()), None)
def testReadWhenWriting(self):
self.assertRaises(IOError, self.f.read)
def testIssue5677(self):
# Remark: Do not perform more than one test per open file,
# since that does NOT catch the readline error on Windows.
data = 'xxx'
for mode in ['w', 'wb', 'a', 'ab']:
for attr in ['read', 'readline', 'readlines']:
self.f = open(TESTFN, mode)
self.f.write(data)
self.assertRaises(IOError, getattr(self.f, attr))
self.f.close()
self.f = open(TESTFN, mode)
self.f.write(data)
self.assertRaises(IOError, lambda: [line for line in self.f])
self.f.close()
self.f = open(TESTFN, mode)
self.f.write(data)
self.assertRaises(IOError, self.f.readinto, bytearray(len(data)))
self.f.close()
for mode in ['r', 'rb', 'U', 'Ub', 'Ur', 'rU', 'rbU', 'rUb']:
self.f = open(TESTFN, mode)
self.assertRaises(IOError, self.f.write, data)
self.f.close()
self.f = open(TESTFN, mode)
self.assertRaises(IOError, self.f.writelines, [data, data])
self.f.close()
self.f = open(TESTFN, mode)
self.assertRaises(IOError, self.f.truncate)
self.f.close()
class OtherFileTests(unittest.TestCase):
def testOpenDir(self):
this_dir = os.path.dirname(__file__)
for mode in (None, "w"):
try:
if mode:
f = open(this_dir, mode)
else:
f = open(this_dir)
except IOError as e:
self.assertEqual(e.filename, this_dir)
else:
self.fail("opening a directory didn't raise an IOError")
def testModeStrings(self):
# check invalid mode strings
for mode in ("", "aU", "wU+"):
try:
f = open(TESTFN, mode)
except ValueError:
pass
else:
f.close()
self.fail('%r is an invalid file mode' % mode)
# Some invalid modes fail on Windows, but pass on Unix
# Issue3965: avoid a crash on Windows when filename is unicode
for name in (TESTFN, unicode(TESTFN), unicode(TESTFN + '\t')):
try:
f = open(name, "rr")
except (IOError, ValueError):
pass
else:
f.close()
def testStdin(self):
# This causes the interpreter to exit on OSF1 v5.1.
if sys.platform != 'osf1V5':
self.assertRaises(IOError, sys.stdin.seek, -1)
else:
print >>sys.__stdout__, (
' Skipping sys.stdin.seek(-1), it may crash the interpreter.'
' Test manually.')
self.assertRaises(IOError, sys.stdin.truncate)
def testUnicodeOpen(self):
# verify repr works for unicode too
f = open(unicode(TESTFN), "w")
self.assertTrue(repr(f).startswith("<open file u'" + TESTFN))
f.close()
os.unlink(TESTFN)
def testBadModeArgument(self):
# verify that we get a sensible error message for bad mode argument
bad_mode = "qwerty"
try:
f = open(TESTFN, bad_mode)
except ValueError, msg:
if msg.args[0] != 0:
s = str(msg)
if TESTFN in s or bad_mode not in s:
self.fail("bad error message for invalid mode: %s" % s)
# if msg.args[0] == 0, we're probably on Windows where there may
# be no obvious way to discover why open() failed.
else:
f.close()
self.fail("no error for invalid mode: %s" % bad_mode)
def testSetBufferSize(self):
# make sure that explicitly setting the buffer size doesn't cause
# misbehaviour especially with repeated close() calls
for s in (-1, 0, 1, 512):
try:
f = open(TESTFN, 'w', s)
f.write(str(s))
f.close()
f.close()
f = open(TESTFN, 'r', s)
d = int(f.read())
f.close()
f.close()
except IOError, msg:
self.fail('error setting buffer size %d: %s' % (s, str(msg)))
self.assertEquals(d, s)
def testTruncateOnWindows(self):
os.unlink(TESTFN)
def bug801631():
# SF bug <http://www.python.org/sf/801631>
# "file.truncate fault on windows"
f = open(TESTFN, 'wb')
f.write('12345678901') # 11 bytes
f.close()
f = open(TESTFN,'rb+')
data = f.read(5)
if data != '12345':
self.fail("Read on file opened for update failed %r" % data)
if f.tell() != 5:
self.fail("File pos after read wrong %d" % f.tell())
f.truncate()
if f.tell() != 5:
self.fail("File pos after ftruncate wrong %d" % f.tell())
f.close()
size = os.path.getsize(TESTFN)
if size != 5:
self.fail("File size after ftruncate wrong %d" % size)
try:
bug801631()
finally:
os.unlink(TESTFN)
def testIteration(self):
# Test the complex interaction when mixing file-iteration and the
# various read* methods. Ostensibly, the mixture could just be tested
# to work when it should work according to the Python language,
# instead of fail when it should fail according to the current CPython
# implementation. People don't always program Python the way they
# should, though, and the implementation might change in subtle ways,
# so we explicitly test for errors, too; the test will just have to
# be updated when the implementation changes.
dataoffset = 16384
filler = "ham\n"
assert not dataoffset % len(filler), \
"dataoffset must be multiple of len(filler)"
nchunks = dataoffset // len(filler)
testlines = [
"spam, spam and eggs\n",
"eggs, spam, ham and spam\n",
"saussages, spam, spam and eggs\n",
"spam, ham, spam and eggs\n",
"spam, spam, spam, spam, spam, ham, spam\n",
"wonderful spaaaaaam.\n"
]
methods = [("readline", ()), ("read", ()), ("readlines", ()),
("readinto", (array("c", " "*100),))]
try:
# Prepare the testfile
bag = open(TESTFN, "w")
bag.write(filler * nchunks)
bag.writelines(testlines)
bag.close()
# Test for appropriate errors mixing read* and iteration
for methodname, args in methods:
f = open(TESTFN)
if f.next() != filler:
self.fail, "Broken testfile"
meth = getattr(f, methodname)
try:
meth(*args)
except ValueError:
pass
else:
self.fail("%s%r after next() didn't raise ValueError" %
(methodname, args))
f.close()
# Test to see if harmless (by accident) mixing of read* and
# iteration still works. This depends on the size of the internal
# iteration buffer (currently 8192 bytes), but we can test it in a
# flexible manner. Each line in the bag o' ham is 4 bytes
# ("h", "a", "m", "\n"), so 4096 lines of that should get us
# exactly on the buffer boundary for any power-of-2 buffersize
# between 4 and 16384 (inclusive).
f = open(TESTFN)
for i in range(nchunks):
f.next()
testline = testlines.pop(0)
try:
line = f.readline()
except ValueError:
self.fail("readline() after next() with supposedly empty "
"iteration-buffer failed anyway")
if line != testline:
self.fail("readline() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
testline = testlines.pop(0)
buf = array("c", "\x00" * len(testline))
try:
f.readinto(buf)
except ValueError:
self.fail("readinto() after next() with supposedly empty "
"iteration-buffer failed anyway")
line = buf.tostring()
if line != testline:
self.fail("readinto() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
testline = testlines.pop(0)
try:
line = f.read(len(testline))
except ValueError:
self.fail("read() after next() with supposedly empty "
"iteration-buffer failed anyway")
if line != testline:
self.fail("read() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
try:
lines = f.readlines()
except ValueError:
self.fail("readlines() after next() with supposedly empty "
"iteration-buffer failed anyway")
if lines != testlines:
self.fail("readlines() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
# Reading after iteration hit EOF shouldn't hurt either
f = open(TESTFN)
try:
for line in f:
pass
try:
f.readline()
f.readinto(buf)
f.read()
f.readlines()
except ValueError:
self.fail("read* failed after next() consumed file")
finally:
f.close()
finally:
os.unlink(TESTFN)
class FileSubclassTests(unittest.TestCase):
def testExit(self):
# test that exiting with context calls subclass' close
class C(file):
def __init__(self, *args):
self.subclass_closed = False
file.__init__(self, *args)
def close(self):
self.subclass_closed = True
file.close(self)
with C(TESTFN, 'w') as f:
pass
self.assertTrue(f.subclass_closed)
@unittest.skipUnless(threading, 'Threading required for this test.')
class FileThreadingTests(unittest.TestCase):
# These tests check the ability to call various methods of file objects
# (including close()) concurrently without crashing the Python interpreter.
# See #815646, #595601
def setUp(self):
self._threads = test_support.threading_setup()
self.f = None
self.filename = TESTFN
with open(self.filename, "w") as f:
f.write("\n".join("0123456789"))
self._count_lock = threading.Lock()
self.close_count = 0
self.close_success_count = 0
self.use_buffering = False
def tearDown(self):
if self.f:
try:
self.f.close()
except (EnvironmentError, ValueError):
pass
try:
os.remove(self.filename)
except EnvironmentError:
pass
test_support.threading_cleanup(*self._threads)
def _create_file(self):
if self.use_buffering:
self.f = open(self.filename, "w+", buffering=1024*16)
else:
self.f = open(self.filename, "w+")
def _close_file(self):
with self._count_lock:
self.close_count += 1
self.f.close()
with self._count_lock:
self.close_success_count += 1
def _close_and_reopen_file(self):
self._close_file()
# if close raises an exception that's fine, self.f remains valid so
# we don't need to reopen.
self._create_file()
def _run_workers(self, func, nb_workers, duration=0.2):
with self._count_lock:
self.close_count = 0
self.close_success_count = 0
self.do_continue = True
threads = []
try:
for i in range(nb_workers):
t = threading.Thread(target=func)
t.start()
threads.append(t)
for _ in xrange(100):
time.sleep(duration/100)
with self._count_lock:
if self.close_count-self.close_success_count > nb_workers+1:
if test_support.verbose:
print 'Q',
break
time.sleep(duration)
finally:
self.do_continue = False
for t in threads:
t.join()
def _test_close_open_io(self, io_func, nb_workers=5):
def worker():
self._create_file()
funcs = itertools.cycle((
lambda: io_func(),
lambda: self._close_and_reopen_file(),
))
for f in funcs:
if not self.do_continue:
break
try:
f()
except (IOError, ValueError):
pass
self._run_workers(worker, nb_workers)
if test_support.verbose:
# Useful verbose statistics when tuning this test to take
# less time to run while ensuring that it is still useful.
#
# the percent of close calls that raised an error
percent = 100. - 100.*self.close_success_count/self.close_count
print self.close_count, ('%.4f ' % percent),
def test_close_open(self):
def io_func():
pass
self._test_close_open_io(io_func)
def test_close_open_flush(self):
def io_func():
self.f.flush()
self._test_close_open_io(io_func)
def test_close_open_iter(self):
def io_func():
list(iter(self.f))
self._test_close_open_io(io_func)
def test_close_open_isatty(self):
def io_func():
self.f.isatty()
self._test_close_open_io(io_func)
def test_close_open_print(self):
def io_func():
print >> self.f, ''
self._test_close_open_io(io_func)
def test_close_open_print_buffered(self):
self.use_buffering = True
def io_func():
print >> self.f, ''
self._test_close_open_io(io_func)
def test_close_open_read(self):
def io_func():
self.f.read(0)
self._test_close_open_io(io_func)
def test_close_open_readinto(self):
def io_func():
a = array('c', 'xxxxx')
self.f.readinto(a)
self._test_close_open_io(io_func)
def test_close_open_readline(self):
def io_func():
self.f.readline()
self._test_close_open_io(io_func)
def test_close_open_readlines(self):
def io_func():
self.f.readlines()
self._test_close_open_io(io_func)
def test_close_open_seek(self):
def io_func():
self.f.seek(0, 0)
self._test_close_open_io(io_func)
def test_close_open_tell(self):
def io_func():
self.f.tell()
self._test_close_open_io(io_func)
def test_close_open_truncate(self):
def io_func():
self.f.truncate()
self._test_close_open_io(io_func)
def test_close_open_write(self):
def io_func():
self.f.write('')
self._test_close_open_io(io_func)
def test_close_open_writelines(self):
def io_func():
self.f.writelines('')
self._test_close_open_io(io_func)
class StdoutTests(unittest.TestCase):
def test_move_stdout_on_write(self):
# Issue 3242: sys.stdout can be replaced (and freed) during a
# print statement; prevent a segfault in this case
save_stdout = sys.stdout
class File:
def write(self, data):
if '\n' in data:
sys.stdout = save_stdout
try:
sys.stdout = File()
print "some text"
finally:
sys.stdout = save_stdout
def test_del_stdout_before_print(self):
# Issue 4597: 'print' with no argument wasn't reporting when
# sys.stdout was deleted.
save_stdout = sys.stdout
del sys.stdout
try:
print
except RuntimeError as e:
self.assertEquals(str(e), "lost sys.stdout")
else:
self.fail("Expected RuntimeError")
finally:
sys.stdout = save_stdout
def test_main():
# Historically, these tests have been sloppy about removing TESTFN.
# So get rid of it no matter what.
try:
run_unittest(AutoFileTests, OtherFileTests, FileSubclassTests,
FileThreadingTests, StdoutTests)
finally:
if os.path.exists(TESTFN):
os.unlink(TESTFN)
if __name__ == '__main__':
test_main()
|
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright 2014 Measurement Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import re
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(
os.path.dirname(__file__), '../telescope')))
import query
import utils
class BigQueryQueryGeneratorTest(unittest.TestCase):
def setUp(self):
self.maxDiff = None
def normalize_whitespace(self, original):
return re.sub(r'\s+', ' ', original).strip()
def split_and_normalize_query(self, query_string):
lines = []
for line in query_string.splitlines():
# omit blank lines
if not line:
continue
lines.append(self.normalize_whitespace(line))
return lines
def assertQueriesEqual(self, expected, actual):
expected_lines = self.split_and_normalize_query(expected)
actual_lines = self.split_and_normalize_query(actual)
self.assertSequenceEqual(expected_lines, actual_lines)
def generate_ndt_query(self, start_time, end_time, metric, server_ips,
client_ip_blocks, client_country):
start_time_utc = utils.make_datetime_utc_aware(start_time)
end_time_utc = utils.make_datetime_utc_aware(end_time)
generator = query.BigQueryQueryGenerator(
start_time_utc,
end_time_utc,
metric,
server_ips=server_ips,
client_ip_blocks=client_ip_blocks,
client_country=client_country)
return generator.query()
def generate_download_throughput_query(self,
start_time,
end_time,
server_ips=None,
client_ip_blocks=None,
client_country=None):
return self.generate_ndt_query(start_time, end_time,
'download_throughput', server_ips,
client_ip_blocks, client_country)
def generate_upload_throughput_query(self,
start_time,
end_time,
server_ips=None,
client_ip_blocks=None,
client_country=None):
return self.generate_ndt_query(start_time, end_time,
'upload_throughput', server_ips,
client_ip_blocks, client_country)
def generate_average_rtt_query(self,
start_time,
end_time,
server_ips=None,
client_ip_blocks=None,
client_country=None):
return self.generate_ndt_query(start_time, end_time, 'average_rtt',
server_ips, client_ip_blocks,
client_country)
def generate_minimum_rtt_query(self,
start_time,
end_time,
server_ips=None,
client_ip_blocks=None,
client_country=None):
return self.generate_ndt_query(start_time, end_time, 'minimum_rtt',
server_ips, client_ip_blocks,
client_country)
def generate_packet_retransmit_rate_query(self,
start_time,
end_time,
server_ips=None,
client_ip_blocks=None,
client_country=None):
return self.generate_ndt_query(start_time, end_time,
'packet_retransmit_rate', server_ips,
client_ip_blocks, client_country)
def test_ndt_queries_have_no_trailing_whitespace(self):
start_time = datetime.datetime(2012, 1, 1)
end_time = datetime.datetime(2014, 10, 15)
server_ips = ['1.1.1.1', '2.2.2.2']
client_ip_blocks = [(5, 10), (35, 80)]
query_generators = (self.generate_average_rtt_query,
self.generate_minimum_rtt_query,
self.generate_upload_throughput_query,
self.generate_download_throughput_query)
for query_generator in query_generators:
generated_query = query_generator(start_time, end_time, server_ips,
client_ip_blocks)
self.assertNotRegexpMatches(generated_query, r'.*\s\n')
def test_ndt_download_throughput_query_full_month(self):
start_time = datetime.datetime(2014, 1, 1)
end_time = datetime.datetime(2014, 2, 1)
server_ips = ['1.1.1.1', '2.2.2.2']
client_ip_blocks = [(5, 10), (35, 80)]
query_actual = self.generate_download_throughput_query(
start_time, end_time, server_ips, client_ip_blocks)
query_expected = """
SELECT
web100_log_entry.log_time AS timestamp,
8 * (web100_log_entry.snap.HCThruOctetsAcked /
(web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd)) AS download_mbps
FROM
plx.google:m_lab.ndt.all
WHERE
connection_spec.data_direction = 1
AND (web100_log_entry.snap.State = 1
OR (web100_log_entry.snap.State >= 5
AND web100_log_entry.snap.State <= 11))
AND blacklist_flags == 0
AND web100_log_entry.snap.CongSignals > 0
AND web100_log_entry.snap.HCThruOctetsAcked >= 8192
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) >= 9000000
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) < 3600000000
AND ((web100_log_entry.log_time >= 1388534400) AND (web100_log_entry.log_time < 1391212800))
AND (web100_log_entry.connection_spec.local_ip = '1.1.1.1' OR
web100_log_entry.connection_spec.local_ip = '2.2.2.2')
AND (PARSE_IP(web100_log_entry.connection_spec.remote_ip) BETWEEN 5 AND 10 OR
PARSE_IP(web100_log_entry.connection_spec.remote_ip) BETWEEN 35 AND 80)"""
self.assertQueriesEqual(query_expected, query_actual)
def test_ndt_download_throughput_query_full_month_plus_one_second(self):
start_time = datetime.datetime(2014, 1, 1)
end_time = datetime.datetime(2014, 2, 1, 0, 0, 1)
server_ips = ['1.1.1.1',]
client_ip_blocks = [(5, 10),]
query_actual = self.generate_download_throughput_query(
start_time, end_time, server_ips, client_ip_blocks)
query_expected = """
SELECT
web100_log_entry.log_time AS timestamp,
8 * (web100_log_entry.snap.HCThruOctetsAcked /
(web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd)) AS download_mbps
FROM
plx.google:m_lab.ndt.all
WHERE
connection_spec.data_direction = 1
AND (web100_log_entry.snap.State = 1
OR (web100_log_entry.snap.State >= 5
AND web100_log_entry.snap.State <= 11))
AND blacklist_flags == 0
AND web100_log_entry.snap.CongSignals > 0
AND web100_log_entry.snap.HCThruOctetsAcked >= 8192
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) >= 9000000
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) < 3600000000
AND ((web100_log_entry.log_time >= 1388534400) AND (web100_log_entry.log_time < 1391212801))
AND (web100_log_entry.connection_spec.local_ip = '1.1.1.1')
AND (PARSE_IP(web100_log_entry.connection_spec.remote_ip) BETWEEN 5 AND 10)"""
self.assertQueriesEqual(query_expected, query_actual)
def test_ndt_upload_throughput_query_full_month(self):
start_time = datetime.datetime(2014, 1, 1)
end_time = datetime.datetime(2014, 2, 1)
server_ips = ['1.1.1.1', '2.2.2.2']
client_ip_blocks = [(5, 10), (35, 80)]
query_actual = self.generate_upload_throughput_query(
start_time, end_time, server_ips, client_ip_blocks)
query_expected = """
SELECT
web100_log_entry.log_time AS timestamp,
8 * (web100_log_entry.snap.HCThruOctetsReceived /
web100_log_entry.snap.Duration) AS upload_mbps
FROM
plx.google:m_lab.ndt.all
WHERE
connection_spec.data_direction = 0
AND connection_spec.data_direction IS NOT NULL
AND (web100_log_entry.snap.State = 1
OR (web100_log_entry.snap.State >= 5
AND web100_log_entry.snap.State <= 11))
AND blacklist_flags == 0
AND web100_log_entry.snap.HCThruOctetsReceived >= 8192
AND web100_log_entry.snap.Duration >= 9000000
AND web100_log_entry.snap.Duration < 3600000000
AND ((web100_log_entry.log_time >= 1388534400) AND (web100_log_entry.log_time < 1391212800))
AND (web100_log_entry.connection_spec.local_ip = '1.1.1.1' OR
web100_log_entry.connection_spec.local_ip = '2.2.2.2')
AND (PARSE_IP(web100_log_entry.connection_spec.remote_ip) BETWEEN 5 AND 10 OR
PARSE_IP(web100_log_entry.connection_spec.remote_ip) BETWEEN 35 AND 80)"""
self.assertQueriesEqual(query_expected, query_actual)
def test_ndt_average_rtt_query_full_month(self):
start_time = datetime.datetime(2014, 1, 1)
end_time = datetime.datetime(2014, 2, 1)
server_ips = ['1.1.1.1', '2.2.2.2']
client_ip_blocks = [(5, 10), (35, 80)]
query_actual = self.generate_average_rtt_query(
start_time, end_time, server_ips, client_ip_blocks)
query_expected = """
SELECT
web100_log_entry.log_time AS timestamp,
(web100_log_entry.snap.SumRTT / web100_log_entry.snap.CountRTT) AS average_rtt
FROM
plx.google:m_lab.ndt.all
WHERE
connection_spec.data_direction = 1
AND (web100_log_entry.snap.State = 1
OR (web100_log_entry.snap.State >= 5
AND web100_log_entry.snap.State <= 11))
AND blacklist_flags == 0
AND web100_log_entry.snap.CongSignals > 0
AND web100_log_entry.snap.HCThruOctetsAcked >= 8192
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) >= 9000000
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) < 3600000000
AND web100_log_entry.snap.CountRTT > 10
AND ((web100_log_entry.log_time >= 1388534400) AND (web100_log_entry.log_time < 1391212800))
AND (web100_log_entry.connection_spec.local_ip = '1.1.1.1' OR
web100_log_entry.connection_spec.local_ip = '2.2.2.2')
AND (PARSE_IP(web100_log_entry.connection_spec.remote_ip) BETWEEN 5 AND 10 OR
PARSE_IP(web100_log_entry.connection_spec.remote_ip) BETWEEN 35 AND 80)"""
self.assertQueriesEqual(query_expected, query_actual)
def test_ndt_min_rtt_query_full_month(self):
start_time = datetime.datetime(2014, 1, 1)
end_time = datetime.datetime(2014, 2, 1)
server_ips = ['1.1.1.1', '2.2.2.2']
client_ip_blocks = [(5, 10), (35, 80)]
query_actual = self.generate_minimum_rtt_query(
start_time, end_time, server_ips, client_ip_blocks)
query_expected = """
SELECT
web100_log_entry.log_time AS timestamp,
web100_log_entry.snap.MinRTT AS minimum_rtt
FROM
plx.google:m_lab.ndt.all
WHERE
connection_spec.data_direction = 1
AND (web100_log_entry.snap.State = 1
OR (web100_log_entry.snap.State >= 5
AND web100_log_entry.snap.State <= 11))
AND blacklist_flags == 0
AND web100_log_entry.snap.CongSignals > 0
AND web100_log_entry.snap.HCThruOctetsAcked >= 8192
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) >= 9000000
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) < 3600000000
AND web100_log_entry.snap.CountRTT > 10
AND ((web100_log_entry.log_time >= 1388534400) AND (web100_log_entry.log_time < 1391212800))
AND (web100_log_entry.connection_spec.local_ip = '1.1.1.1' OR
web100_log_entry.connection_spec.local_ip = '2.2.2.2')
AND (PARSE_IP(web100_log_entry.connection_spec.remote_ip) BETWEEN 5 AND 10 OR
PARSE_IP(web100_log_entry.connection_spec.remote_ip) BETWEEN 35 AND 80)"""
self.assertQueriesEqual(query_expected, query_actual)
def test_packet_retransmit_rate_query_full_month(self):
start_time = datetime.datetime(2014, 1, 1)
end_time = datetime.datetime(2014, 2, 1)
server_ips = ['1.1.1.1', '2.2.2.2']
client_ip_blocks = [(5, 10), (35, 80)]
query_actual = self.generate_packet_retransmit_rate_query(
start_time, end_time, server_ips, client_ip_blocks)
query_expected = """
SELECT
web100_log_entry.log_time AS timestamp,
(web100_log_entry.snap.SegsRetrans /
web100_log_entry.snap.DataSegsOut) AS packet_retransmit_rate
FROM
plx.google:m_lab.ndt.all
WHERE
connection_spec.data_direction = 1
AND (web100_log_entry.snap.State = 1
OR (web100_log_entry.snap.State >= 5
AND web100_log_entry.snap.State <= 11))
AND blacklist_flags == 0
AND web100_log_entry.snap.CongSignals > 0
AND web100_log_entry.snap.HCThruOctetsAcked >= 8192
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) >= 9000000
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) < 3600000000
AND ((web100_log_entry.log_time >= 1388534400) AND (web100_log_entry.log_time < 1391212800))
AND (web100_log_entry.connection_spec.local_ip = '1.1.1.1' OR
web100_log_entry.connection_spec.local_ip = '2.2.2.2')
AND (PARSE_IP(web100_log_entry.connection_spec.remote_ip) BETWEEN 5 AND 10 OR
PARSE_IP(web100_log_entry.connection_spec.remote_ip) BETWEEN 35 AND 80)"""
self.assertQueriesEqual(query_expected, query_actual)
def test_ndt_download_throughput_query_v1_1_all_properties(self):
start_time = datetime.datetime(2014, 1, 1)
end_time = datetime.datetime(2014, 2, 1)
server_ips = ['1.1.1.1', '2.2.2.2']
client_ip_blocks = [(5, 10)]
client_country = "us"
query_actual = self.generate_download_throughput_query(
start_time, end_time, server_ips, client_ip_blocks, client_country)
query_expected = """
SELECT
web100_log_entry.log_time AS timestamp,
8 * (web100_log_entry.snap.HCThruOctetsAcked /
(web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd)) AS download_mbps
FROM
plx.google:m_lab.ndt.all
WHERE
connection_spec.data_direction = 1
AND (web100_log_entry.snap.State = 1
OR (web100_log_entry.snap.State >= 5
AND web100_log_entry.snap.State <= 11))
AND blacklist_flags == 0
AND web100_log_entry.snap.CongSignals > 0
AND web100_log_entry.snap.HCThruOctetsAcked >= 8192
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) >= 9000000
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) < 3600000000
AND ((web100_log_entry.log_time >= 1388534400) AND (web100_log_entry.log_time < 1391212800))
AND (web100_log_entry.connection_spec.local_ip = '1.1.1.1' OR
web100_log_entry.connection_spec.local_ip = '2.2.2.2')
AND (PARSE_IP(web100_log_entry.connection_spec.remote_ip) BETWEEN 5 AND 10)
AND connection_spec.client_geolocation.country_code = 'US'
"""
self.assertQueriesEqual(query_expected, query_actual)
def testDownloadThroughputQuery_OptionalProperty_ServerIPs(self):
start_time = datetime.datetime(2014, 1, 1)
end_time = datetime.datetime(2014, 2, 1)
query_expected = """
SELECT
web100_log_entry.log_time AS timestamp,
8 * (web100_log_entry.snap.HCThruOctetsAcked /
(web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd)) AS download_mbps
FROM
plx.google:m_lab.ndt.all
WHERE
connection_spec.data_direction = 1
AND (web100_log_entry.snap.State = 1
OR (web100_log_entry.snap.State >= 5
AND web100_log_entry.snap.State <= 11))
AND blacklist_flags == 0
AND web100_log_entry.snap.CongSignals > 0
AND web100_log_entry.snap.HCThruOctetsAcked >= 8192
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) >= 9000000
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) < 3600000000
AND ((web100_log_entry.log_time >= 1388534400) AND (web100_log_entry.log_time < 1391212800))
AND (web100_log_entry.connection_spec.local_ip = '1.1.1.1')
"""
query_actual = self.generate_download_throughput_query(
start_time, end_time, server_ips=['1.1.1.1'])
self.assertQueriesEqual(query_expected, query_actual)
def testDownloadThroughputQuery_OptionalProperty_ClientIPBlocks(self):
start_time = datetime.datetime(2014, 1, 1)
end_time = datetime.datetime(2014, 2, 1)
query_expected = """
SELECT
web100_log_entry.log_time AS timestamp,
8 * (web100_log_entry.snap.HCThruOctetsAcked /
(web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd)) AS download_mbps
FROM
plx.google:m_lab.ndt.all
WHERE
connection_spec.data_direction = 1
AND (web100_log_entry.snap.State = 1
OR (web100_log_entry.snap.State >= 5
AND web100_log_entry.snap.State <= 11))
AND blacklist_flags == 0
AND web100_log_entry.snap.CongSignals > 0
AND web100_log_entry.snap.HCThruOctetsAcked >= 8192
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) >= 9000000
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) < 3600000000
AND ((web100_log_entry.log_time >= 1388534400) AND (web100_log_entry.log_time < 1391212800))
AND (PARSE_IP(web100_log_entry.connection_spec.remote_ip) BETWEEN 5 AND 10)
"""
query_actual = self.generate_download_throughput_query(
start_time, end_time,
client_ip_blocks=[(5, 10)])
self.assertQueriesEqual(query_expected, query_actual)
def testDownloadThroughputQuery_OptionalProperty_ClientCountry(self):
start_time = datetime.datetime(2014, 1, 1)
end_time = datetime.datetime(2014, 2, 1)
query_expected = """
SELECT
web100_log_entry.log_time AS timestamp,
8 * (web100_log_entry.snap.HCThruOctetsAcked /
(web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd)) AS download_mbps
FROM
plx.google:m_lab.ndt.all
WHERE
connection_spec.data_direction = 1
AND (web100_log_entry.snap.State = 1
OR (web100_log_entry.snap.State >= 5
AND web100_log_entry.snap.State <= 11))
AND blacklist_flags == 0
AND web100_log_entry.snap.CongSignals > 0
AND web100_log_entry.snap.HCThruOctetsAcked >= 8192
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) >= 9000000
AND (web100_log_entry.snap.SndLimTimeRwin +
web100_log_entry.snap.SndLimTimeCwnd +
web100_log_entry.snap.SndLimTimeSnd) < 3600000000
AND ((web100_log_entry.log_time >= 1388534400) AND (web100_log_entry.log_time < 1391212800))
AND connection_spec.client_geolocation.country_code = 'US'
"""
query_actual = self.generate_download_throughput_query(
start_time, end_time, client_country="US")
self.assertQueriesEqual(query_expected, query_actual)
if __name__ == '__main__':
unittest.main()
|
|
"""Python version compatibility support for minidom.
This module contains internal implementation details and
should not be imported; use xml.dom.minidom instead.
"""
# This module should only be imported using "import *".
#
# The following names are defined:
#
# NodeList -- lightest possible NodeList implementation
#
# EmptyNodeList -- lightest possible NodeList that is guaranteed to
# remain empty (immutable)
#
# StringTypes -- tuple of defined string types
#
# defproperty -- function used in conjunction with GetattrMagic;
# using these together is needed to make them work
# as efficiently as possible in both Python 2.2+
# and older versions. For example:
#
# class MyClass(GetattrMagic):
# def _get_myattr(self):
# return something
#
# defproperty(MyClass, "myattr",
# "return some value")
#
# For Python 2.2 and newer, this will construct a
# property object on the class, which avoids
# needing to override __getattr__(). It will only
# work for read-only attributes.
#
# For older versions of Python, inheriting from
# GetattrMagic will use the traditional
# __getattr__() hackery to achieve the same effect,
# but less efficiently.
#
# defproperty() should be used for each version of
# the relevant _get_<property>() function.
__all__ = ["NodeList", "EmptyNodeList", "StringTypes", "defproperty"]
import xml.dom
StringTypes = (str,)
class NodeList(list):
__slots__ = ()
def item(self, index):
if 0 <= index < len(self):
return self[index]
def _get_length(self):
return len(self)
def _set_length(self, value):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute 'length'")
length = property(_get_length, _set_length,
doc="The number of nodes in the NodeList.")
def __getstate__(self):
return list(self)
def __setstate__(self, state):
self[:] = state
class EmptyNodeList(tuple):
__slots__ = ()
def __add__(self, other):
NL = NodeList()
NL.extend(other)
return NL
def __radd__(self, other):
NL = NodeList()
NL.extend(other)
return NL
def item(self, index):
return None
def _get_length(self):
return 0
def _set_length(self, value):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute 'length'")
length = property(_get_length, _set_length,
doc="The number of nodes in the NodeList.")
def defproperty(klass, name, doc):
get = getattr(klass, ("_get_" + name))
def set(self, value, name=name):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute " + repr(name))
assert not hasattr(klass, "_set_" + name), \
"expected not to find _set_" + name
prop = property(get, set, doc=doc)
setattr(klass, name, prop)
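# Illustrative sketch of how defproperty is used (the _Demo class and its
# attribute are hypothetical):
#     class _Demo:
#         def _get_answer(self):
#             return 42
#     defproperty(_Demo, "answer", doc="The answer, read-only.")
#     _Demo().answer        # -> 42
#     _Demo().answer = 0    # raises xml.dom.NoModificationAllowedErr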
|
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id:$
'''Low-level graphics rendering.
This module provides an efficient low-level abstraction over OpenGL. It gives
very good performance for rendering OpenGL primitives; far better than the
typical immediate-mode usage and, on modern graphics cards, better than using
display lists in many cases. The module is used internally by other areas of
pyglet.
See the Programming Guide for details on how to use this graphics API.
Batches and groups
==================
Without even needing to understand the details on how to draw primitives with
the graphics API, developers can make use of `Batch` and `Group`
objects to improve performance of sprite and text rendering.
The `Sprite`, `Label` and `TextLayout` classes all accept a ``batch`` and
``group`` parameter in their constructors. A batch manages a set of objects
that will be drawn all at once, and a group describes the manner in which an
object is drawn.
The following example creates a batch, adds two sprites to the batch, and then
draws the entire batch::
batch = pyglet.graphics.Batch()
car = pyglet.sprite.Sprite(car_image, batch=batch)
boat = pyglet.sprite.Sprite(boat_image, batch=batch)
def on_draw():
batch.draw()
Drawing a complete batch is much faster than drawing the items in the batch
individually, especially when those items belong to a common group.
Groups describe the OpenGL state required for an item. This is for the most
part managed by the sprite and text classes, however you can also use groups
to ensure items are drawn in a particular order. For example, the following
example adds a background sprite which is guaranteed to be drawn before the
car and the boat::
batch = pyglet.graphics.Batch()
background = pyglet.graphics.OrderedGroup(0)
foreground = pyglet.graphics.OrderedGroup(1)
background = pyglet.sprite.Sprite(background_image,
batch=batch, group=background)
car = pyglet.sprite.Sprite(car_image, batch=batch, group=foreground)
boat = pyglet.sprite.Sprite(boat_image, batch=batch, group=foreground)
def on_draw():
batch.draw()
It's preferable to manage sprites and text objects within as few batches as
possible. If the drawing of sprites or text objects needs to be interleaved
with other drawing that does not use the graphics API, multiple batches will
be required.
Data item parameters
====================
Many of the functions and methods in this module accept any number of ``data``
parameters as their final parameters. In the documentation these are notated
as ``*data`` in the formal parameter list.
A data parameter describes a vertex attribute format and an optional sequence
to initialise that attribute. Examples of common attribute formats are:
``"v3f"``
Vertex position, specified as three floats.
``"c4B"``
Vertex color, specified as four unsigned bytes.
``"t2f"``
Texture coordinate, specified as two floats.
See `pyglet.graphics.vertexattribute` for the complete syntax of the vertex
format string.
When no initial data is to be given, the data item is just the format string.
For example, the following creates a 2 element vertex list with position and
color attributes::
vertex_list = pyglet.graphics.vertex_list(2, 'v2f', 'c4B')
When initial data is required, wrap the format string and the initial data in
a tuple, for example::
vertex_list = pyglet.graphics.vertex_list(2,
('v2f', (0.0, 1.0, 1.0, 0.0)),
('c4B', (255, 255, 255, 255) * 2))
Drawing modes
=============
Methods in this module that accept a ``mode`` parameter will accept any value
in the OpenGL drawing mode enumeration: ``GL_POINTS``, ``GL_LINE_STRIP``,
``GL_LINE_LOOP``, ``GL_LINES``, ``GL_TRIANGLE_STRIP``, ``GL_TRIANGLE_FAN``,
``GL_TRIANGLES``, ``GL_QUAD_STRIP``, ``GL_QUADS``, and ``GL_POLYGON``.
::
pyglet.graphics.draw(1, GL_POINTS, ('v2i',(10,20)))
However, because of the way the graphics API renders multiple primitives with
shared state, ``GL_POLYGON``, ``GL_LINE_LOOP`` and ``GL_TRIANGLE_FAN`` cannot
be used --- the results are undefined.
When using ``GL_LINE_STRIP``, ``GL_TRIANGLE_STRIP`` or ``GL_QUAD_STRIP`` care
must be taken to insert degenerate vertices at the beginning and end of each
vertex list. For example, given the vertex list::
A, B, C, D
the correct vertex list to provide is::
A, A, B, C, D, D
Alternatively, the ``NV_primitive_restart`` extension can be used if it is
present. This also permits use of ``GL_POLYGON``, ``GL_LINE_LOOP`` and
``GL_TRIANGLE_FAN``. Unfortunately the extension is not provided by older
video drivers, and requires indexed vertex lists.
:since: pyglet 1.1
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import ctypes
import pyglet
from pyglet.gl import *
from pyglet import gl
from pyglet.graphics import vertexbuffer, vertexattribute, vertexdomain
_debug_graphics_batch = pyglet.options['debug_graphics_batch']
def draw(size, mode, *data):
'''Draw a primitive immediately.
:Parameters:
`size` : int
Number of vertices given
`mode` : int
OpenGL drawing mode, e.g. ``GL_TRIANGLES`` (pass the GL constant
itself, not a quoted string).
`data` : data items
Attribute formats and data. See the module summary for
details.
'''
glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)
buffers = []
for format, array in data:
attribute = vertexattribute.create_attribute(format)
assert size == len(array) // attribute.count, \
'Data for %s is incorrect length' % format
buffer = vertexbuffer.create_mappable_buffer(
size * attribute.stride, vbo=False)
attribute.set_region(buffer, 0, size, array)
attribute.enable()
attribute.set_pointer(buffer.ptr)
buffers.append(buffer)
glDrawArrays(mode, 0, size)
glFlush()
glPopClientAttrib()
def draw_indexed(size, mode, indices, *data):
'''Draw a primitive with indexed vertices immediately.
:Parameters:
`size` : int
Number of vertices given
`mode` : int
OpenGL drawing mode, e.g. ``GL_TRIANGLES``
`indices` : sequence of int
Sequence of integers giving indices into the vertex list.
`data` : data items
Attribute formats and data. See the module summary for details.
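Example (a sketch; the quad coordinates are illustrative)::
    pyglet.graphics.draw_indexed(4, GL_TRIANGLES,
        [0, 1, 2, 0, 2, 3],
        ('v2i', (100, 100,  150, 100,  150, 150,  100, 150)))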
'''
glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)
buffers = []
for format, array in data:
attribute = vertexattribute.create_attribute(format)
assert size == len(array) // attribute.count, \
'Data for %s is incorrect length' % format
buffer = vertexbuffer.create_mappable_buffer(
size * attribute.stride, vbo=False)
attribute.set_region(buffer, 0, size, array)
attribute.enable()
attribute.set_pointer(buffer.ptr)
buffers.append(buffer)
if size <= 0xff:
index_type = GL_UNSIGNED_BYTE
index_c_type = ctypes.c_ubyte
elif size <= 0xffff:
index_type = GL_UNSIGNED_SHORT
index_c_type = ctypes.c_ushort
else:
index_type = GL_UNSIGNED_INT
index_c_type = ctypes.c_uint
index_array = (index_c_type * len(indices))(*indices)
glDrawElements(mode, len(indices), index_type, index_array)
glFlush()
glPopClientAttrib()
def _parse_data(data):
'''Given a list of data items, returns (formats, initial_arrays).'''
assert data, 'No attribute formats given'
# Return tuple (formats, initial_arrays).
formats = []
initial_arrays = []
for i, format in enumerate(data):
if isinstance(format, tuple):
format, array = format
initial_arrays.append((i, array))
formats.append(format)
formats = tuple(formats)
return formats, initial_arrays
def _get_default_batch():
shared_object_space = gl.current_context.object_space
try:
return shared_object_space.pyglet_graphics_default_batch
except AttributeError:
shared_object_space.pyglet_graphics_default_batch = Batch()
return shared_object_space.pyglet_graphics_default_batch
def vertex_list(count, *data):
'''Create a `VertexList` not associated with a batch, group or mode.
:Parameters:
`count` : int
The number of vertices in the list.
`data` : data items
Attribute formats and initial data for the vertex list. See the
module summary for details.
:rtype: `VertexList`
'''
# Note that mode=0 because the default batch is never drawn: vertex lists
# returned from this function are drawn directly by the app.
return _get_default_batch().add(count, 0, None, *data)
def vertex_list_indexed(count, indices, *data):
'''Create an `IndexedVertexList` not associated with a batch, group or mode.
:Parameters:
`count` : int
The number of vertices in the list.
`indices` : sequence
Sequence of integers giving indices into the vertex list.
`data` : data items
Attribute formats and initial data for the vertex list. See the
module summary for details.
:rtype: `IndexedVertexList`
'''
# Note that mode=0 because the default batch is never drawn: vertex lists
# returned from this function are drawn directly by the app.
return _get_default_batch().add_indexed(count, 0, None, indices, *data)
class Batch(object):
'''Manage a collection of vertex lists for batched rendering.
Vertex lists are added to a `Batch` using the `add` and `add_indexed`
methods. An optional group can be specified along with the vertex list,
which gives the OpenGL state required for its rendering. Vertex lists
with shared mode and group are allocated into adjacent areas of memory and
sent to the graphics card in a single operation.
Call `VertexList.delete` to remove a vertex list from the batch.
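For example, a minimal sketch of typical use (the vertex data is
illustrative)::
    batch = pyglet.graphics.Batch()
    vertex_list = batch.add(3, GL_TRIANGLES, None,
        ('v2f', (0.0, 0.0,  1.0, 0.0,  0.5, 1.0)),
        ('c3B', (255, 0, 0) * 3))
    # ... later, in the window's on_draw handler:
    batch.draw()
    # remove the triangle from the batch when it is no longer needed:
    vertex_list.delete()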
'''
def __init__(self):
'''Create a graphics batch.'''
# Mapping to find domain.
# group -> (attributes, mode, indexed) -> domain
self.group_map = {}
# Mapping of group to list of children.
self.group_children = {}
# List of top-level groups
self.top_groups = []
self._draw_list = []
self._draw_list_dirty = False
def invalidate(self):
'''Force the batch to update the draw list.
This method can be used to force the batch to re-compute the draw list
when the ordering of groups has changed.
:since: pyglet 1.2
'''
self._draw_list_dirty = True
def add(self, count, mode, group, *data):
'''Add a vertex list to the batch.
:Parameters:
`count` : int
The number of vertices in the list.
`mode` : int
OpenGL drawing mode enumeration; for example, one of
``GL_POINTS``, ``GL_LINES``, ``GL_TRIANGLES``, etc.
See the module summary for additional information.
`group` : `Group`
Group of the vertex list, or ``None`` if no group is required.
`data` : data items
Attribute formats and initial data for the vertex list. See
the module summary for details.
:rtype: `VertexList`
'''
formats, initial_arrays = _parse_data(data)
domain = self._get_domain(False, mode, group, formats)
# Create vertex list and initialize
vlist = domain.create(count)
for i, array in initial_arrays:
vlist._set_attribute_data(i, array)
return vlist
def add_indexed(self, count, mode, group, indices, *data):
'''Add an indexed vertex list to the batch.
:Parameters:
`count` : int
The number of vertices in the list.
`mode` : int
OpenGL drawing mode enumeration; for example, one of
``GL_POINTS``, ``GL_LINES``, ``GL_TRIANGLES``, etc.
See the module summary for additional information.
`group` : `Group`
Group of the vertex list, or ``None`` if no group is required.
`indices` : sequence
Sequence of integers giving indices into the vertex list.
`data` : data items
Attribute formats and initial data for the vertex list. See
the module summary for details.
:rtype: `IndexedVertexList`
'''
formats, initial_arrays = _parse_data(data)
domain = self._get_domain(True, mode, group, formats)
# Create vertex list and initialize
vlist = domain.create(count, len(indices))
start = vlist.start
vlist._set_index_data([i + start for i in indices])
for i, array in initial_arrays:
vlist._set_attribute_data(i, array)
return vlist
def migrate(self, vertex_list, mode, group, batch):
'''Migrate a vertex list to another batch and/or group.
`vertex_list` and `mode` together identify the vertex list to migrate.
`group` and `batch` are new owners of the vertex list after migration.
The results are undefined if `mode` is not correct or if `vertex_list`
does not belong to this batch (they are not checked and will not
necessarily throw an exception immediately).
`batch` can remain unchanged if only a group change is desired.
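For example, a sketch of moving a list of triangles to a new group in a
different batch (all names here are illustrative)::
    old_batch.migrate(vertex_list, GL_TRIANGLES, new_group, new_batch)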
:Parameters:
`vertex_list` : `VertexList`
A vertex list currently belonging to this batch.
`mode` : int
The current GL drawing mode of the vertex list.
`group` : `Group`
The new group to migrate to.
`batch` : `Batch`
The batch to migrate to (or the current batch).
'''
formats = vertex_list.domain.__formats
domain = batch._get_domain(False, mode, group, formats)
vertex_list.migrate(domain)
def _get_domain(self, indexed, mode, group, formats):
if group is None:
group = null_group
# Batch group
if group not in self.group_map:
self._add_group(group)
domain_map = self.group_map[group]
# Find domain given formats, indices and mode
key = (formats, mode, indexed)
try:
domain = domain_map[key]
except KeyError:
# Create domain
if indexed:
domain = vertexdomain.create_indexed_domain(*formats)
else:
domain = vertexdomain.create_domain(*formats)
domain.__formats = formats
domain_map[key] = domain
self._draw_list_dirty = True
return domain
def _add_group(self, group):
self.group_map[group] = {}
if group.parent is None:
self.top_groups.append(group)
else:
if group.parent not in self.group_map:
self._add_group(group.parent)
if group.parent not in self.group_children:
self.group_children[group.parent] = []
self.group_children[group.parent].append(group)
self._draw_list_dirty = True
def _update_draw_list(self):
'''Visit group tree in preorder and create a list of bound methods
to call.
'''
def visit(group):
draw_list = []
# Draw domains using this group
domain_map = self.group_map[group]
for (formats, mode, indexed), domain in list(domain_map.items()):
# Remove unused domains from batch
if domain._is_empty():
del domain_map[(formats, mode, indexed)]
continue
draw_list.append(
(lambda d, m: lambda: d.draw(m))(domain, mode))
# Sort and visit child groups of this group
children = self.group_children.get(group)
if children:
children.sort()
for child in list(children):
draw_list.extend(visit(child))
if children or domain_map:
return [group.set_state] + draw_list + [group.unset_state]
else:
# Remove unused group from batch
del self.group_map[group]
if group.parent:
self.group_children[group.parent].remove(group)
try:
del self.group_children[group]
except KeyError:
pass
try:
self.top_groups.remove(group)
except ValueError:
pass
return []
self._draw_list = []
self.top_groups.sort()
for group in list(self.top_groups):
self._draw_list.extend(visit(group))
self._draw_list_dirty = False
if _debug_graphics_batch:
self._dump_draw_list()
def _dump_draw_list(self):
def dump(group, indent=''):
print(indent, 'Begin group', group)
domain_map = self.group_map[group]
for _, domain in domain_map.items():
print(indent, ' ', domain)
for start, size in zip(*domain.allocator.get_allocated_regions()):
print(indent, ' ', 'Region %d size %d:' % (start, size))
for key, attribute in domain.attribute_names.items():
try:
region = attribute.get_region(attribute.buffer,
start, size)
print(indent, ' ', key, region.array[:])
except Exception:
# Region may not be mappable; report it and continue.
print(indent, ' ', key, '(unmappable)')
for child in self.group_children.get(group, ()):
dump(child, indent + ' ')
print(indent, 'End group', group)
print('Draw list for %r:' % self)
for group in self.top_groups:
dump(group)
def draw(self):
'''Draw the batch.
'''
if self._draw_list_dirty:
self._update_draw_list()
for func in self._draw_list:
func()
def draw_subset(self, vertex_lists):
'''Draw only some vertex lists in the batch.
The use of this method is highly discouraged, as it is quite
inefficient. Usually an application can be redesigned so that batches
can always be drawn in their entirety, using `draw`.
The given vertex lists must belong to this batch; behaviour is
undefined if this condition is not met.
:Parameters:
`vertex_lists` : sequence of `VertexList` or `IndexedVertexList`
Vertex lists to draw.
'''
# Horrendously inefficient.
def visit(group):
group.set_state()
# Draw domains using this group
domain_map = self.group_map[group]
for (_, mode, _), domain in domain_map.items():
for list in vertex_lists:
if list.domain is domain:
list.draw(mode)
# Sort and visit child groups of this group
children = self.group_children.get(group)
if children:
children.sort()
for child in children:
visit(child)
group.unset_state()
self.top_groups.sort()
for group in self.top_groups:
visit(group)
class Group(object):
'''Group of common OpenGL state.
Before a vertex list is rendered, its group's OpenGL state is set, as are
the states of its ancestor groups. The state change can be defined
arbitrarily on subclasses; the default implementation has no effect, and
simply groups vertex lists in the order in which they are drawn.
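For example, a sketch of a group that enables alpha blending for everything
drawn with it (this subclass is an illustration, not part of the module)::
    class BlendGroup(Group):
        def set_state(self):
            glEnable(GL_BLEND)
            glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        def unset_state(self):
            glDisable(GL_BLEND)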
'''
def __init__(self, parent=None):
'''Create a group.
:Parameters:
`parent` : `Group`
Group to contain this group; its state will be set before this
state's.
'''
self.parent = parent
def __lt__(self, other):
return hash(self) < hash(other)
def set_state(self):
'''Apply the OpenGL state change.
The default implementation does nothing.'''
pass
def unset_state(self):
'''Repeal the OpenGL state change.
The default implementation does nothing.'''
pass
def set_state_recursive(self):
'''Set this group and its ancestry.
Call this method if you are using a group in isolation: the
parent groups will be called in top-down order, with this group's
`set_state` being called last.
'''
if self.parent:
self.parent.set_state_recursive()
self.set_state()
def unset_state_recursive(self):
'''Unset this group and its ancestry.
The inverse of `set_state_recursive`.
'''
self.unset_state()
if self.parent:
self.parent.unset_state_recursive()
class NullGroup(Group):
'''The default group class used when ``None`` is given to a batch.
This implementation has no effect.
'''
pass
#: The default group.
#:
#: :type: `Group`
null_group = NullGroup()
class TextureGroup(Group):
'''A group that enables and binds a texture.
Texture groups are equal if their textures' targets and names are equal.
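For example (the image file name is illustrative)::
    texture = pyglet.image.load('kitten.png').get_texture()
    group = TextureGroup(texture)
    # vertex lists added to a batch with ``group`` are drawn with this
    # texture enabled and bound.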
'''
# Don't use this, create your own group classes that are more specific.
# This is just an example.
def __init__(self, texture, parent=None):
'''Create a texture group.
:Parameters:
`texture` : `Texture`
Texture to bind.
`parent` : `Group`
Parent group.
'''
super(TextureGroup, self).__init__(parent)
self.texture = texture
def set_state(self):
glEnable(self.texture.target)
glBindTexture(self.texture.target, self.texture.id)
def unset_state(self):
glDisable(self.texture.target)
def __hash__(self):
return hash((self.texture.target, self.texture.id, self.parent))
def __eq__(self, other):
return (self.__class__ is other.__class__ and
self.texture.target == other.texture.target and
self.texture.id == other.texture.id and
self.parent == other.parent)
def __repr__(self):
return '%s(id=%d)' % (self.__class__.__name__, self.texture.id)
class OrderedGroup(Group):
'''A group with partial order.
Ordered groups with a common parent are rendered in ascending order of
their ``order`` field. This is a useful way to render multiple layers of
a scene within a single batch.
'''
# This can be useful as a top-level group, or as a superclass for other
# groups that need to be ordered.
#
# As a top-level group it's useful because graphics can be composited in a
# known order even if they don't know about each other or share any known
# group.
def __init__(self, order, parent=None):
'''Create an ordered group.
:Parameters:
`order` : int
Order of this group.
`parent` : `Group`
Parent of this group.
'''
super(OrderedGroup, self).__init__(parent)
self.order = order
def __lt__(self, other):
if isinstance(other, OrderedGroup):
return self.order < other.order
return super(OrderedGroup, self).__lt__(other)
def __eq__(self, other):
return (self.__class__ is other.__class__ and
self.order == other.order and
self.parent == other.parent)
def __hash__(self):
return hash((self.order, self.parent))
def __repr__(self):
return '%s(%d)' % (self.__class__.__name__, self.order)
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from acq4.devices.OptomechDevice import *
import time
import numpy as np
from acq4.Manager import getManager
from acq4.devices.Stage import Stage, MoveFuture
from acq4.util.Thread import Thread
import acq4.pyqtgraph as pg
from acq4.util import Qt
from acq4.pyqtgraph import ptime
from acq4.util.Mutex import Mutex
class MockStage(Stage):
def __init__(self, dm, config, name):
Stage.__init__(self, dm, config, name)
self._lastMove = None
self.stageThread = MockStageThread()
self.stageThread.positionChanged.connect(self.posChanged)
self.stageThread.start()
dm.declareInterface(name, ['stage'], self)
# Global key press handling
self.modifierScales = {
Qt.Qt.Key_Control: 4.0,
Qt.Qt.Key_Alt: 0.25,
Qt.Qt.Key_Shift: 0.1,
}
self.keyDirections = np.array([
[0, 0, 1],
[0, 1, 0],
[0, 0, -1],
[-1, 0, 0],
[0, -1, 0],
[1, 0, 0],
])
self._directionKeys = set()
self._modifiers = set()
if 'keys' in config:
Qt.QCoreApplication.instance().installEventFilter(self)
self._quit = False
dm.sigAbortAll.connect(self.abort)
def capabilities(self):
"""Return a structure describing the capabilities of this device"""
if 'capabilities' in self.config:
return self.config['capabilities']
else:
return {
'getPos': (True, True, True),
'setPos': (True, True, True),
'limits': (False, False, False),
}
def _move(self, abs, rel, speed, linear):
"""Called by base stage class when the user requests to move to an
absolute or relative position.
"""
with self.lock:
self._interruptMove()
pos = self._toAbsolutePosition(abs, rel)
speed = self._interpretSpeed(speed)
self._lastMove = MockMoveFuture(self, pos, speed)
return self._lastMove
def eventFilter(self, obj, ev):
"""Catch key press/release events used for driving the stage.
"""
#if self._quit:
#return False
if ev.type() not in (Qt.QEvent.KeyPress, Qt.QEvent.KeyRelease, Qt.QEvent.ShortcutOverride):
return False
if ev.isAutoRepeat():
return False
key = str(ev.text()).lower()
keys = self.config.get('keys')
if key != '' and key in keys:
direction = keys.index(key)
if ev.type() == Qt.QEvent.KeyRelease:
self._directionKeys.discard(direction)
else:
self._directionKeys.add(direction)
elif ev.key() in self.modifierScales:
if ev.type() == Qt.QEvent.KeyRelease:
self._modifiers.discard(ev.key())
else:
self._modifiers.add(ev.key())
else:
return False
self._updateKeySpeed()
return False
def _updateKeySpeed(self):
s = 1000e-6
for mod in self._modifiers:
s = s * self.modifierScales[mod]
vec = np.array([0, 0, 0])
for key in self._directionKeys:
vec = vec + self.keyDirections[key] * s
self.startMoving(vec)
def stop(self):
with self.lock:
self.abort()
def abort(self):
self._interruptMove()
self.stageThread.stop()
def _interruptMove(self):
if self._lastMove is not None and not self._lastMove.isDone():
self._lastMove._interrupted = True
def setUserSpeed(self, v):
pass
def _getPosition(self):
return self.stageThread.getPosition()
def targetPosition(self):
with self.lock:
if self._lastMove is None or self._lastMove.isDone():
return self.getPosition()
else:
return self._lastMove.targetPos
def startMoving(self, vel):
"""Begin moving the stage at a continuous velocity.
"""
with self.lock:
self._interruptMove()
vel1 = np.zeros(3)
vel1[:len(vel)] = vel
self.stageThread.setVelocity(vel1)
def quit(self):
self.abort()
self.stageThread.quit()
self._quit = True
class MockMoveFuture(MoveFuture):
"""Provides access to a move-in-progress on a mock manipulator.
"""
def __init__(self, dev, pos, speed):
MoveFuture.__init__(self, dev, pos, speed)
self.targetPos = pos
self._finished = False
self._interrupted = False
self._errorMsg = None
self.dev.stageThread.setTarget(self, pos, speed)
def wasInterrupted(self):
"""Return True if the move was interrupted before completing.
"""
return self._interrupted
def isDone(self):
"""Return True if the move is complete or was interrupted.
"""
return self._finished or self._interrupted
def errorMessage(self):
return self._errorMsg
class MockStageThread(Thread):
"""Thread used to simulate stage hardware.
It is necessary for this to be in a thread because some stage users will
block while waiting for a stage movement to complete.
"""
positionChanged = Qt.Signal(object)
def __init__(self):
self.pos = np.zeros(3)
self.target = None
self.speed = None
self.velocity = None
self._quit = False
self.lock = Mutex()
self.interval = 30e-3
self.lastUpdate = None
self.currentMove = None
Thread.__init__(self)
def start(self):
self._quit = False
self.lastUpdate = ptime.time()
Thread.start(self)
def stop(self):
with self.lock:
self.target = None
self.speed = None
self.velocity = None
def quit(self):
with self.lock:
self._quit = True
def setTarget(self, future, target, speed):
"""Begin moving toward a target position.
"""
with self.lock:
self.currentMove = future
self.target = target
self.speed = speed
self.velocity = None
def setVelocity(self, vel):
with self.lock:
self.currentMove = None
self.target = None
self.speed = None
self.velocity = vel
def getPosition(self):
with self.lock:
return self.pos.copy()
def run(self):
lastUpdate = ptime.time()
while True:
with self.lock:
if self._quit:
break
target = self.target
speed = self.speed
velocity = self.velocity
currentMove = self.currentMove
pos = self.pos
now = ptime.time()
dt = now - lastUpdate
lastUpdate = now
if target is not None:
dif = target - pos
dist = np.linalg.norm(dif)
stepDist = speed * dt
if stepDist >= dist:
self._setPosition(target)
self.currentMove._finished = True
self.stop()
else:
unit = dif / dist
step = unit * stepDist
self._setPosition(pos + step)
elif velocity is not None and not np.all(velocity == 0):
self._setPosition(pos + velocity * dt)
time.sleep(self.interval)
def _setPosition(self, pos):
self.pos = np.array(pos)
self.positionChanged.emit(pos)
#class MockStageInterface(Qt.QWidget):
#def __init__(self, dev, win, keys=None):
#self.win = win
#self.dev = dev
#Qt.QWidget.__init__(self)
#self.layout = Qt.QGridLayout()
#self.setLayout(self.layout)
#self.btn = pg.JoystickButton()
#self.layout.addWidget(self.btn, 0, 0)
#self.label = Qt.QLabel()
#self.layout.addWidget(self.label)
#self.dev.sigPositionChanged.connect(self.update)
#self.btn.sigStateChanged.connect(self.btnChanged)
#self.label.setFixedWidth(300)
#def btnChanged(self, btn, state):
#self.dev.setSpeed((state[0] * 0.0001, state[1] * 0.0001))
#def update(self):
#pos = self.dev.getPosition()
#text = [pg.siFormat(x, suffix='m', precision=5) for x in pos]
#self.label.setText(", ".join(text))
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from openstack.baremetal.v1 import _proxy
from openstack.baremetal.v1 import allocation
from openstack.baremetal.v1 import chassis
from openstack.baremetal.v1 import driver
from openstack.baremetal.v1 import node
from openstack.baremetal.v1 import port
from openstack.baremetal.v1 import port_group
from openstack import exceptions
from openstack.tests.unit import base
from openstack.tests.unit import test_proxy_base
_MOCK_METHOD = 'openstack.baremetal.v1._proxy.Proxy._get_with_fields'
class TestBaremetalProxy(test_proxy_base.TestProxyBase):
def setUp(self):
super(TestBaremetalProxy, self).setUp()
self.proxy = _proxy.Proxy(self.session)
def test_drivers(self):
self.verify_list(self.proxy.drivers, driver.Driver)
def test_get_driver(self):
self.verify_get(self.proxy.get_driver, driver.Driver)
@mock.patch.object(chassis.Chassis, 'list')
def test_chassis_detailed(self, mock_list):
result = self.proxy.chassis(details=True, query=1)
self.assertIs(result, mock_list.return_value)
mock_list.assert_called_once_with(self.proxy, details=True, query=1)
@mock.patch.object(chassis.Chassis, 'list')
def test_chassis_not_detailed(self, mock_list):
result = self.proxy.chassis(query=1)
self.assertIs(result, mock_list.return_value)
mock_list.assert_called_once_with(self.proxy, details=False, query=1)
def test_create_chassis(self):
self.verify_create(self.proxy.create_chassis, chassis.Chassis)
def test_find_chassis(self):
self.verify_find(self.proxy.find_chassis, chassis.Chassis)
def test_get_chassis(self):
self.verify_get(self.proxy.get_chassis, chassis.Chassis,
mock_method=_MOCK_METHOD,
expected_kwargs={'fields': None})
def test_update_chassis(self):
self.verify_update(self.proxy.update_chassis, chassis.Chassis)
def test_delete_chassis(self):
self.verify_delete(self.proxy.delete_chassis, chassis.Chassis, False)
def test_delete_chassis_ignore(self):
self.verify_delete(self.proxy.delete_chassis, chassis.Chassis, True)
@mock.patch.object(node.Node, 'list')
def test_nodes_detailed(self, mock_list):
result = self.proxy.nodes(details=True, query=1)
self.assertIs(result, mock_list.return_value)
mock_list.assert_called_once_with(self.proxy, details=True, query=1)
@mock.patch.object(node.Node, 'list')
def test_nodes_not_detailed(self, mock_list):
result = self.proxy.nodes(query=1)
self.assertIs(result, mock_list.return_value)
mock_list.assert_called_once_with(self.proxy, details=False, query=1)
def test_create_node(self):
self.verify_create(self.proxy.create_node, node.Node)
def test_find_node(self):
self.verify_find(self.proxy.find_node, node.Node)
def test_get_node(self):
self.verify_get(self.proxy.get_node, node.Node,
mock_method=_MOCK_METHOD,
expected_kwargs={'fields': None})
@mock.patch.object(node.Node, 'commit', autospec=True)
def test_update_node(self, mock_commit):
self.proxy.update_node('uuid', instance_id='new value')
mock_commit.assert_called_once_with(mock.ANY, self.proxy,
retry_on_conflict=True)
self.assertEqual('new value', mock_commit.call_args[0][0].instance_id)
@mock.patch.object(node.Node, 'commit', autospec=True)
def test_update_node_no_retries(self, mock_commit):
self.proxy.update_node('uuid', instance_id='new value',
retry_on_conflict=False)
mock_commit.assert_called_once_with(mock.ANY, self.proxy,
retry_on_conflict=False)
self.assertEqual('new value', mock_commit.call_args[0][0].instance_id)
def test_delete_node(self):
self.verify_delete(self.proxy.delete_node, node.Node, False)
def test_delete_node_ignore(self):
self.verify_delete(self.proxy.delete_node, node.Node, True)
@mock.patch.object(port.Port, 'list')
def test_ports_detailed(self, mock_list):
result = self.proxy.ports(details=True, query=1)
self.assertIs(result, mock_list.return_value)
mock_list.assert_called_once_with(self.proxy, details=True, query=1)
@mock.patch.object(port.Port, 'list')
def test_ports_not_detailed(self, mock_list):
result = self.proxy.ports(query=1)
self.assertIs(result, mock_list.return_value)
mock_list.assert_called_once_with(self.proxy, details=False, query=1)
def test_create_port(self):
self.verify_create(self.proxy.create_port, port.Port)
def test_find_port(self):
self.verify_find(self.proxy.find_port, port.Port)
def test_get_port(self):
self.verify_get(self.proxy.get_port, port.Port,
mock_method=_MOCK_METHOD,
expected_kwargs={'fields': None})
def test_update_port(self):
self.verify_update(self.proxy.update_port, port.Port)
def test_delete_port(self):
self.verify_delete(self.proxy.delete_port, port.Port, False)
def test_delete_port_ignore(self):
self.verify_delete(self.proxy.delete_port, port.Port, True)
@mock.patch.object(port_group.PortGroup, 'list')
def test_port_groups_detailed(self, mock_list):
result = self.proxy.port_groups(details=True, query=1)
self.assertIs(result, mock_list.return_value)
mock_list.assert_called_once_with(self.proxy, details=True, query=1)
@mock.patch.object(port_group.PortGroup, 'list')
def test_port_groups_not_detailed(self, mock_list):
result = self.proxy.port_groups(query=1)
self.assertIs(result, mock_list.return_value)
mock_list.assert_called_once_with(self.proxy, details=False, query=1)
def test_get_port_group(self):
self.verify_get(self.proxy.get_port_group, port_group.PortGroup,
mock_method=_MOCK_METHOD,
expected_kwargs={'fields': None})
def test_create_allocation(self):
self.verify_create(self.proxy.create_allocation, allocation.Allocation)
def test_get_allocation(self):
self.verify_get(self.proxy.get_allocation, allocation.Allocation,
mock_method=_MOCK_METHOD,
expected_kwargs={'fields': None})
def test_delete_allocation(self):
self.verify_delete(self.proxy.delete_allocation, allocation.Allocation,
False)
def test_delete_allocation_ignore(self):
self.verify_delete(self.proxy.delete_allocation, allocation.Allocation,
True)
@mock.patch.object(node.Node, 'fetch', autospec=True)
def test__get_with_fields_none(self, mock_fetch):
result = self.proxy._get_with_fields(node.Node, 'value')
self.assertIs(result, mock_fetch.return_value)
mock_fetch.assert_called_once_with(mock.ANY, self.proxy,
error_message=mock.ANY)
@mock.patch.object(node.Node, 'fetch', autospec=True)
def test__get_with_fields(self, mock_fetch):
result = self.proxy._get_with_fields(node.Node, 'value',
fields=['a', 'b'])
self.assertIs(result, mock_fetch.return_value)
mock_fetch.assert_called_once_with(mock.ANY, self.proxy,
error_message=mock.ANY,
fields='a,b')
@mock.patch('time.sleep', lambda _sec: None)
@mock.patch.object(_proxy.Proxy, 'get_node', autospec=True)
class TestWaitForNodesProvisionState(base.TestCase):
def setUp(self):
super(TestWaitForNodesProvisionState, self).setUp()
self.session = mock.Mock()
self.proxy = _proxy.Proxy(self.session)
def test_success(self, mock_get):
# two attempts, one node succeeds after the 1st
nodes = [mock.Mock(spec=node.Node, id=str(i))
for i in range(3)]
for i, n in enumerate(nodes):
# 1st attempt on 1st node, 2nd attempt on 2nd node
n._check_state_reached.return_value = not (i % 2)
mock_get.side_effect = nodes
result = self.proxy.wait_for_nodes_provision_state(
['abcd', node.Node(id='1234')], 'fake state')
self.assertEqual([nodes[0], nodes[2]], result)
for n in nodes:
n._check_state_reached.assert_called_once_with(
self.proxy, 'fake state', True)
def test_timeout(self, mock_get):
mock_get.return_value._check_state_reached.return_value = False
mock_get.return_value.id = '1234'
self.assertRaises(exceptions.ResourceTimeout,
self.proxy.wait_for_nodes_provision_state,
['abcd', node.Node(id='1234')], 'fake state',
timeout=0.001)
mock_get.return_value._check_state_reached.assert_called_with(
self.proxy, 'fake state', True)
|
|
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the output specifications."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from absl import logging
from interval_bound_propagation.src import bounds as bounds_lib
from interval_bound_propagation.src import verifiable_wrapper
import six
import sonnet as snt
import tensorflow.compat.v1 as tf
@six.add_metaclass(abc.ABCMeta)
class Specification(snt.AbstractModule):
"""Defines a specification."""
def __init__(self, name, collapse=True):
super(Specification, self).__init__(name=name)
self._collapse = collapse
@abc.abstractmethod
def _build(self, modules):
"""Computes the worst-case specification value."""
@abc.abstractmethod
def evaluate(self, logits):
"""Computes the specification value.
Args:
logits: The logits Tensor can have different shapes, i.e.,
[batch_size, num_classes]: The output should be [batch_size, num_specs].
[num_restarts, batch_size, num_classes]: The output should be
[num_restarts, batch_size, num_specs]. Used by UntargetedPGDAttack.
[num_restarts, num_specs, batch_size, num_classes]: The output should
be [num_restarts, batch_size, num_specs]. For this case, the
specifications must be evaluated individually for each column
(axis = 1). Used by MultiTargetedPGDAttack.
Returns:
The specification values evaluated at the network output.
"""
@abc.abstractproperty
def num_specifications(self):
"""Returns the number of specifications."""
@property
def collapse(self):
return self._collapse
class LinearSpecification(Specification):
"""Linear specifications: c^T * z_K + d <= 0."""
def __init__(self, c, d=None, prune_irrelevant=True, collapse=True):
"""Builds a linear specification module."""
super(LinearSpecification, self).__init__(name='specs', collapse=collapse)
# c has shape [batch_size, num_specifications, num_outputs]
# d has shape [batch_size, num_specifications]
# Some specifications may be irrelevant (not a function of the output).
# We automatically remove them for clarity. We expect the number of
# irrelevant specs to be equal for all elements of a batch.
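# As an illustration (hypothetical values): with num_outputs=3 and true
# class 0, a row c[b] = [[-1, 1, 0], [-1, 0, 1]] with d=None encodes the
# two margins z_1 - z_0 <= 0 and z_2 - z_0 <= 0.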
# Shape is [batch_size, num_specifications]
if prune_irrelevant:
irrelevant = tf.equal(tf.reduce_sum(
tf.cast(tf.abs(c) > 1e-6, tf.int32), axis=-1, keepdims=True), 0)
batch_size = tf.shape(c)[0]
num_outputs = tf.shape(c)[2]
irrelevant = tf.tile(irrelevant, [1, 1, num_outputs])
self._c = tf.reshape(
tf.boolean_mask(c, tf.logical_not(irrelevant)),
[batch_size, -1, num_outputs])
else:
self._c = c
self._d = d
def _build(self, modules):
"""Outputs specification value."""
# inputs have shape [batch_size, num_outputs].
if not (self.collapse and
isinstance(modules[-1], verifiable_wrapper.LinearFCWrapper)):
logging.info('Elision of last layer disabled.')
bounds = modules[-1].output_bounds
w = self._c
b = self._d
else:
logging.info('Elision of last layer active.')
# Collapse the last layer.
bounds = modules[-1].input_bounds
w = modules[-1].module.w
b = modules[-1].module.b
w = tf.einsum('ijk,lk->ijl', self._c, w)
b = tf.einsum('ijk,k->ij', self._c, b)
if self._d is not None:
b += self._d
# Maximize z * w + b s.t. lower <= z <= upper.
bounds = bounds_lib.IntervalBounds.convert(bounds)
c = (bounds.lower + bounds.upper) / 2.
r = (bounds.upper - bounds.lower) / 2.
c = tf.einsum('ij,ikj->ik', c, w)
if b is not None:
c += b
r = tf.einsum('ij,ikj->ik', r, tf.abs(w))
# output has shape [batch_size, num_specifications].
return c + r
def evaluate(self, logits):
if len(logits.shape) == 2:
output = tf.einsum('ij,ikj->ik', logits, self._c)
elif len(logits.shape) == 3:
output = tf.einsum('rij,ikj->rik', logits, self._c)
else:
assert len(logits.shape) == 4
output = tf.einsum('rsbo,bso->rbs', logits, self._c)
if self._d is not None:
output += self._d
return output
@property
def num_specifications(self):
return tf.shape(self._c)[1]
@property
def c(self):
return self._c
@property
def d(self):
return self._d
class ClassificationSpecification(Specification):
"""Creates a linear specification that corresponds to a classification.
This class is not a standard LinearSpecification as it does not materialize
the c and d tensors.
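Example usage (a sketch; `label` and the shapes are illustrative):
    spec = ClassificationSpecification(label, num_classes=10)
    # spec.evaluate(logits) with logits of shape [batch_size, 10] returns
    # [batch_size, 9] margins of the form wrong_logit - correct_logit;
    # the specification holds when every margin is <= 0.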
"""
def __init__(self, label, num_classes, collapse=True):
super(ClassificationSpecification, self).__init__(name='specs',
collapse=collapse)
self._label = label
self._num_classes = num_classes
# Precompute indices.
with self._enter_variable_scope():
indices = []
for i in range(self._num_classes):
indices.append(list(range(i)) + list(range(i + 1, self._num_classes)))
indices = tf.constant(indices, dtype=tf.int32)
self._correct_idx, self._wrong_idx = self._build_indices(label, indices)
def _build(self, modules):
if not (self.collapse and
isinstance(modules[-1], verifiable_wrapper.LinearFCWrapper)):
logging.info('Elision of last layer disabled.')
bounds = modules[-1].output_bounds
bounds = bounds_lib.IntervalBounds.convert(bounds)
correct_class_logit = tf.gather_nd(bounds.lower, self._correct_idx)
wrong_class_logits = tf.gather_nd(bounds.upper, self._wrong_idx)
return wrong_class_logits - tf.expand_dims(correct_class_logit, 1)
logging.info('Elision of last layer active.')
bounds = modules[-1].input_bounds
bounds = bounds_lib.IntervalBounds.convert(bounds)
batch_size = tf.shape(bounds.lower)[0]
w = modules[-1].module.w
b = modules[-1].module.b
w_t = tf.tile(tf.expand_dims(tf.transpose(w), 0), [batch_size, 1, 1])
b_t = tf.tile(tf.expand_dims(b, 0), [batch_size, 1])
w_correct = tf.expand_dims(tf.gather_nd(w_t, self._correct_idx), -1)
b_correct = tf.expand_dims(tf.gather_nd(b_t, self._correct_idx), 1)
w_wrong = tf.transpose(tf.gather_nd(w_t, self._wrong_idx), [0, 2, 1])
b_wrong = tf.gather_nd(b_t, self._wrong_idx)
w = w_wrong - w_correct
b = b_wrong - b_correct
# Maximize z * w + b s.t. lower <= z <= upper.
c = (bounds.lower + bounds.upper) / 2.
r = (bounds.upper - bounds.lower) / 2.
c = tf.einsum('ij,ijk->ik', c, w)
if b is not None:
c += b
r = tf.einsum('ij,ijk->ik', r, tf.abs(w))
return c + r
def evaluate(self, logits):
if len(logits.shape) == 2:
correct_class_logit = tf.gather_nd(logits, self._correct_idx)
correct_class_logit = tf.expand_dims(correct_class_logit, -1)
wrong_class_logits = tf.gather_nd(logits, self._wrong_idx)
elif len(logits.shape) == 3:
# [num_restarts, batch_size, num_classes] to
# [num_restarts, batch_size, num_specs]
logits = tf.transpose(logits, [1, 2, 0]) # Put restart dimension last.
correct_class_logit = tf.gather_nd(logits, self._correct_idx)
correct_class_logit = tf.transpose(correct_class_logit)
correct_class_logit = tf.expand_dims(correct_class_logit, -1)
wrong_class_logits = tf.gather_nd(logits, self._wrong_idx)
wrong_class_logits = tf.transpose(wrong_class_logits, [2, 0, 1])
else:
assert len(logits.shape) == 4
# [num_restarts, num_specs, batch_size, num_classes] to
# [num_restarts, batch_size, num_specs].
logits = tf.transpose(logits, [2, 3, 1, 0])
correct_class_logit = tf.gather_nd(logits, self._correct_idx)
correct_class_logit = tf.transpose(correct_class_logit, [2, 0, 1])
batch_size = tf.shape(logits)[0]
wrong_idx = tf.concat([
self._wrong_idx,
tf.tile(tf.reshape(tf.range(self.num_specifications, dtype=tf.int32),
[1, self.num_specifications, 1]),
[batch_size, 1, 1])], axis=-1)
wrong_class_logits = tf.gather_nd(logits, wrong_idx)
wrong_class_logits = tf.transpose(wrong_class_logits, [2, 0, 1])
return wrong_class_logits - correct_class_logit
@property
def num_specifications(self):
return self._num_classes - 1
@property
def correct_idx(self):
return self._correct_idx
@property
def wrong_idx(self):
return self._wrong_idx
def _build_indices(self, label, indices):
batch_size = tf.shape(label)[0]
i = tf.range(batch_size, dtype=tf.int32)
correct_idx = tf.stack([i, tf.cast(label, tf.int32)], axis=1)
wrong_idx = tf.stack([
tf.tile(tf.reshape(i, [batch_size, 1]), [1, self._num_classes - 1]),
tf.gather(indices, label),
], axis=2)
return correct_idx, wrong_idx
class TargetedClassificationSpecification(ClassificationSpecification):
"""Defines a specification that compares the true class with another."""
def __init__(self, label, num_classes, target_class, collapse=True):
super(TargetedClassificationSpecification, self).__init__(
label, num_classes, collapse=collapse)
batch_size = tf.shape(label)[0]
if len(target_class.shape) == 1:
target_class = tf.reshape(target_class, [batch_size, 1])
self._num_specifications = target_class.shape[1].value
if self._num_specifications is None:
raise ValueError('Cannot retrieve the number of target classes')
self._target_class = target_class
i = tf.range(batch_size, dtype=tf.int32)
self._wrong_idx = tf.stack([
tf.tile(tf.reshape(i, [batch_size, 1]), [1, self.num_specifications]),
target_class
], axis=2)
@property
def target_class(self):
"""Returns the target class index."""
return self._target_class
@property
def num_specifications(self):
return self._num_specifications
class RandomClassificationSpecification(TargetedClassificationSpecification):
"""Creates a single random specification that targets a random class."""
def __init__(self, label, num_classes, num_targets=1, seed=None,
collapse=True):
# Overwrite the target indices. Each session.run() call gets new target
# indices, but the indices remain the same across restarts.
batch_size = tf.shape(label)[0]
j = tf.random.uniform(shape=(batch_size, num_targets), minval=1,
maxval=num_classes, dtype=tf.int32, seed=seed)
target_class = tf.mod(tf.cast(tf.expand_dims(label, -1), tf.int32) + j,
num_classes)
super(RandomClassificationSpecification, self).__init__(
label, num_classes, target_class, collapse=collapse)
class LeastLikelyClassificationSpecification(
TargetedClassificationSpecification):
"""Creates a single specification that targets the least likely class."""
def __init__(self, label, num_classes, logits, num_targets=1, collapse=True):
# Do not target the true class. If the true class is the least likely to
# be predicted, it is fine to target any other class as the attack will
# be successful anyway.
j = tf.nn.top_k(-logits, k=num_targets, sorted=False).indices
l = tf.expand_dims(label, 1)
target_class = tf.mod(
j + tf.cast(tf.equal(j, tf.cast(l, tf.int32)), tf.int32), num_classes)
super(LeastLikelyClassificationSpecification, self).__init__(
label, num_classes, target_class, collapse=collapse)
|
|
import re
import os
from zope.interface.interfaces import ComponentLookupError
from zope.component import getUtility
from node.ext.directory import Directory
from node.ext.template import (
XMLTemplate,
DTMLTemplate,
)
from node.ext.zcml import (
ZCMLFile,
SimpleDirective,
)
from agx.core import (
handler,
token,
)
from agx.core.interfaces import IScope
from agx.core.util import (
read_target_node,
dotted_path,
)
from node.ext import python
from node.ext.python.utils import Imports
from node.ext.uml.utils import (
TaggedValues,
UNSET,
)
from agx.generator.pyegg.utils import (
templatepath,
set_copyright,
implicit_dotted_path,
egg_source,
)
from agx.generator.zca.utils import addZcmlRef
from node.ext.python import Attribute
from agx.generator.pyegg.utils import class_full_name
from agx.generator.zca import utils as zcautils
@handler('plonebrowserview', 'uml2fs', 'plonegenerator',
'viewclass', order=150)
def plonebrowserview(self, source, target):
view = source
if view.stereotype('pyegg:function'):
# XXX: <<function>> <<adapter>> on class
return
tok = token(str(view.uuid), True, browserpages=[])
pack = source.parent
target = read_target_node(pack, target.target)
targetclass = read_target_node(view, target)
if isinstance(target, python.Module):
targetdir = target.parent
else:
targetdir = target
path = targetdir.path
path.append('browser.zcml')
fullpath = os.path.join(*path)
if 'browser.zcml' not in targetdir:
zcml = ZCMLFile(fullpath)
zcml.nsmap['browser'] = 'http://namespaces.zope.org/browser'
targetdir['browser.zcml'] = zcml
else:
zcml = targetdir['browser.zcml']
addZcmlRef(targetdir, zcml)
targettok = token(
str(targetclass.uuid), True, browserpages=[], provides=None)
_for = [token(str(context.supplier.uuid), False).fullpath \
for context in tok.browserpages] or ['*']
classpath = class_full_name(targetclass)
tgv = TaggedValues(view)
# create the templates dir
if 'templates' not in targetdir.keys():
targetdir['templates'] = Directory('templates')
templates = targetdir['templates']
templates.factories['.pt'] = XMLTemplate
#create the browser:page entries
for bp in tok.browserpages or [None]:
#name of view: if it should have a constant name, change the last param
viewname = tgv.direct('name', 'plone:view', None) or \
tgv.direct('name', 'plone:dynamic_view', view.xminame.lower())
name_raw = tgv.direct('name', 'plone:view', None) or \
tgv.direct('name', 'plone:dynamic_view', None)
name = name_raw or view.xminame.lower()
template_name_raw = tgv.direct('template_name', 'plone:view', None) or \
tgv.direct('template_name', 'plone:dynamic_view', None)
template_name = template_name_raw or name + '.pt'
permission = tgv.direct('permission', 'plone:view', None) or \
tgv.direct('permission', 'plone:dynamic_view', None)
layer = tgv.direct('layer', 'plone:view', None) or \
tgv.direct('layer', 'plone:dynamic_view', None)
if bp:
bptgv = TaggedValues(bp)
bptok = token(str(bp.supplier.uuid), False)
_for = bptok.fullpath
print 'xminame:',bp,bp.xminame
# consider uuid as an unset name
if bp.xminame is None or re.match('[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', bp.xminame):
bpname = None
else:
bpname = bp.xminame.lower()
if bp.xminame:
viewname = bp.xminame
viewname = bptgv.direct('name', 'plone:view', None) or \
bptgv.direct('name', 'plone:dynamic_view', viewname)
name = bptgv.direct('name', 'plone:view', None) or \
bptgv.direct('name', 'plone:dynamic_view', bpname or name)
# override template name
template_name = bptgv.direct(
'template_name', 'plone:view', None) or \
bptgv.direct(
'template_name', 'plone:dynamic_view', None) or \
template_name_raw or name + '.pt'
permission = bptgv.direct('permission', 'plone:view', None) or \
bptgv.direct('permission', 'plone:dynamic_view', permission)
layer = bptgv.direct('layer', 'plone:view', None) or \
bptgv.direct('layer', 'plone:dynamic_view', layer)
else:
_for = '*'
found_browserpages = zcml.filter(
tag='browser:page', attr='name', value=viewname)
browser = None
templatepath = 'templates/' + template_name
if found_browserpages:
for br in found_browserpages: #check if it really matches
if br.attrs.get('class') == classpath and \
_for == br.attrs['for']:
browser = br
break
if not browser:
browser = SimpleDirective(name='browser:page', parent=zcml)
browser.attrs['for'] = _for
if viewname and not viewname is UNSET:
browser.attrs['name'] = viewname
browser.attrs['class'] = classpath
browser.attrs['template'] = templatepath
browser.attrs['permission'] = permission or 'zope2.View'
if layer:
browser.attrs['layer'] = layer
# spit out the page vanilla template
if template_name not in templates.keys():
pt = XMLTemplate()
templates[template_name] = pt
# set template for viewtemplate
pt.template = 'agx.generator.plone:templates/viewtemplate.pt'
@handler('zcviewdepcollect', 'uml2fs', 'connectorgenerator',
'dependency', order=140)
def zcviewdepcollect(self, source, target):
"""Collect all view dependencies
"""
scope = getUtility(IScope, 'uml2fs.viewclass')
pack = source.parent
dep = source
context = source.supplier
view = source.client
if not scope(view): #we are only interested in views!
return
target = read_target_node(pack, target.target)
targetcontext = read_target_node(context, target)
targetview = read_target_node(view, target)
tok = token(str(view.uuid), True, browserpages=[])
contexttok = token(str(context.uuid), True, fullpath=None)
if targetcontext:
try:
dont_generate = token(str(targetcontext.uuid),
False, dont_generate=False).dont_generate
if dont_generate:
iface = targetcontext.parent.classes(
'I' + targetcontext.classname)[0]
contexttok.fullpath = class_full_name(iface)
else:
contexttok.fullpath = class_full_name(targetcontext)
# dont_generate doesnt seem to be defined here
except ComponentLookupError:
pass
# its a stub
else:
contexttok.fullpath = '.'.join(
[TaggedValues(context).direct('import', 'pyegg:stub'),
context.name])
if isinstance(target, python.Module):
targetdir = target.parent
else:
targetdir = target
tok.browserpages.append(dep)
@handler('zcviewfinalize', 'uml2fs', 'plonegenerator', 'viewclass', order=145)
def zcviewfinalize(self, source, target):
"""Create zope interface.
"""
if source.stereotype('pyegg:stub') is not None:
return
view = source
targetview = read_target_node(view, target.target)
name = source.name
module = targetview.parent
imp = Imports(module)
imp.set('Products.Five', [['BrowserView', None]])
set_copyright(source, module)
if module.classes(name):
class_ = module.classes(name)[0]
else:
class_ = python.Class(name)
module[name] = class_
if 'BrowserView' not in targetview.bases:
targetview.bases.append('BrowserView')
###############################
# XXX: move below to separate module
@handler('plone__init__', 'uml2fs', 'hierarchygenerator', 'gsprofile', order=30)
def plone__init__(self, source, target):
egg = egg_source(source)
eggname = egg.name
targetdir = read_target_node(source, target.target)
module = targetdir['__init__.py']
imp = Imports(module)
imp.set('zope.i18nmessageid', [['MessageFactory', None]])
value = 'MessageFactory("%s")' % eggname
atts = [att for att in module.attributes() if '_' in att.targets]
if atts:
atts[0].value = value
else:
module['_'] = Attribute('_', value)
@handler('resourcedirectory', 'uml2fs', 'zcasemanticsgenerator',
'gsprofile', order=50)
def resourcedirectory(self, source, target):
"""Create resource directory and register in ZCML.
"""
egg = egg_source(source)
eggname = egg.name
targetdir = read_target_node(source, target.target)
if 'resources' not in targetdir.keys():
targetdir['resources'] = Directory()
path = targetdir.path
path.append('browser.zcml')
fullpath = os.path.join(*path)
if 'browser.zcml' not in targetdir:
zcml = ZCMLFile(fullpath)
zcml.nsmap['browser'] = 'http://namespaces.zope.org/browser'
targetdir['browser.zcml'] = zcml
else:
zcml = targetdir['browser.zcml']
addZcmlRef(targetdir, zcml)
# add the resourceDirectory stmt
zcautils.set_zcml_directive(targetdir, 'browser.zcml', 'include', 'package', "zope.browserresource", file="meta.zcml")
if not zcml.filter(
tag='browser:resourceDirectory', attr='name', value=eggname):
directory = SimpleDirective(
name='browser:resourceDirectory', parent=zcml)
directory.attrs['name'] = eggname
directory.attrs['directory'] = 'resources'
@handler('resourceregistries', 'uml2fs', 'zcasemanticsgenerator',
'gsprofile', order=60)
def resourceregistries(self, source, target):
"""Create main.css and main.js file in resources directory.
Runs after browser.zcml has been created.
"""
egg = egg_source(source)
eggname = egg.name
targetdir = read_target_node(source, target.target)
resources = targetdir['resources']
resources.factories['main.css'] = DTMLTemplate
resources.factories['main.js'] = DTMLTemplate
if not 'main.css' in resources:
css = resources['main.css'] = DTMLTemplate()
else:
css = resources['main.css']
css.template = 'agx.generator.plone:templates/main.css.dtml'
css.params['project'] = eggname
css.SECTION_BEGIN = '/* code-section'
css.SECTION_END = '/* /code-section'
css.SECTION_POSTFIX = ' */'
if not 'main.js' in resources:
js = resources['main.js'] = DTMLTemplate()
else:
js = resources['main.js']
js.template = 'agx.generator.plone:templates/main.js.dtml'
js.params['project'] = eggname
js.SECTION_BEGIN = '// code-section'
js.SECTION_END = '// /code-section'
js.SECTION_POSTFIX = ''
|
|
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""googledatastore helper."""
import calendar
import datetime
import logging
import os
import httplib2
from oauth2client import client
from oauth2client import gce
from googledatastore import connection
from googledatastore.connection import datastore_v1_pb2
__all__ = [
'get_credentials_from_env',
'add_key_path',
'add_properties',
'set_property',
'set_value',
'get_value',
'get_property_dict',
'set_kind',
'add_property_orders',
'add_projection',
'set_property_filter',
'set_composite_filter',
'to_timestamp_usec',
'from_timestamp_usec',
]
def get_credentials_from_env():
"""Get datastore credentials from the environment.
Try and fallback on the following credentials in that order:
- Google APIs Signed JWT credentials based on
DATASTORE_SERVICE_ACCOUNT and DATASTORE_PRIVATE_KEY_FILE
environment variables
- Compute Engine service account
- No credentials (development server)
Returns:
datastore credentials.
"""
# If DATASTORE_SERVICE_ACCOUNT and DATASTORE_PRIVATE_KEY_FILE
# environment variables are defined: use Google APIs Console Service
# Accounts (signed JWT). Note that the corresponding service account
# should be an admin of the datastore application.
service_account = os.getenv('DATASTORE_SERVICE_ACCOUNT')
key_path = os.getenv('DATASTORE_PRIVATE_KEY_FILE')
if service_account and key_path:
with open(key_path, 'rb') as f:
key = f.read()
credentials = client.SignedJwtAssertionCredentials(
service_account, key, connection.SCOPE)
logging.info('connecting using DatastoreSignedJwtCredentials')
return credentials
try:
# Fallback on getting Compute Engine credentials from the metadata server
# to connect to the datastore service. Note that the corresponding
# service account should be an admin of the datastore application.
credentials = gce.AppAssertionCredentials(connection.SCOPE)
http = httplib2.Http()
credentials.authorize(http)
# Force first credentials refresh to detect if we are running on
# Compute Engine.
credentials.refresh(http)
logging.info('connecting using compute credentials')
return credentials
except (client.AccessTokenRefreshError, httplib2.HttpLib2Error):
# Fallback on no credentials if no DATASTORE_ environment
# variables are defined and Compute Engine auth failed. Note that
# it will only authorize calls to the development server.
logging.info('connecting using no credentials')
return None
def get_dataset_from_env():
"""Get datastore dataset_id from the environment.
Try and fallback on the following sources in that order:
- DATASTORE_DATASET environment variables
- Cloud Project ID from Compute Engine metadata server.
- None
Returns:
datastore dataset id.
"""
# If DATASTORE_DATASET environment variable is defined return it.
dataset_id = os.getenv('DATASTORE_DATASET')
if dataset_id:
return dataset_id
# Fallback on returning the Cloud Project ID from Compute Engine
# metadata server.
try:
_, content = httplib2.Http().request(
'http://metadata/computeMetadata/v1/project/project-id',
headers={'X-Google-Metadata-Request': 'True'})
return content
except httplib2.HttpLib2Error:
return None
def add_key_path(key_proto, *path_elements):
"""Add path elements to the given datastore.Key proto message.
Args:
key_proto: datastore.Key proto message.
path_elements: list of ancestors to add to the key.
(kind1, id1/name1, ..., kindN, idN/nameN); the last 2 elements
represent the entity key. If there is no terminating id/name, the key
will be an incomplete key.
Raises:
TypeError: the given id or name has the wrong type.
Returns:
the same datastore.Key.
Usage:
>>> add_key_path(key_proto, 'Kind', 'name') # no parent, with name
datastore.Key(...)
>>> add_key_path(key_proto, 'Kind2', 1) # no parent, with id
datastore.Key(...)
>>> add_key_path(key_proto, 'Kind', 'name', 'Kind2', 1) # parent, complete
datastore.Key(...)
>>> add_key_path(key_proto, 'Kind', 'name', 'Kind2') # parent, incomplete
datastore.Key(...)
"""
for i in range(0, len(path_elements), 2):
pair = path_elements[i:i+2]
elem = key_proto.path_element.add()
elem.kind = pair[0]
if len(pair) == 1:
return # incomplete key
id_or_name = pair[1]
if isinstance(id_or_name, (int, long)):
elem.id = id_or_name
elif isinstance(id_or_name, basestring):
elem.name = id_or_name
else:
raise TypeError(
'Expected an integer id or string name as argument %d; '
'received %r (a %s).' % (i + 2, id_or_name, type(id_or_name)))
return key_proto
def add_properties(entity_proto, property_dict, indexed=None):
"""Add values to the given datastore.Entity proto message.
Args:
entity_proto: datastore.Entity proto message.
property_dict: a dictionary from property name to either a python object or
datastore.Value.
indexed: if the property values should be indexed. None leaves indexing as
is (defaults to True if value is a python object).
Usage:
>>> add_properties(proto, {'foo': u'a', 'bar': [1, 2]})
Raises:
TypeError: if a given property value type is not supported.
"""
for name, value in property_dict.iteritems():
set_property(entity_proto.property.add(), name, value, indexed)
def set_property(property_proto, name, value, indexed=None):
"""Set property value in the given datastore.Property proto message.
Args:
property_proto: datastore.Property proto message.
name: name of the property.
value: python object or datastore.Value.
indexed: if the value should be indexed. None leaves indexing as is
(defaults to True if value is a python object).
Usage:
>>> set_property(property_proto, 'foo', u'a')
Raises:
TypeError: if the given value type is not supported.
"""
property_proto.Clear()
property_proto.name = name
set_value(property_proto.value, value, indexed)
def set_value(value_proto, value, indexed=None):
"""Set the corresponding datastore.Value _value field for the given arg.
Args:
value_proto: datastore.Value proto message.
value: python object or datastore.Value. (unicode value will set a
datastore string value, str value will set a blob string value).
Undefined behavior if value is/contains value_proto.
indexed: if the value should be indexed. None leaves indexing as is
(defaults to True if value is not a Value message).
Raises:
TypeError: if the given value type is not supported.
"""
value_proto.Clear()
if isinstance(value, (list, tuple)):
for sub_value in value:
set_value(value_proto.list_value.add(), sub_value, indexed)
return # do not set indexed for a list property.
if isinstance(value, datastore_v1_pb2.Value):
value_proto.MergeFrom(value)
elif isinstance(value, unicode):
value_proto.string_value = value
elif isinstance(value, str):
value_proto.blob_value = value
elif isinstance(value, bool):
value_proto.boolean_value = value
elif isinstance(value, int):
value_proto.integer_value = value
elif isinstance(value, long):
# Proto will complain if the value is too large.
value_proto.integer_value = value
elif isinstance(value, float):
value_proto.double_value = value
elif isinstance(value, datetime.datetime):
value_proto.timestamp_microseconds_value = to_timestamp_usec(value)
elif isinstance(value, datastore_v1_pb2.Key):
value_proto.key_value.CopyFrom(value)
elif isinstance(value, datastore_v1_pb2.Entity):
value_proto.entity_value.CopyFrom(value)
else:
raise TypeError('value type: %r not supported' % (value,))
if isinstance(indexed, bool) and indexed:
value_proto.ClearField('indexed') # The default is true.
elif indexed is not None:
value_proto.indexed = indexed
def get_value(value_proto):
"""Gets the python object equivalent for the given value proto.
Args:
value_proto: datastore.Value proto message.
Returns:
    the corresponding python object value. Timestamps are converted to
    datetime, and the datastore.Value itself is returned for blob_key_value.
"""
for f in ('string_value',
'blob_value',
'boolean_value',
'integer_value',
'double_value',
'key_value',
'entity_value'):
if value_proto.HasField(f):
return getattr(value_proto, f)
if value_proto.HasField('timestamp_microseconds_value'):
return from_timestamp_usec(value_proto.timestamp_microseconds_value)
if value_proto.HasField('blob_key_value'):
return value_proto
if value_proto.list_value:
return [get_value(sub_value) for sub_value in value_proto.list_value]
return None
def get_property_dict(entity_proto):
"""Convert datastore.Entity to a dict of property name -> datastore.Value.
Args:
entity_proto: datastore.Entity proto message.
Usage:
>>> get_property_dict(entity_proto)
{'foo': {string_value='a'}, 'bar': {integer_value=2}}
Returns:
dict of entity properties.
"""
return dict((p.name, p.value) for p in entity_proto.property)
def set_kind(query_proto, kind):
"""Set the kind constraint for the given datastore.Query proto message."""
del query_proto.kind[:]
query_proto.kind.add().name = kind
def add_property_orders(query_proto, *orders):
"""Add ordering constraint for the given datastore.Query proto message.
Args:
query_proto: datastore.Query proto message.
    orders: list of property name strings; ordering defaults to ascending
    and is set to descending when the name is prefixed by '-'.
Usage:
>>> add_property_orders(query_proto, 'foo') # sort by foo asc
>>> add_property_orders(query_proto, '-bar') # sort by bar desc
"""
for order in orders:
proto = query_proto.order.add()
if order[0] == '-':
order = order[1:]
proto.direction = datastore_v1_pb2.PropertyOrder.DESCENDING
proto.property.name = order
def add_projection(query_proto, *projection):
"""Add projection properties to the given datatstore.Query proto message."""
for p in projection:
proto = query_proto.projection.add()
proto.property.name = p
def set_property_filter(filter_proto, name, op, value):
"""Set property filter contraint in the given datastore.Filter proto message.
Args:
filter_proto: datastore.Filter proto message
name: property name
op: datastore.PropertyFilter.Operation
value: property value
Returns:
the same datastore.Filter.
Usage:
>>> set_property_filter(filter_proto, 'foo',
... datastore.PropertyFilter.EQUAL, 'a') # WHERE 'foo' = 'a'
"""
filter_proto.Clear()
pf = filter_proto.property_filter
pf.property.name = name
pf.operator = op
set_value(pf.value, value)
return filter_proto
def set_composite_filter(filter_proto, op, *filters):
"""Set composite filter contraint in the given datastore.Filter proto message.
Args:
filter_proto: datastore.Filter proto message
op: datastore.CompositeFilter.Operation
filters: vararg list of datastore.Filter
Returns:
the same datastore.Filter.
Usage:
>>> set_composite_filter(filter_proto, datastore.CompositeFilter.AND,
... set_property_filter(datastore.Filter(), ...),
... set_property_filter(datastore.Filter(), ...)) # WHERE ... AND ...
"""
filter_proto.Clear()
cf = filter_proto.composite_filter
cf.operator = op
for f in filters:
cf.filter.add().CopyFrom(f)
return filter_proto
_EPOCH = datetime.datetime.utcfromtimestamp(0)
def from_timestamp_usec(timestamp):
"""Convert microsecond timestamp to datetime."""
return _EPOCH + datetime.timedelta(microseconds=timestamp)
def to_timestamp_usec(dt):
"""Convert datetime to microsecond timestamp.
Args:
dt: a timezone naive datetime.
Returns:
a microsecond timestamp as a long.
Raises:
TypeError: if a timezone aware datetime was provided.
"""
if dt.tzinfo:
# this is an "aware" datetime with an explicit timezone. Throw an error.
raise TypeError('Cannot store a timezone aware datetime. '
'Convert to UTC and store the naive datetime.')
return long(calendar.timegm(dt.timetuple()) * 1000000L) + dt.microsecond
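# A minimal composition sketch (illustrative only, not part of the helper API):
# it strings the helpers above together to build an entity with an ancestor key
# and a filtered, ordered query. Kind and property names here are placeholders.
def _example_entity_and_query():
  entity = datastore_v1_pb2.Entity()
  # Ancestor path 'Parent:tom' with child entity 'Child:1'.
  add_key_path(entity.key, 'Parent', 'tom', 'Child', 1)
  # Two indexed properties on the entity.
  add_properties(entity, {'name': u'a', 'count': 2}, indexed=True)
  # Query all 'Child' entities named 'a', highest count first.
  query = datastore_v1_pb2.Query()
  set_kind(query, 'Child')
  add_property_orders(query, '-count')
  set_property_filter(query.filter, 'name',
                      datastore_v1_pb2.PropertyFilter.EQUAL, u'a')
  return entity, query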
|
|
from memsql.common.connection_pool import MySQLError
from memsql.common import json, errorcodes, sql_utility
import time
from datetime import datetime
from contextlib import contextmanager
PRIMARY_TABLE = """\
CREATE TABLE IF NOT EXISTS %(prefix)s_tasks (
id INT AUTO_INCREMENT PRIMARY KEY,
created TIMESTAMP DEFAULT NOW(),
data JSON,
INDEX (created)
)"""
EXECUTION_TABLE = """\
CREATE TABLE IF NOT EXISTS %(prefix)s_executions (
id INT AUTO_INCREMENT PRIMARY KEY,
task_id INT,
steps JSON,
started TIMESTAMP DEFAULT NOW(),
last_contact TIMESTAMP DEFAULT 0,
finished TIMESTAMP DEFAULT 0,
UNIQUE INDEX (task_id),
INDEX (started),
INDEX (last_contact)
)"""
class StepAlreadyStarted(Exception):
pass
class StepNotStarted(Exception):
pass
class StepAlreadyFinished(Exception):
pass
class AlreadyFinished(Exception):
pass
class StepRunning(Exception):
pass
class SQLStepQueue(sql_utility.SQLUtility):
def __init__(self, table_prefix="", execution_ttl=60 * 5):
"""
Initialize the SQLStepQueue with the specified table prefix and
execution TTL (in seconds).
"""
super(SQLStepQueue, self).__init__()
self.execution_ttl = execution_ttl
self.table_prefix = table_prefix.rstrip('_') + '_stepqueue'
self.tasks_table = self.table_prefix + '_tasks'
self.executions_table = self.table_prefix + '_executions'
self._define_table(self.tasks_table, PRIMARY_TABLE % { 'prefix': self.table_prefix })
self._define_table(self.executions_table, EXECUTION_TABLE % { 'prefix': self.table_prefix })
###############################
# Public Interface
def qsize(self):
""" Return an approximate number of queued tasks in the queue. """
count = self._query_queued('COUNT(*) AS count')
return count[0].count
def enqueue(self, data):
""" Enqueue task with specified data. """
jsonified_data = json.dumps(data)
with self._db_conn() as conn:
conn.execute(
'INSERT INTO %s_tasks (data) VALUES (%%s)' % self.table_prefix,
jsonified_data
)
def start(self, block=False, timeout=None, retry_interval=0.5):
"""
Retrieve a task handler from the queue.
If block is True, this function will block until it is able to retrieve a task.
If block is True and timeout is a number it will block for at most <timeout> seconds.
retry_interval is the time in seconds between successive retries.
"""
start = time.time()
        while True:
task_handler = self._dequeue_task()
if task_handler is None and block:
if timeout is not None and (time.time() - start) > timeout:
break
time.sleep(retry_interval)
else:
break
return task_handler
###############################
# Private Interface
def _query_queued(self, projection):
with self._db_conn() as conn:
result = conn.query('''
SELECT
%(projection)s
FROM
%(prefix)s_tasks AS tsk
LEFT JOIN %(prefix)s_executions AS exc ON tsk.id = exc.task_id
WHERE
(
exc.task_id IS NULL
OR (
exc.finished = 0
AND exc.last_contact <= NOW() - INTERVAL %(ttl)s SECOND
)
)
ORDER BY tsk.created ASC -- oldest first
''' % {
'projection': projection,
'prefix': self.table_prefix,
'ttl': self.execution_ttl
})
return result
def _dequeue_task(self):
try:
with self._db_conn() as conn:
conn.execute('''
DELETE FROM %(prefix)s_executions
WHERE
finished = 0
AND last_contact <= NOW() - INTERVAL %(ttl)s SECOND
''' % { 'prefix': self.table_prefix, 'ttl': self.execution_ttl })
execution_id = conn.execute('''
INSERT INTO %(prefix)s_executions (task_id, last_contact, steps)
SELECT
tsk.id, NOW(), '[]'
FROM
%(prefix)s_tasks AS tsk
LEFT JOIN %(prefix)s_executions AS exc ON tsk.id = exc.task_id
WHERE
exc.task_id IS NULL
ORDER BY tsk.created ASC -- oldest first
LIMIT 1
''' % {
'prefix': self.table_prefix,
'ttl': self.execution_ttl
})
if execution_id == 0:
# select returned no rows
return None
return TaskHandler(execution_id=execution_id, queue=self)
        except MySQLError as exc:
            errno = exc.args[0]
            if errno == errorcodes.ER_DUP_ENTRY:
return None
else:
raise
class TaskHandler(object):
def __init__(self, execution_id, queue):
self._execution_id = execution_id
self._queue = queue
self.finished = 0
self.task_id = None
self.data = None
self.steps = None
self._refresh()
###############################
# Public Interface
def valid(self):
if self.finished != 0:
return False
with self._db_conn() as conn:
row = conn.get('''
SELECT
(last_contact > NOW() - INTERVAL %(ttl)s SECOND) AS valid
FROM %(prefix)s_executions
WHERE id = %%s
''' % {
'prefix': self._queue.table_prefix,
'ttl': self._queue.execution_ttl
}, self._execution_id)
return bool(row is not None and row.valid)
def ping(self):
""" Notify the queue that this task is still active. """
if self.finished != 0:
raise AlreadyFinished()
with self._db_conn() as conn:
conn.execute('''
UPDATE %(prefix)s_executions
SET last_contact=NOW()
WHERE id = %%s
''' % { 'prefix': self._queue.table_prefix }, self._execution_id)
def finish(self):
if self._running_steps() != 0:
raise StepRunning()
if self.finished != 0:
raise AlreadyFinished()
self.finished = datetime.utcnow()
self._save()
def start_step(self, step_name):
""" Start a step. """
if self.finished != 0:
raise AlreadyFinished()
step_data = self._get_step(step_name)
if step_data is not None:
if 'stop' in step_data:
raise StepAlreadyFinished()
else:
raise StepAlreadyStarted()
self.steps.append({
"start": datetime.utcnow(),
"name": step_name
})
self._save()
def stop_step(self, step_name):
""" Stop a step. """
if self.finished != 0:
raise AlreadyFinished()
step_data = self._get_step(step_name)
if step_data is None:
raise StepNotStarted()
elif 'stop' in step_data:
raise StepAlreadyFinished()
step_data['stop'] = datetime.utcnow()
step_data['duration'] = (step_data['stop'] - step_data['start']).total_seconds()
self._save()
@contextmanager
def step(self, step_name):
self.start_step(step_name)
yield
self.stop_step(step_name)
###############################
# Private Interface
def _get_step(self, step_name):
for step in self.steps:
if step['name'] == step_name:
return step
return None
def _running_steps(self):
return len([s for s in self.steps if 'stop' not in s])
def _db_conn(self):
return self._queue._db_conn()
def _refresh(self):
with self._db_conn() as conn:
row = conn.get('''
SELECT
exc.task_id, tsk.data, exc.steps
FROM
%(prefix)s_tasks AS tsk
INNER JOIN %(prefix)s_executions AS exc ON tsk.id = exc.task_id
WHERE exc.id = %%s
''' % { 'prefix': self._queue.table_prefix }, self._execution_id)
self.task_id = row.task_id
self.data = json.loads(row.data)
self.steps = json.loads(row.steps)
def _save(self):
with self._db_conn() as conn:
conn.execute('''
UPDATE %(prefix)s_executions
SET
last_contact=NOW(),
steps=%%s,
finished=%%s
WHERE id = %%s
''' % {
'prefix': self._queue.table_prefix
}, json.dumps(self.steps), self.finished, self._execution_id)
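# A minimal usage sketch (illustrative only; nothing in this module calls it).
# The connect()/setup() calls are assumptions about the sql_utility.SQLUtility
# base class API and may need adjusting to however your connection pool is set up.
def _example_step_queue_usage():
    queue = SQLStepQueue(table_prefix="demo", execution_ttl=60)
    queue.connect(host="127.0.0.1", port=3306, user="root", password="", database="demo")  # assumed API
    queue.setup()  # assumed API: creates the _tasks/_executions tables defined above
    queue.enqueue({"payload": 1})
    handler = queue.start(block=True, timeout=5)
    if handler is not None:
        with handler.step("work"):
            handler.ping()  # keep the execution alive during long-running work
        handler.finish()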
|
|
from __future__ import division
from pygal.colors import (
parse_color, unparse_color,
rgb_to_hsl, hsl_to_rgb, darken, lighten, saturate, desaturate, rotate)
def test_parse_color():
assert parse_color('#123') == (17, 34, 51, 1., '#rgb')
assert parse_color('#cdf') == (204, 221, 255, 1., '#rgb')
assert parse_color('#a3d7') == (170, 51, 221, 119 / 255, '#rgba')
assert parse_color('#584b4f') == (88, 75, 79, 1., '#rrggbb')
assert parse_color('#8cbe22') == (140, 190, 34, 1., '#rrggbb')
assert parse_color('#16cbf055') == (22, 203, 240, 1 / 3, '#rrggbbaa')
assert parse_color('rgb(134, 67, 216)') == (134, 67, 216, 1., 'rgb')
assert parse_color('rgb(0, 111, 222)') == (0, 111, 222, 1., 'rgb')
assert parse_color('rgba(237, 83, 48, .8)') == (237, 83, 48, .8, 'rgba')
assert parse_color('rgba(0, 1, 0, 0.1223)') == (0, 1, 0, .1223, 'rgba')
def test_unparse_color():
assert unparse_color(17, 34, 51, 1., '#rgb') == '#123'
assert unparse_color(204, 221, 255, 1., '#rgb') == '#cdf'
assert unparse_color(170, 51, 221, 119 / 255, '#rgba') == '#a3d7'
assert unparse_color(88, 75, 79, 1., '#rrggbb') == '#584b4f'
assert unparse_color(140, 190, 34, 1., '#rrggbb') == '#8cbe22'
assert unparse_color(22, 203, 240, 1 / 3, '#rrggbbaa') == '#16cbf055'
assert unparse_color(134, 67, 216, 1., 'rgb') == 'rgb(134, 67, 216)'
assert unparse_color(0, 111, 222, 1., 'rgb') == 'rgb(0, 111, 222)'
assert unparse_color(237, 83, 48, .8, 'rgba') == 'rgba(237, 83, 48, 0.8)'
assert unparse_color(0, 1, 0, .1223, 'rgba') == 'rgba(0, 1, 0, 0.1223)'
def test_darken():
assert darken('#800', 20) == '#200'
assert darken('#800e', 20) == '#200e'
assert darken('#800', 0) == '#800'
assert darken('#ffffff', 10) == '#e6e6e6'
assert darken('#000000', 10) == '#000000'
assert darken('#f3148a', 25) == '#810747'
assert darken('#f3148aab', 25) == '#810747ab'
assert darken('#121212', 1) == '#0f0f0f'
assert darken('#999999', 100) == '#000000'
assert darken('#99999999', 100) == '#00000099'
assert darken('#1479ac', 8) == '#105f87'
assert darken('rgb(136, 0, 0)', 20) == 'rgb(34, 0, 0)'
assert darken('rgba(20, 121, 172, .13)', 8) == 'rgba(16, 95, 135, 0.13)'
def test_lighten():
assert lighten('#800', 20) == '#e00'
assert lighten('#800', 0) == '#800'
assert lighten('#ffffff', 10) == '#ffffff'
assert lighten('#000000', 10) == '#1a1a1a'
assert lighten('#f3148a', 25) == '#f98dc6'
assert lighten('#121212', 1) == '#151515'
assert lighten('#999999', 100) == '#ffffff'
assert lighten('#1479ac', 8) == '#1893d1'
def test_saturate():
assert saturate('#000', 20) == '#000'
assert saturate('#fff', 20) == '#fff'
assert saturate('#8a8', 100) == '#3f3'
assert saturate('#855', 20) == '#9e3f3f'
def test_desaturate():
assert desaturate('#000', 20) == '#000'
assert desaturate('#fff', 20) == '#fff'
assert desaturate('#8a8', 100) == '#999'
assert desaturate('#855', 20) == '#726b6b'
def test_rotate():
assert rotate('#000', 45) == '#000'
assert rotate('#fff', 45) == '#fff'
assert rotate('#811', 45) == '#886a11'
assert rotate('#8a8', 360) == '#8a8'
assert rotate('#8a8', 0) == '#8a8'
assert rotate('#8a8', -360) == '#8a8'
def test_hsl_to_rgb_part_0():
assert hsl_to_rgb(0, 100, 50) == (255, 0, 0)
assert hsl_to_rgb(60, 100, 50) == (255, 255, 0)
assert hsl_to_rgb(120, 100, 50) == (0, 255, 0)
assert hsl_to_rgb(180, 100, 50) == (0, 255, 255)
assert hsl_to_rgb(240, 100, 50) == (0, 0, 255)
assert hsl_to_rgb(300, 100, 50) == (255, 0, 255)
def test_rgb_to_hsl_part_0():
assert rgb_to_hsl(255, 0, 0) == (0, 100, 50)
assert rgb_to_hsl(255, 255, 0) == (60, 100, 50)
assert rgb_to_hsl(0, 255, 0) == (120, 100, 50)
assert rgb_to_hsl(0, 255, 255) == (180, 100, 50)
assert rgb_to_hsl(0, 0, 255) == (240, 100, 50)
assert rgb_to_hsl(255, 0, 255) == (300, 100, 50)
def test_hsl_to_rgb_part_1():
assert hsl_to_rgb(-360, 100, 50) == (255, 0, 0)
assert hsl_to_rgb(-300, 100, 50) == (255, 255, 0)
assert hsl_to_rgb(-240, 100, 50) == (0, 255, 0)
assert hsl_to_rgb(-180, 100, 50) == (0, 255, 255)
assert hsl_to_rgb(-120, 100, 50) == (0, 0, 255)
assert hsl_to_rgb(-60, 100, 50) == (255, 0, 255)
def test_rgb_to_hsl_part_1():
# assert rgb_to_hsl(255, 0, 0) == (-360, 100, 50)
# assert rgb_to_hsl(255, 255, 0) == (-300, 100, 50)
# assert rgb_to_hsl(0, 255, 0) == (-240, 100, 50)
# assert rgb_to_hsl(0, 255, 255) == (-180, 100, 50)
# assert rgb_to_hsl(0, 0, 255) == (-120, 100, 50)
# assert rgb_to_hsl(255, 0, 255) == (-60, 100, 50)
pass
def test_hsl_to_rgb_part_2():
assert hsl_to_rgb(360, 100, 50) == (255, 0, 0)
assert hsl_to_rgb(420, 100, 50) == (255, 255, 0)
assert hsl_to_rgb(480, 100, 50) == (0, 255, 0)
assert hsl_to_rgb(540, 100, 50) == (0, 255, 255)
assert hsl_to_rgb(600, 100, 50) == (0, 0, 255)
assert hsl_to_rgb(660, 100, 50) == (255, 0, 255)
def test_rgb_to_hsl_part_2():
# assert rgb_to_hsl(255, 0, 0) == (360, 100, 50)
# assert rgb_to_hsl(255, 255, 0) == (420, 100, 50)
# assert rgb_to_hsl(0, 255, 0) == (480, 100, 50)
# assert rgb_to_hsl(0, 255, 255) == (540, 100, 50)
# assert rgb_to_hsl(0, 0, 255) == (600, 100, 50)
# assert rgb_to_hsl(255, 0, 255) == (660, 100, 50)
pass
def test_hsl_to_rgb_part_3():
assert hsl_to_rgb(6120, 100, 50) == (255, 0, 0)
assert hsl_to_rgb(-9660, 100, 50) == (255, 255, 0)
assert hsl_to_rgb(99840, 100, 50) == (0, 255, 0)
assert hsl_to_rgb(-900, 100, 50) == (0, 255, 255)
assert hsl_to_rgb(-104880, 100, 50) == (0, 0, 255)
assert hsl_to_rgb(2820, 100, 50) == (255, 0, 255)
def test_rgb_to_hsl_part_3():
# assert rgb_to_hsl(255, 0, 0) == (6120, 100, 50)
# assert rgb_to_hsl(255, 255, 0) == (-9660, 100, 50)
# assert rgb_to_hsl(0, 255, 0) == (99840, 100, 50)
# assert rgb_to_hsl(0, 255, 255) == (-900, 100, 50)
# assert rgb_to_hsl(0, 0, 255) == (-104880, 100, 50)
# assert rgb_to_hsl(255, 0, 255) == (2820, 100, 50)
pass
def test_hsl_to_rgb_part_4():
assert hsl_to_rgb(0, 100, 50) == (255, 0, 0)
assert hsl_to_rgb(12, 100, 50) == (255, 51, 0)
assert hsl_to_rgb(24, 100, 50) == (255, 102, 0)
assert hsl_to_rgb(36, 100, 50) == (255, 153, 0)
assert hsl_to_rgb(48, 100, 50) == (255, 204, 0)
assert hsl_to_rgb(60, 100, 50) == (255, 255, 0)
assert hsl_to_rgb(72, 100, 50) == (204, 255, 0)
assert hsl_to_rgb(84, 100, 50) == (153, 255, 0)
assert hsl_to_rgb(96, 100, 50) == (102, 255, 0)
assert hsl_to_rgb(108, 100, 50) == (51, 255, 0)
assert hsl_to_rgb(120, 100, 50) == (0, 255, 0)
def test_rgb_to_hsl_part_4():
assert rgb_to_hsl(255, 0, 0) == (0, 100, 50)
assert rgb_to_hsl(255, 51, 0) == (12, 100, 50)
assert rgb_to_hsl(255, 102, 0) == (24, 100, 50)
assert rgb_to_hsl(255, 153, 0) == (36, 100, 50)
assert rgb_to_hsl(255, 204, 0) == (48, 100, 50)
assert rgb_to_hsl(255, 255, 0) == (60, 100, 50)
assert rgb_to_hsl(204, 255, 0) == (72, 100, 50)
assert rgb_to_hsl(153, 255, 0) == (84, 100, 50)
assert rgb_to_hsl(102, 255, 0) == (96, 100, 50)
assert rgb_to_hsl(51, 255, 0) == (108, 100, 50)
assert rgb_to_hsl(0, 255, 0) == (120, 100, 50)
def test_hsl_to_rgb_part_5():
assert hsl_to_rgb(120, 100, 50) == (0, 255, 0)
assert hsl_to_rgb(132, 100, 50) == (0, 255, 51)
assert hsl_to_rgb(144, 100, 50) == (0, 255, 102)
assert hsl_to_rgb(156, 100, 50) == (0, 255, 153)
assert hsl_to_rgb(168, 100, 50) == (0, 255, 204)
assert hsl_to_rgb(180, 100, 50) == (0, 255, 255)
assert hsl_to_rgb(192, 100, 50) == (0, 204, 255)
assert hsl_to_rgb(204, 100, 50) == (0, 153, 255)
assert hsl_to_rgb(216, 100, 50) == (0, 102, 255)
assert hsl_to_rgb(228, 100, 50) == (0, 51, 255)
assert hsl_to_rgb(240, 100, 50) == (0, 0, 255)
def test_rgb_to_hsl_part_5():
assert rgb_to_hsl(0, 255, 0) == (120, 100, 50)
assert rgb_to_hsl(0, 255, 51) == (132, 100, 50)
assert rgb_to_hsl(0, 255, 102) == (144, 100, 50)
assert rgb_to_hsl(0, 255, 153) == (156, 100, 50)
assert rgb_to_hsl(0, 255, 204) == (168, 100, 50)
assert rgb_to_hsl(0, 255, 255) == (180, 100, 50)
assert rgb_to_hsl(0, 204, 255) == (192, 100, 50)
assert rgb_to_hsl(0, 153, 255) == (204, 100, 50)
assert rgb_to_hsl(0, 102, 255) == (216, 100, 50)
assert rgb_to_hsl(0, 51, 255) == (228, 100, 50)
assert rgb_to_hsl(0, 0, 255) == (240, 100, 50)
def test_hsl_to_rgb_part_6():
assert hsl_to_rgb(240, 100, 50) == (0, 0, 255)
assert hsl_to_rgb(252, 100, 50) == (51, 0, 255)
assert hsl_to_rgb(264, 100, 50) == (102, 0, 255)
assert hsl_to_rgb(276, 100, 50) == (153, 0, 255)
assert hsl_to_rgb(288, 100, 50) == (204, 0, 255)
assert hsl_to_rgb(300, 100, 50) == (255, 0, 255)
assert hsl_to_rgb(312, 100, 50) == (255, 0, 204)
assert hsl_to_rgb(324, 100, 50) == (255, 0, 153)
assert hsl_to_rgb(336, 100, 50) == (255, 0, 102)
assert hsl_to_rgb(348, 100, 50) == (255, 0, 51)
assert hsl_to_rgb(360, 100, 50) == (255, 0, 0)
def test_rgb_to_hsl_part_6():
assert rgb_to_hsl(0, 0, 255) == (240, 100, 50)
assert rgb_to_hsl(51, 0, 255) == (252, 100, 50)
assert rgb_to_hsl(102, 0, 255) == (264, 100, 50)
assert rgb_to_hsl(153, 0, 255) == (276, 100, 50)
assert rgb_to_hsl(204, 0, 255) == (288, 100, 50)
assert rgb_to_hsl(255, 0, 255) == (300, 100, 50)
assert rgb_to_hsl(255, 0, 204) == (312, 100, 50)
assert rgb_to_hsl(255, 0, 153) == (324, 100, 50)
assert rgb_to_hsl(255, 0, 102) == (336, 100, 50)
assert rgb_to_hsl(255, 0, 51) == (348, 100, 50)
# assert rgb_to_hsl(255, 0, 0) == (360, 100, 50)
def test_hsl_to_rgb_part_7():
assert hsl_to_rgb(0, 20, 50) == (153, 102, 102)
assert hsl_to_rgb(0, 60, 50) == (204, 51, 51)
assert hsl_to_rgb(0, 100, 50) == (255, 0, 0)
def test_rgb_to_hsl_part_7():
assert rgb_to_hsl(153, 102, 102) == (0, 20, 50)
assert rgb_to_hsl(204, 51, 51) == (0, 60, 50)
assert rgb_to_hsl(255, 0, 0) == (0, 100, 50)
def test_hsl_to_rgb_part_8():
assert hsl_to_rgb(60, 20, 50) == (153, 153, 102)
assert hsl_to_rgb(60, 60, 50) == (204, 204, 51)
assert hsl_to_rgb(60, 100, 50) == (255, 255, 0)
def test_rgb_to_hsl_part_8():
assert rgb_to_hsl(153, 153, 102) == (60, 20, 50)
assert rgb_to_hsl(204, 204, 51) == (60, 60, 50)
assert rgb_to_hsl(255, 255, 0) == (60, 100, 50)
def test_hsl_to_rgb_part_9():
assert hsl_to_rgb(120, 20, 50) == (102, 153, 102)
assert hsl_to_rgb(120, 60, 50) == (51, 204, 51)
assert hsl_to_rgb(120, 100, 50) == (0, 255, 0)
def test_rgb_to_hsl_part_9():
assert rgb_to_hsl(102, 153, 102) == (120, 20, 50)
assert rgb_to_hsl(51, 204, 51) == (120, 60, 50)
assert rgb_to_hsl(0, 255, 0) == (120, 100, 50)
def test_hsl_to_rgb_part_10():
assert hsl_to_rgb(180, 20, 50) == (102, 153, 153)
assert hsl_to_rgb(180, 60, 50) == (51, 204, 204)
assert hsl_to_rgb(180, 100, 50) == (0, 255, 255)
def test_rgb_to_hsl_part_10():
assert rgb_to_hsl(102, 153, 153) == (180, 20, 50)
assert rgb_to_hsl(51, 204, 204) == (180, 60, 50)
assert rgb_to_hsl(0, 255, 255) == (180, 100, 50)
def test_hsl_to_rgb_part_11():
assert hsl_to_rgb(240, 20, 50) == (102, 102, 153)
assert hsl_to_rgb(240, 60, 50) == (51, 51, 204)
assert hsl_to_rgb(240, 100, 50) == (0, 0, 255)
def test_rgb_to_hsl_part_11():
assert rgb_to_hsl(102, 102, 153) == (240, 20, 50)
assert rgb_to_hsl(51, 51, 204) == (240, 60, 50)
assert rgb_to_hsl(0, 0, 255) == (240, 100, 50)
def test_hsl_to_rgb_part_12():
assert hsl_to_rgb(300, 20, 50) == (153, 102, 153)
assert hsl_to_rgb(300, 60, 50) == (204, 51, 204)
assert hsl_to_rgb(300, 100, 50) == (255, 0, 255)
def test_rgb_to_hsl_part_12():
assert rgb_to_hsl(153, 102, 153) == (300, 20, 50)
assert rgb_to_hsl(204, 51, 204) == (300, 60, 50)
assert rgb_to_hsl(255, 0, 255) == (300, 100, 50)
def test_hsl_to_rgb_part_13():
assert hsl_to_rgb(0, 100, 0) == (0, 0, 0)
assert hsl_to_rgb(0, 100, 10) == (51, 0, 0)
assert hsl_to_rgb(0, 100, 20) == (102, 0, 0)
assert hsl_to_rgb(0, 100, 30) == (153, 0, 0)
assert hsl_to_rgb(0, 100, 40) == (204, 0, 0)
assert hsl_to_rgb(0, 100, 50) == (255, 0, 0)
assert hsl_to_rgb(0, 100, 60) == (255, 51, 51)
assert hsl_to_rgb(0, 100, 70) == (255, 102, 102)
assert hsl_to_rgb(0, 100, 80) == (255, 153, 153)
assert hsl_to_rgb(0, 100, 90) == (255, 204, 204)
assert hsl_to_rgb(0, 100, 100) == (255, 255, 255)
def test_rgb_to_hsl_part_13():
assert rgb_to_hsl(0, 0, 0) == (0, 0, 0)
assert rgb_to_hsl(51, 0, 0) == (0, 100, 10)
assert rgb_to_hsl(102, 0, 0) == (0, 100, 20)
assert rgb_to_hsl(153, 0, 0) == (0, 100, 30)
assert rgb_to_hsl(204, 0, 0) == (0, 100, 40)
assert rgb_to_hsl(255, 0, 0) == (0, 100, 50)
assert rgb_to_hsl(255, 51, 51) == (0, 100, 60)
assert rgb_to_hsl(255, 102, 102) == (0, 100, 70)
assert rgb_to_hsl(255, 153, 153) == (0, 100, 80)
assert rgb_to_hsl(255, 204, 204) == (0, 100, 90)
assert rgb_to_hsl(255, 255, 255) == (0, 0, 100)
def test_hsl_to_rgb_part_14():
assert hsl_to_rgb(60, 100, 0) == (0, 0, 0)
assert hsl_to_rgb(60, 100, 10) == (51, 51, 0)
assert hsl_to_rgb(60, 100, 20) == (102, 102, 0)
assert hsl_to_rgb(60, 100, 30) == (153, 153, 0)
assert hsl_to_rgb(60, 100, 40) == (204, 204, 0)
assert hsl_to_rgb(60, 100, 50) == (255, 255, 0)
assert hsl_to_rgb(60, 100, 60) == (255, 255, 51)
assert hsl_to_rgb(60, 100, 70) == (255, 255, 102)
assert hsl_to_rgb(60, 100, 80) == (255, 255, 153)
assert hsl_to_rgb(60, 100, 90) == (255, 255, 204)
assert hsl_to_rgb(60, 100, 100) == (255, 255, 255)
def test_rgb_to_hsl_part_14():
# assert rgb_to_hsl(0, 0, 0) == (60, 100, 0)
assert rgb_to_hsl(51, 51, 0) == (60, 100, 10)
assert rgb_to_hsl(102, 102, 0) == (60, 100, 20)
assert rgb_to_hsl(153, 153, 0) == (60, 100, 30)
assert rgb_to_hsl(204, 204, 0) == (60, 100, 40)
assert rgb_to_hsl(255, 255, 0) == (60, 100, 50)
assert rgb_to_hsl(255, 255, 51) == (60, 100, 60)
assert rgb_to_hsl(255, 255, 102) == (60, 100, 70)
assert rgb_to_hsl(255, 255, 153) == (60, 100, 80)
assert rgb_to_hsl(255, 255, 204) == (60, 100, 90)
# assert rgb_to_hsl(255, 255, 255) == (60, 100, 100)
def test_hsl_to_rgb_part_15():
assert hsl_to_rgb(120, 100, 0) == (0, 0, 0)
assert hsl_to_rgb(120, 100, 10) == (0, 51, 0)
assert hsl_to_rgb(120, 100, 20) == (0, 102, 0)
assert hsl_to_rgb(120, 100, 30) == (0, 153, 0)
assert hsl_to_rgb(120, 100, 40) == (0, 204, 0)
assert hsl_to_rgb(120, 100, 50) == (0, 255, 0)
assert hsl_to_rgb(120, 100, 60) == (51, 255, 51)
assert hsl_to_rgb(120, 100, 70) == (102, 255, 102)
assert hsl_to_rgb(120, 100, 80) == (153, 255, 153)
assert hsl_to_rgb(120, 100, 90) == (204, 255, 204)
assert hsl_to_rgb(120, 100, 100) == (255, 255, 255)
def test_rgb_to_hsl_part_15():
# assert rgb_to_hsl(0, 0, 0) == (120, 100, 0)
assert rgb_to_hsl(0, 51, 0) == (120, 100, 10)
assert rgb_to_hsl(0, 102, 0) == (120, 100, 20)
assert rgb_to_hsl(0, 153, 0) == (120, 100, 30)
assert rgb_to_hsl(0, 204, 0) == (120, 100, 40)
assert rgb_to_hsl(0, 255, 0) == (120, 100, 50)
assert rgb_to_hsl(51, 255, 51) == (120, 100, 60)
assert rgb_to_hsl(102, 255, 102) == (120, 100, 70)
assert rgb_to_hsl(153, 255, 153) == (120, 100, 80)
assert rgb_to_hsl(204, 255, 204) == (120, 100, 90)
# assert rgb_to_hsl(255, 255, 255) == (120, 100, 100)
def test_hsl_to_rgb_part_16():
assert hsl_to_rgb(180, 100, 0) == (0, 0, 0)
assert hsl_to_rgb(180, 100, 10) == (0, 51, 51)
assert hsl_to_rgb(180, 100, 20) == (0, 102, 102)
assert hsl_to_rgb(180, 100, 30) == (0, 153, 153)
assert hsl_to_rgb(180, 100, 40) == (0, 204, 204)
assert hsl_to_rgb(180, 100, 50) == (0, 255, 255)
assert hsl_to_rgb(180, 100, 60) == (51, 255, 255)
assert hsl_to_rgb(180, 100, 70) == (102, 255, 255)
assert hsl_to_rgb(180, 100, 80) == (153, 255, 255)
assert hsl_to_rgb(180, 100, 90) == (204, 255, 255)
assert hsl_to_rgb(180, 100, 100) == (255, 255, 255)
def test_rgb_to_hsl_part_16():
# assert rgb_to_hsl(0, 0, 0) == (180, 100, 0)
assert rgb_to_hsl(0, 51, 51) == (180, 100, 10)
assert rgb_to_hsl(0, 102, 102) == (180, 100, 20)
assert rgb_to_hsl(0, 153, 153) == (180, 100, 30)
assert rgb_to_hsl(0, 204, 204) == (180, 100, 40)
assert rgb_to_hsl(0, 255, 255) == (180, 100, 50)
assert rgb_to_hsl(51, 255, 255) == (180, 100, 60)
assert rgb_to_hsl(102, 255, 255) == (180, 100, 70)
assert rgb_to_hsl(153, 255, 255) == (180, 100, 80)
assert rgb_to_hsl(204, 255, 255) == (180, 100, 90)
# assert rgb_to_hsl(255, 255, 255) == (180, 100, 100)
def test_hsl_to_rgb_part_17():
assert hsl_to_rgb(240, 100, 0) == (0, 0, 0)
assert hsl_to_rgb(240, 100, 10) == (0, 0, 51)
assert hsl_to_rgb(240, 100, 20) == (0, 0, 102)
assert hsl_to_rgb(240, 100, 30) == (0, 0, 153)
assert hsl_to_rgb(240, 100, 40) == (0, 0, 204)
assert hsl_to_rgb(240, 100, 50) == (0, 0, 255)
assert hsl_to_rgb(240, 100, 60) == (51, 51, 255)
assert hsl_to_rgb(240, 100, 70) == (102, 102, 255)
assert hsl_to_rgb(240, 100, 80) == (153, 153, 255)
assert hsl_to_rgb(240, 100, 90) == (204, 204, 255)
assert hsl_to_rgb(240, 100, 100) == (255, 255, 255)
def test_rgb_to_hsl_part_17():
# assert rgb_to_hsl(0, 0, 0) == (240, 100, 0)
assert rgb_to_hsl(0, 0, 51) == (240, 100, 10)
assert rgb_to_hsl(0, 0, 102) == (240, 100, 20)
assert rgb_to_hsl(0, 0, 153) == (240, 100, 30)
assert rgb_to_hsl(0, 0, 204) == (240, 100, 40)
assert rgb_to_hsl(0, 0, 255) == (240, 100, 50)
assert rgb_to_hsl(51, 51, 255) == (240, 100, 60)
assert rgb_to_hsl(102, 102, 255) == (240, 100, 70)
assert rgb_to_hsl(153, 153, 255) == (240, 100, 80)
assert rgb_to_hsl(204, 204, 255) == (240, 100, 90)
# assert rgb_to_hsl(255, 255, 255) == (240, 100, 100)
def test_hsl_to_rgb_part_18():
assert hsl_to_rgb(300, 100, 0) == (0, 0, 0)
assert hsl_to_rgb(300, 100, 10) == (51, 0, 51)
assert hsl_to_rgb(300, 100, 20) == (102, 0, 102)
assert hsl_to_rgb(300, 100, 30) == (153, 0, 153)
assert hsl_to_rgb(300, 100, 40) == (204, 0, 204)
assert hsl_to_rgb(300, 100, 50) == (255, 0, 255)
assert hsl_to_rgb(300, 100, 60) == (255, 51, 255)
assert hsl_to_rgb(300, 100, 70) == (255, 102, 255)
assert hsl_to_rgb(300, 100, 80) == (255, 153, 255)
assert hsl_to_rgb(300, 100, 90) == (255, 204, 255)
assert hsl_to_rgb(300, 100, 100) == (255, 255, 255)
def test_rgb_to_hsl_part_18():
# assert rgb_to_hsl(0, 0, 0) == (300, 100, 0)
assert rgb_to_hsl(51, 0, 51) == (300, 100, 10)
assert rgb_to_hsl(102, 0, 102) == (300, 100, 20)
assert rgb_to_hsl(153, 0, 153) == (300, 100, 30)
assert rgb_to_hsl(204, 0, 204) == (300, 100, 40)
assert rgb_to_hsl(255, 0, 255) == (300, 100, 50)
assert rgb_to_hsl(255, 51, 255) == (300, 100, 60)
assert rgb_to_hsl(255, 102, 255) == (300, 100, 70)
assert rgb_to_hsl(255, 153, 255) == (300, 100, 80)
assert rgb_to_hsl(255, 204, 255) == (300, 100, 90)
# assert rgb_to_hsl(255, 255, 255) == (300, 100, 100)
|
|
"""Cutoff-based soft filtering of genomic variants.
"""
from distutils.version import LooseVersion
import math
import os
import shutil
import numpy
import toolz as tz
import yaml
from bcbio import broad, utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do, programs
from bcbio.variation import vcfutils
# ## General functionality
def cutoff_w_expression(vcf_file, expression, data, name="+", filterext="",
extra_cmd="", limit_regions="variant_regions"):
"""Perform cutoff-based soft filtering using bcftools expressions like %QUAL < 20 || DP < 4.
"""
base, ext = utils.splitext_plus(vcf_file)
out_file = "{base}-filter{filterext}{ext}".format(**locals())
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
if vcfutils.vcf_has_variants(vcf_file):
bcftools = config_utils.get_program("bcftools", data["config"])
bgzip_cmd = "| bgzip -c" if out_file.endswith(".gz") else ""
intervals = ""
if limit_regions == "variant_regions":
variant_regions = dd.get_variant_regions(data)
if variant_regions:
intervals = "-T %s" % vcfutils.bgzip_and_index(variant_regions, data["config"])
cmd = ("{bcftools} filter -O v {intervals} --soft-filter '{name}' "
"-e '{expression}' -m '+' {vcf_file} {extra_cmd} {bgzip_cmd} > {tx_out_file}")
do.run(cmd.format(**locals()),
"Cutoff-based soft filtering %s with %s" % (vcf_file, expression), data)
else:
shutil.copy(vcf_file, out_file)
if out_file.endswith(".vcf.gz"):
out_file = vcfutils.bgzip_and_index(out_file, data["config"])
return out_file
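# For illustration (a sketch; file names below are hypothetical), the command
# rendered by cutoff_w_expression for a bgzipped output looks roughly like:
#   bcftools filter -O v -T variant_regions.bed.gz --soft-filter 'FBQualDepth' \
#     -e '%QUAL < 20 || DP < 4' -m '+' input.vcf.gz | bgzip -c > out-filter.vcf.gz
# Records failing the expression are annotated in FILTER (soft filtering) rather
# than removed; '-m +' appends the tag to any existing filters.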
def genotype_filter(vcf_file, expression, data, name, filterext=""):
"""Perform genotype based filtering using GATK with the provided expression.
Adds FT tags to genotypes, rather than the general FILTER flag.
"""
base, ext = utils.splitext_plus(vcf_file)
out_file = "{base}-filter{filterext}{ext}".format(**locals())
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
params = ["-T", "VariantFiltration",
"-R", tz.get_in(["reference", "fasta", "base"], data),
"--variant", vcf_file,
"--out", tx_out_file,
"--genotypeFilterName", name,
"--genotypeFilterExpression", "'%s'" % expression]
jvm_opts = broad.get_gatk_framework_opts(data["config"], os.path.dirname(tx_out_file))
do.run(broad.gatk_cmd("gatk-framework", jvm_opts, params), "Filter with expression: %s" % expression)
if out_file.endswith(".vcf.gz"):
out_file = vcfutils.bgzip_and_index(out_file, data["config"])
return out_file
def genotype_filter_toref(vcf_file, expression, data, filterext=""):
"""Perform genotype filters by converting failing calls to reference, using bcftools
Prefer the FT approach used in genotype_filter, but bcftools handles complex filter
expressions that GATK will not.
"""
base, ext = utils.splitext_plus(vcf_file)
out_file = "{base}-filter{filterext}{ext}".format(**locals())
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
if vcfutils.vcf_has_variants(vcf_file):
bcftools = config_utils.get_program("bcftools", data["config"])
output_type = "z" if tx_out_file.endswith(".gz") else "v"
cmd = ("{bcftools} filter -O {output_type} "
"-e '{expression}' -S 0 {vcf_file} > {tx_out_file}")
do.run(cmd.format(**locals()), "Genotype filtering to ref %s with %s" % (vcf_file, expression), data)
else:
shutil.copy(vcf_file, out_file)
if out_file.endswith(".vcf.gz"):
out_file = vcfutils.bgzip_and_index(out_file, data["config"])
return out_file
# ## Caller specific
def freebayes(in_file, ref_file, vrn_files, data):
"""FreeBayes filters: cutoff-based soft filtering.
"""
out_file = _freebayes_cutoff(in_file, data)
#out_file = _freebayes_custom(in_file, ref_file, data)
return out_file
def _freebayes_custom(in_file, ref_file, data):
"""Custom FreeBayes filtering using bcbio.variation, tuned to human NA12878 results.
Experimental: for testing new methods.
"""
if vcfutils.get_paired_phenotype(data):
return None
config = data["config"]
bv_ver = programs.get_version("bcbio_variation", config=config)
if LooseVersion(bv_ver) < LooseVersion("0.1.1"):
return None
out_file = "%s-filter%s" % os.path.splitext(in_file)
if not utils.file_exists(out_file):
tmp_dir = utils.safe_makedir(os.path.join(os.path.dirname(in_file), "tmp"))
resources = config_utils.get_resources("bcbio_variation", config)
jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx2g"])
java_args = ["-Djava.io.tmpdir=%s" % tmp_dir]
cmd = ["bcbio-variation"] + jvm_opts + java_args + \
["variant-filter", "freebayes", in_file, ref_file]
do.run(cmd, "Custom FreeBayes filtering using bcbio.variation")
return out_file
def _freebayes_cutoff(in_file, data):
"""Perform filtering of FreeBayes results, flagging low confidence calls.
Filters using cutoffs on low depth based on Meynert et al's work modeling sensitivity
of homozygote and heterozygote calling on depth:
http://www.ncbi.nlm.nih.gov/pubmed/23773188
and high depth heterozygote SNP filtering based on Heng Li's work
evaluating variant calling artifacts:
http://arxiv.org/abs/1404.0929
Tuned based on NA12878 call comparisons to Genome in a Bottle reference genome.
"""
if not vcfutils.vcf_has_variants(in_file):
base, ext = utils.splitext_plus(in_file)
out_file = "{base}-filter{ext}".format(**locals())
if not utils.file_exists(out_file):
shutil.copy(in_file, out_file)
if out_file.endswith(".vcf.gz"):
out_file = vcfutils.bgzip_and_index(out_file, data["config"])
return out_file
depth_thresh, qual_thresh = None, None
if _do_high_depth_filter(data):
stats = _calc_vcf_stats(in_file)
if stats["avg_depth"] > 0:
depth_thresh = int(math.ceil(stats["avg_depth"] + 3 * math.pow(stats["avg_depth"], 0.5)))
qual_thresh = depth_thresh * 2.0 # Multiplier from default GATK QD cutoff filter
filters = ('(AF[0] <= 0.5 && (DP < 4 || (DP < 13 && %QUAL < 10))) || '
'(AF[0] > 0.5 && (DP < 4 && %QUAL < 50))')
if depth_thresh:
filters += ' || (%QUAL < {qual_thresh} && DP > {depth_thresh} && AF[0] <= 0.5)'.format(**locals())
return cutoff_w_expression(in_file, filters, data, name="FBQualDepth")
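# Worked example of the high-depth cutoff above (illustrative numbers only):
# with an average called depth of 30x,
#   depth_thresh = ceil(30 + 3 * sqrt(30)) = ceil(46.43) = 47
#   qual_thresh  = 47 * 2.0 = 94.0
# so heterozygous calls (AF[0] <= 0.5) with DP > 47 additionally need
# QUAL >= 94 to avoid the FBQualDepth soft filter.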
def _do_high_depth_filter(data):
"""Check if we should do high depth filtering -- only on germline non-regional calls.
"""
is_genome = tz.get_in(["config", "algorithm", "coverage_interval"], data, "").lower() == "genome"
is_paired = vcfutils.get_paired_phenotype(data)
return is_genome and not is_paired
def _calc_vcf_stats(in_file):
"""Calculate statistics on VCF for filtering, saving to a file for quick re-runs.
"""
out_file = "%s-stats.yaml" % utils.splitext_plus(in_file)[0]
if not utils.file_exists(out_file):
stats = {"avg_depth": _average_called_depth(in_file)}
with open(out_file, "w") as out_handle:
yaml.safe_dump(stats, out_handle, default_flow_style=False, allow_unicode=False)
return stats
else:
with open(out_file) as in_handle:
stats = yaml.safe_load(in_handle)
return stats
def _average_called_depth(in_file):
"""Retrieve the average depth of called reads in the provided VCF.
"""
import cyvcf2
depths = []
for rec in cyvcf2.VCF(str(in_file)):
d = rec.INFO.get("DP")
if d is not None:
depths.append(int(d))
if len(depths) > 0:
return int(math.ceil(numpy.mean(depths)))
else:
return 0
def platypus(in_file, data):
"""Filter Platypus calls, removing Q20 filter and replacing with depth and quality based filter.
Platypus uses its own VCF nomenclature: TC == DP, FR == AF
Platypus gVCF output appears to have an 0/1 index problem so the reference block
regions are 1 base outside regions of interest. We avoid limiting regions during
filtering when using it.
"""
filters = ('(FR[0] <= 0.5 && TC < 4 && %QUAL < 20) || '
'(TC < 13 && %QUAL < 10) || '
'(FR[0] > 0.5 && TC < 4 && %QUAL < 50)')
limit_regions = "variant_regions" if not vcfutils.is_gvcf_file(in_file) else None
return cutoff_w_expression(in_file, filters, data, name="PlatQualDepth",
extra_cmd="| sed 's/\\tQ20\\t/\\tPASS\\t/'", limit_regions=limit_regions)
def samtools(in_file, data):
"""Filter samtools calls based on depth and quality, using similar approaches to FreeBayes.
"""
filters = ('((AC[0] / AN) <= 0.5 && DP < 4 && %QUAL < 20) || '
'(DP < 13 && %QUAL < 10) || '
'((AC[0] / AN) > 0.5 && DP < 4 && %QUAL < 50)')
return cutoff_w_expression(in_file, filters, data, name="stQualDepth")
def _gatk_general():
"""General filters useful for both GATK SNPs and indels.
Remove low quality, low allele fraction variants at the ends of reads.
Generally useful metric identified by looking at 10x data.
https://community.10xgenomics.com/t5/Genome-Exome-Forum/Best-practices-for-trimming-adapters-when-variant-calling/m-p/473
https://github.com/bcbio/bcbio_validations/tree/master/gatk4#10x-adapter-trimming--low-frequency-allele-filter
"""
return ["(QD < 10.0 && AD[1] / (AD[1] + AD[0]) < 0.25 && ReadPosRankSum < 0.0)"]
def gatk_snp_cutoff(in_file, data):
"""Perform cutoff-based soft filtering on GATK SNPs using best-practice recommendations.
We have a more lenient mapping quality (MQ) filter compared to GATK defaults.
The recommended filter (MQ < 40) is too stringent, so we adjust to 30:
http://imgur.com/a/oHRVB
QD and FS are not calculated when generating gVCF output:
https://github.com/broadgsa/gatk-protected/blob/e91472ddc7d58ace52db0cab4d70a072a918d64c/protected/gatk-tools-protected/src/main/java/org/broadinstitute/gatk/tools/walkers/haplotypecaller/HaplotypeCaller.java#L300
The extra command removes escaped quotes in the VCF output which
pyVCF fails on.
    Does not use the GATK best practice recommended SOR filter (SOR > 3.0) as it
has a negative impact on sensitivity relative to precision:
https://github.com/bcbio/bcbio_validations/tree/master/gatk4#na12878-hg38
"""
filters = ["MQ < 30.0", "MQRankSum < -12.5", "ReadPosRankSum < -8.0"]
filters += ["QD < 2.0", "FS > 60.0"]
filters += _gatk_general()
# GATK Haplotype caller (v2.2) appears to have much larger HaplotypeScores
# resulting in excessive filtering, so avoid this metric
variantcaller = utils.get_in(data, ("config", "algorithm", "variantcaller"))
if variantcaller not in ["gatk-haplotype", "haplotyper"]:
filters.append("HaplotypeScore > 13.0")
return cutoff_w_expression(in_file, 'TYPE="snp" && (%s)' % " || ".join(filters), data, "GATKCutoffSNP", "SNP",
extra_cmd=r"""| sed 's/\\"//g'""")
def gatk_indel_cutoff(in_file, data):
"""Perform cutoff-based soft filtering on GATK indels using best-practice recommendations.
"""
filters = ["ReadPosRankSum < -20.0"]
filters += ["QD < 2.0", "FS > 200.0", "SOR > 10.0"]
filters += _gatk_general()
return cutoff_w_expression(in_file, 'TYPE="indel" && (%s)' % " || ".join(filters), data, "GATKCutoffIndel",
"INDEL", extra_cmd=r"""| sed 's/\\"//g'""")
|
|
from collections import Counter
from functools import reduce
from itertools import product
from operator import mul
import numpy as np
from .. import config
from ..base import tokenize
from ..core import flatten
from ..highlevelgraph import HighLevelGraph
from ..utils import M, parse_bytes
from .core import Array, normalize_chunks
from .utils import meta_from_array
def reshape_rechunk(inshape, outshape, inchunks):
assert all(isinstance(c, tuple) for c in inchunks)
ii = len(inshape) - 1
oi = len(outshape) - 1
result_inchunks = [None for i in range(len(inshape))]
result_outchunks = [None for i in range(len(outshape))]
while ii >= 0 or oi >= 0:
if inshape[ii] == outshape[oi]:
result_inchunks[ii] = inchunks[ii]
result_outchunks[oi] = inchunks[ii]
ii -= 1
oi -= 1
continue
din = inshape[ii]
dout = outshape[oi]
if din == 1:
result_inchunks[ii] = (1,)
ii -= 1
elif dout == 1:
result_outchunks[oi] = (1,)
oi -= 1
elif din < dout: # (4, 4, 4) -> (64,)
ileft = ii - 1
while (
ileft >= 0 and reduce(mul, inshape[ileft : ii + 1]) < dout
): # 4 < 64, 4*4 < 64, 4*4*4 == 64
ileft -= 1
if reduce(mul, inshape[ileft : ii + 1]) != dout:
raise ValueError("Shapes not compatible")
# Special case to avoid intermediate rechunking:
# When all the lower axis are completely chunked (chunksize=1) then
# we're simply moving around blocks.
if all(len(inchunks[i]) == inshape[i] for i in range(ii)):
for i in range(ii + 1):
result_inchunks[i] = inchunks[i]
result_outchunks[oi] = inchunks[ii] * np.prod(
list(map(len, inchunks[ileft:ii]))
)
else:
for i in range(ileft + 1, ii + 1): # need single-shape dimensions
result_inchunks[i] = (inshape[i],) # chunks[i] = (4,)
chunk_reduction = reduce(mul, map(len, inchunks[ileft + 1 : ii + 1]))
result_inchunks[ileft] = expand_tuple(inchunks[ileft], chunk_reduction)
prod = reduce(mul, inshape[ileft + 1 : ii + 1]) # 16
result_outchunks[oi] = tuple(
prod * c for c in result_inchunks[ileft]
) # (1, 1, 1, 1) .* 16
oi -= 1
ii = ileft - 1
elif din > dout: # (64,) -> (4, 4, 4)
oleft = oi - 1
while oleft >= 0 and reduce(mul, outshape[oleft : oi + 1]) < din:
oleft -= 1
if reduce(mul, outshape[oleft : oi + 1]) != din:
raise ValueError("Shapes not compatible")
# TODO: don't coalesce shapes unnecessarily
cs = reduce(mul, outshape[oleft + 1 : oi + 1])
result_inchunks[ii] = contract_tuple(inchunks[ii], cs) # (16, 16, 16, 16)
for i in range(oleft + 1, oi + 1):
result_outchunks[i] = (outshape[i],)
result_outchunks[oleft] = tuple(c // cs for c in result_inchunks[ii])
oi = oleft - 1
ii -= 1
return tuple(result_inchunks), tuple(result_outchunks)
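# Worked example (illustrative): collapsing (4, 4, 4) -> (64,) with input chunks
# ((2, 2), (4,), (4,)) gives
#   reshape_rechunk((4, 4, 4), (64,), ((2, 2), (4,), (4,)))
#   == (((2, 2), (4,), (4,)), ((32, 32),))
# i.e. no intermediate rechunk is needed and each output chunk holds
# 2 * 4 * 4 = 32 elements.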
def expand_tuple(chunks, factor):
"""
>>> expand_tuple((2, 4), 2)
(1, 1, 2, 2)
>>> expand_tuple((2, 4), 3)
(1, 1, 1, 1, 2)
>>> expand_tuple((3, 4), 2)
(1, 2, 2, 2)
>>> expand_tuple((7, 4), 3)
(2, 2, 3, 1, 1, 2)
"""
if factor == 1:
return chunks
out = []
for c in chunks:
x = c
part = max(x / factor, 1)
while x >= 2 * part:
out.append(int(part))
x -= int(part)
if x:
out.append(x)
assert sum(chunks) == sum(out)
return tuple(out)
def contract_tuple(chunks, factor):
"""Return simple chunks tuple such that factor divides all elements
Examples
--------
>>> contract_tuple((2, 2, 8, 4), 4)
(4, 8, 4)
"""
assert sum(chunks) % factor == 0
out = []
residual = 0
for chunk in chunks:
chunk += residual
div = chunk // factor
residual = chunk % factor
good = factor * div
if good:
out.append(good)
return tuple(out)
def reshape(x, shape, merge_chunks=True, limit=None):
"""Reshape array to new shape
Parameters
----------
shape : int or tuple of ints
The new shape should be compatible with the original shape. If
an integer, then the result will be a 1-D array of that length.
One shape dimension can be -1. In this case, the value is
inferred from the length of the array and remaining dimensions.
merge_chunks : bool, default True
Whether to merge chunks using the logic in :meth:`dask.array.rechunk`
when communication is necessary given the input array chunking and
the output shape. With ``merge_chunks==False``, the input array will
be rechunked to a chunksize of 1, which can create very many tasks.
limit: int (optional)
The maximum block size to target in bytes. If no limit is provided,
it defaults to using the ``array.chunk-size`` Dask config value.
Notes
-----
This is a parallelized version of the ``np.reshape`` function with the
following limitations:
1. It assumes that the array is stored in `row-major order`_
2. It only allows for reshapings that collapse or merge dimensions like
``(1, 2, 3, 4) -> (1, 6, 4)`` or ``(64,) -> (4, 4, 4)``
.. _`row-major order`: https://en.wikipedia.org/wiki/Row-_and_column-major_order
When communication is necessary this algorithm depends on the logic within
rechunk. It endeavors to keep chunk sizes roughly the same when possible.
    See :ref:`array-chunks.reshaping` for a discussion of the tradeoffs of
``merge_chunks``.
See Also
--------
dask.array.rechunk
numpy.reshape
"""
# Sanitize inputs, look for -1 in shape
from .slicing import sanitize_index
shape = tuple(map(sanitize_index, shape))
known_sizes = [s for s in shape if s != -1]
if len(known_sizes) < len(shape):
if len(shape) - len(known_sizes) > 1:
raise ValueError("can only specify one unknown dimension")
# Fastpath for x.reshape(-1) on 1D arrays, allows unknown shape in x
# for this case only.
if len(shape) == 1 and x.ndim == 1:
return x
missing_size = sanitize_index(x.size / reduce(mul, known_sizes, 1))
shape = tuple(missing_size if s == -1 else s for s in shape)
if np.isnan(sum(x.shape)):
raise ValueError(
"Array chunk size or shape is unknown. shape: %s\n\n"
"Possible solution with x.compute_chunk_sizes()" % str(x.shape)
)
if reduce(mul, shape, 1) != x.size:
raise ValueError("total size of new array must be unchanged")
if x.shape == shape:
return x
meta = meta_from_array(x, len(shape))
name = "reshape-" + tokenize(x, shape)
if x.npartitions == 1:
key = next(flatten(x.__dask_keys__()))
dsk = {(name,) + (0,) * len(shape): (M.reshape, key, shape)}
chunks = tuple((d,) for d in shape)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])
return Array(graph, name, chunks, meta=meta)
    # Logic for how to rechunk
din = len(x.shape)
dout = len(shape)
if not merge_chunks and din > dout:
x = x.rechunk({i: 1 for i in range(din - dout)})
inchunks, outchunks = reshape_rechunk(x.shape, shape, x.chunks)
# Check output chunks are not too large
max_chunksize_in_bytes = reduce(mul, [max(i) for i in outchunks]) * x.dtype.itemsize
    if limit is None and config.get("array.slicing.split-large-chunks") is not False:
        limit = parse_bytes(config.get("array.chunk-size"))
    if limit is not None and max_chunksize_in_bytes > limit:
# Leave chunk sizes unaltered where possible
matching_chunks = Counter(inchunks) & Counter(outchunks)
chunk_plan = []
for out in outchunks:
if matching_chunks[out] > 0:
chunk_plan.append(out)
matching_chunks[out] -= 1
else:
chunk_plan.append("auto")
outchunks = normalize_chunks(
chunk_plan,
shape=shape,
limit=limit,
dtype=x.dtype,
previous_chunks=inchunks,
)
x2 = x.rechunk(inchunks)
# Construct graph
in_keys = list(product([x2.name], *[range(len(c)) for c in inchunks]))
out_keys = list(product([name], *[range(len(c)) for c in outchunks]))
shapes = list(product(*outchunks))
dsk = {a: (M.reshape, b, shape) for a, b, shape in zip(out_keys, in_keys, shapes)}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x2])
return Array(graph, name, outchunks, meta=meta)
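# A minimal usage sketch (illustrative only; assumes dask[array] and numpy are
# installed). Merging the last two axes keeps the chunking of the untouched
# first axis and yields a single chunk along the merged axis.
def _reshape_example():
    import dask.array as da

    x = da.ones((4, 4, 4), chunks=(2, 4, 4))
    y = x.reshape(4, 16)  # collapses the last two axes
    assert y.chunks == ((2, 2), (16,))  # with the default array.chunk-size config
    return y.compute()  # numpy array of shape (4, 16)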
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PoliciesOperations:
"""PoliciesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.devtestlabs.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
lab_name: str,
policy_set_name: str,
expand: Optional[str] = None,
filter: Optional[str] = None,
top: Optional[int] = None,
orderby: Optional[str] = None,
**kwargs
) -> AsyncIterable["_models.PolicyList"]:
"""List policies in a given policy set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param lab_name: The name of the lab.
:type lab_name: str
:param policy_set_name: The name of the policy set.
:type policy_set_name: str
:param expand: Specify the $expand query. Example: 'properties($select=description)'.
:type expand: str
:param filter: The filter to apply to the operation. Example: '$filter=contains(name,'myName').
:type filter: str
:param top: The maximum number of resources to return from the operation. Example: '$top=10'.
:type top: int
:param orderby: The ordering expression for the results, using OData notation. Example:
'$orderby=name desc'.
:type orderby: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PolicyList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.devtestlabs.models.PolicyList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-09-15"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'labName': self._serialize.url("lab_name", lab_name, 'str'),
'policySetName': self._serialize.url("policy_set_name", policy_set_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PolicyList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/policysets/{policySetName}/policies'} # type: ignore
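    # Usage sketch (not part of the generated code): assuming the track-2 async
    # management client `DevTestLabsClient` from azure.mgmt.devtestlabs.aio and a
    # credential such as DefaultAzureCredential from azure.identity.aio, the pager
    # returned by list() can be consumed with `async for`:
    #
    #     client = DevTestLabsClient(credential, subscription_id)
    #     async for policy in client.policies.list("my-rg", "my-lab", "default", top=10):
    #         print(policy.name, policy.status)
    #
    # Resource group, lab and policy-set names above are placeholders.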
async def get(
self,
resource_group_name: str,
lab_name: str,
policy_set_name: str,
name: str,
expand: Optional[str] = None,
**kwargs
) -> "_models.Policy":
"""Get policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param lab_name: The name of the lab.
:type lab_name: str
:param policy_set_name: The name of the policy set.
:type policy_set_name: str
:param name: The name of the policy.
:type name: str
:param expand: Specify the $expand query. Example: 'properties($select=description)'.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Policy, or the result of cls(response)
:rtype: ~azure.mgmt.devtestlabs.models.Policy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Policy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-09-15"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'labName': self._serialize.url("lab_name", lab_name, 'str'),
'policySetName': self._serialize.url("policy_set_name", policy_set_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Policy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/policysets/{policySetName}/policies/{name}'} # type: ignore
async def create_or_update(
self,
resource_group_name: str,
lab_name: str,
policy_set_name: str,
name: str,
policy: "_models.Policy",
**kwargs
) -> "_models.Policy":
"""Create or replace an existing policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param lab_name: The name of the lab.
:type lab_name: str
:param policy_set_name: The name of the policy set.
:type policy_set_name: str
:param name: The name of the policy.
:type name: str
:param policy: A Policy.
:type policy: ~azure.mgmt.devtestlabs.models.Policy
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Policy, or the result of cls(response)
:rtype: ~azure.mgmt.devtestlabs.models.Policy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Policy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-09-15"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'labName': self._serialize.url("lab_name", lab_name, 'str'),
'policySetName': self._serialize.url("policy_set_name", policy_set_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(policy, 'Policy')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Policy', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Policy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/policysets/{policySetName}/policies/{name}'} # type: ignore
async def delete(
self,
resource_group_name: str,
lab_name: str,
policy_set_name: str,
name: str,
**kwargs
) -> None:
"""Delete policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param lab_name: The name of the lab.
:type lab_name: str
:param policy_set_name: The name of the policy set.
:type policy_set_name: str
:param name: The name of the policy.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-09-15"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'labName': self._serialize.url("lab_name", lab_name, 'str'),
'policySetName': self._serialize.url("policy_set_name", policy_set_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/policysets/{policySetName}/policies/{name}'} # type: ignore
async def update(
self,
resource_group_name: str,
lab_name: str,
policy_set_name: str,
name: str,
policy: "_models.PolicyFragment",
**kwargs
) -> "_models.Policy":
"""Allows modifying tags of policies. All other properties will be ignored.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param lab_name: The name of the lab.
:type lab_name: str
:param policy_set_name: The name of the policy set.
:type policy_set_name: str
:param name: The name of the policy.
:type name: str
:param policy: A Policy.
:type policy: ~azure.mgmt.devtestlabs.models.PolicyFragment
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Policy, or the result of cls(response)
:rtype: ~azure.mgmt.devtestlabs.models.Policy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Policy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-09-15"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'labName': self._serialize.url("lab_name", lab_name, 'str'),
'policySetName': self._serialize.url("policy_set_name", policy_set_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(policy, 'PolicyFragment')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Policy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/policysets/{policySetName}/policies/{name}'} # type: ignore
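# Usage sketch (not part of the generated operations class above): one way
# these policy operations might be driven through the async management client.
# The client class and module path (azure.mgmt.devtestlabs.aio.DevTestLabsClient),
# the credential type, and every resource name below are assumptions or
# illustrative placeholders, not values taken from this file.
if __name__ == "__main__":
    import asyncio
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.devtestlabs.aio import DevTestLabsClient

    async def _show_policy():
        # Authenticate, then read back a single lab policy with its
        # description expanded, mirroring the `get` operation defined above.
        async with DefaultAzureCredential() as credential:
            async with DevTestLabsClient(credential, "<subscription-id>") as client:
                policy = await client.policies.get(
                    resource_group_name="my-rg",
                    lab_name="my-lab",
                    policy_set_name="default",
                    name="MaxVmsAllowedPerLab",
                    expand="properties($select=description)",
                )
                print(policy.name)

    asyncio.run(_show_policy())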
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities with minimum-depends for use in setup.py
"""
import datetime
import os
import re
import subprocess
import sys
from setuptools.command import sdist
def parse_mailmap(mailmap='.mailmap'):
mapping = {}
if os.path.exists(mailmap):
fp = open(mailmap, 'r')
for l in fp:
l = l.strip()
if not l.startswith('#') and ' ' in l:
canonical_email, alias = [x for x in l.split(' ')
if x.startswith('<')]
mapping[alias] = canonical_email
return mapping
def canonicalize_emails(changelog, mapping):
"""Takes in a string and an email alias mapping and replaces all
instances of the aliases in the string with their real email.
"""
for alias, email in mapping.iteritems():
changelog = changelog.replace(alias, email)
return changelog
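# Illustrative example (not in the original file): given a .mailmap line such
# as "<[email protected]> <[email protected]>", parse_mailmap() maps the alias to the
# canonical address and canonicalize_emails() rewrites the changelog text:
#
#   mapping = parse_mailmap()   # -> {'<[email protected]>': '<[email protected]>'}
#   canonicalize_emails('Fix bug <[email protected]>', mapping)
#   # -> 'Fix bug <[email protected]>'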
# Get requirements from the first file that exists
def get_reqs_from_files(requirements_files):
for requirements_file in requirements_files:
if os.path.exists(requirements_file):
return open(requirements_file, 'r').read().split('\n')
return []
def parse_requirements(requirements_files=['requirements.txt',
'tools/pip-requires']):
requirements = []
for line in get_reqs_from_files(requirements_files):
# For the requirements list, we need to inject only the portion
# after egg= so that distutils knows the package it's looking for
# such as:
# -e git://github.com/openstack/nova/master#egg=nova
if re.match(r'\s*-e\s+', line):
requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1',
line))
# such as:
# http://github.com/openstack/nova/zipball/master#egg=nova
elif re.match(r'\s*https?:', line):
requirements.append(re.sub(r'\s*https?:.*#egg=(.*)$', r'\1',
line))
# -f lines are for index locations, and don't get used here
elif re.match(r'\s*-f\s+', line):
pass
# argparse is part of the standard library starting with 2.7
# adding it to the requirements list screws distro installs
elif line == 'argparse' and sys.version_info >= (2, 7):
pass
else:
requirements.append(line)
return requirements
def parse_dependency_links(requirements_files=['requirements.txt',
'tools/pip-requires']):
dependency_links = []
# dependency_links inject alternate locations to find packages listed
# in requirements
for line in get_reqs_from_files(requirements_files):
# skip comments and blank lines
if re.match(r'(\s*#)|(\s*$)', line):
continue
# lines with -e or -f need the whole line, minus the flag
if re.match(r'\s*-[ef]\s+', line):
dependency_links.append(re.sub(r'\s*-[ef]\s+', '', line))
# lines that are only urls can go in unmolested
elif re.match(r'\s*https?:', line):
dependency_links.append(line)
return dependency_links
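# Worked example (illustrative, mirroring the comments above): for a
# requirements file containing the two lines
#   -e git://github.com/openstack/nova/master#egg=nova
#   http://github.com/openstack/nova/zipball/master#egg=nova
# parse_requirements() returns ['nova', 'nova'] (just the egg names), while
# parse_dependency_links() returns the locations themselves:
#   ['git://github.com/openstack/nova/master#egg=nova',
#    'http://github.com/openstack/nova/zipball/master#egg=nova']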
def write_requirements():
venv = os.environ.get('VIRTUAL_ENV', None)
if venv is not None:
with open("requirements.txt", "w") as req_file:
output = subprocess.Popen(["pip", "-E", venv, "freeze", "-l"],
stdout=subprocess.PIPE)
requirements = output.communicate()[0].strip()
req_file.write(requirements)
def _run_shell_command(cmd):
output = subprocess.Popen(["/bin/sh", "-c", cmd],
stdout=subprocess.PIPE)
out = output.communicate()
if len(out) == 0:
return None
if len(out[0].strip()) == 0:
return None
return out[0].strip()
def _get_git_branch_name():
branch_ref = _run_shell_command("git symbolic-ref -q HEAD")
if branch_ref is None:
return "HEAD"
return branch_ref[len("refs/heads/"):]
def _get_git_next_version_suffix(branch_name):
datestamp = datetime.datetime.now().strftime('%Y%m%d')
if branch_name == 'milestone-proposed':
revno_prefix = "r"
else:
revno_prefix = ""
_run_shell_command("git fetch origin +refs/meta/*:refs/remotes/meta/*")
milestone_cmd = "git show meta/openstack/release:%s" % branch_name
milestonever = _run_shell_command(milestone_cmd)
if not milestonever:
milestonever = branch_name
post_version = _get_git_post_version()
# post version should look like:
# 0.1.1.4.cc9e28a
# where the bit after the last . is the short sha, and the bit between
# the last and second to last is the revno count
(revno, sha) = post_version.split(".")[-2:]
first_half = "%(milestonever)s~%(datestamp)s" % locals()
second_half = "%(revno_prefix)s%(revno)s.%(sha)s" % locals()
return ".".join((first_half, second_half))
def _get_git_current_tag():
return _run_shell_command("git tag --contains HEAD")
def _get_git_tag_info():
return _run_shell_command("git describe --tags")
def _get_git_post_version():
current_tag = _get_git_current_tag()
if current_tag is not None:
return current_tag
else:
tag_info = _get_git_tag_info()
if tag_info is None:
base_version = "0.0"
cmd = "git --no-pager log --oneline"
out = _run_shell_command(cmd)
revno = len(out.split("\n"))
sha = _run_shell_command("git describe --always")
else:
tag_infos = tag_info.split("-")
base_version = "-".join(tag_infos[:-2])
(revno, sha) = tag_infos[-2:]
# git describe prefixes the sha with a g
sha = sha[1:]
return "%s.%s.%s" % (base_version, revno, sha)
def write_git_changelog():
"""Write a changelog based on the git changelog."""
if os.path.isdir('.git'):
git_log_cmd = 'git log --stat'
changelog = _run_shell_command(git_log_cmd)
mailmap = parse_mailmap()
with open("ChangeLog", "w") as changelog_file:
changelog_file.write(canonicalize_emails(changelog, mailmap))
def generate_authors():
"""Create AUTHORS file using git commits."""
jenkins_email = '[email protected]'
old_authors = 'AUTHORS.in'
new_authors = 'AUTHORS'
if os.path.isdir('.git'):
# don't include jenkins email address in AUTHORS file
git_log_cmd = ("git log --format='%aN <%aE>' | sort -u | "
"grep -v " + jenkins_email)
changelog = _run_shell_command(git_log_cmd)
mailmap = parse_mailmap()
with open(new_authors, 'w') as new_authors_fh:
new_authors_fh.write(canonicalize_emails(changelog, mailmap))
if os.path.exists(old_authors):
with open(old_authors, "r") as old_authors_fh:
new_authors_fh.write('\n' + old_authors_fh.read())
_rst_template = """%(heading)s
%(underline)s
.. automodule:: %(module)s
:members:
:undoc-members:
:show-inheritance:
"""
def read_versioninfo(project):
    """Read the versioninfo file. If it doesn't exist, we're probably
    building from a github zipball, in which case there is no reliable way
    to know what version this really is; that's acceptable, because the
    version is of little use on that code path anyway."""
versioninfo_path = os.path.join(project, 'versioninfo')
if os.path.exists(versioninfo_path):
with open(versioninfo_path, 'r') as vinfo:
version = vinfo.read().strip()
else:
version = "0.0.0"
return version
def write_versioninfo(project, version):
"""Write a simple file containing the version of the package."""
open(os.path.join(project, 'versioninfo'), 'w').write("%s\n" % version)
def get_cmdclass():
"""Return dict of commands to run from setup.py."""
cmdclass = dict()
def _find_modules(arg, dirname, files):
for filename in files:
if filename.endswith('.py') and filename != '__init__.py':
arg["%s.%s" % (dirname.replace('/', '.'),
filename[:-3])] = True
class LocalSDist(sdist.sdist):
"""Builds the ChangeLog and Authors files from VC first."""
def run(self):
write_git_changelog()
generate_authors()
# sdist.sdist is an old style class, can't use super()
sdist.sdist.run(self)
cmdclass['sdist'] = LocalSDist
# If Sphinx is installed on the box running setup.py,
# enable setup.py to build the documentation, otherwise,
# just ignore it
try:
from sphinx.setup_command import BuildDoc
class LocalBuildDoc(BuildDoc):
def generate_autoindex(self):
print "**Autodocumenting from %s" % os.path.abspath(os.curdir)
modules = {}
option_dict = self.distribution.get_option_dict('build_sphinx')
source_dir = os.path.join(option_dict['source_dir'][1], 'api')
if not os.path.exists(source_dir):
os.makedirs(source_dir)
for pkg in self.distribution.packages:
if '.' not in pkg:
os.path.walk(pkg, _find_modules, modules)
module_list = modules.keys()
module_list.sort()
autoindex_filename = os.path.join(source_dir, 'autoindex.rst')
with open(autoindex_filename, 'w') as autoindex:
autoindex.write(""".. toctree::
:maxdepth: 1
""")
for module in module_list:
output_filename = os.path.join(source_dir,
"%s.rst" % module)
heading = "The :mod:`%s` Module" % module
underline = "=" * len(heading)
values = dict(module=module, heading=heading,
underline=underline)
print "Generating %s" % output_filename
with open(output_filename, 'w') as output_file:
output_file.write(_rst_template % values)
autoindex.write(" %s.rst\n" % module)
def run(self):
if not os.getenv('SPHINX_DEBUG'):
self.generate_autoindex()
for builder in ['html', 'man']:
self.builder = builder
self.finalize_options()
self.project = self.distribution.get_name()
self.version = self.distribution.get_version()
self.release = self.distribution.get_version()
BuildDoc.run(self)
cmdclass['build_sphinx'] = LocalBuildDoc
except ImportError:
pass
return cmdclass
def get_pre_version(projectname, base_version):
"""Return a version which is leading up to a version that will
be released in the future."""
if os.path.isdir('.git'):
current_tag = _get_git_current_tag()
if current_tag is not None:
version = current_tag
else:
branch_name = os.getenv('BRANCHNAME',
os.getenv('GERRIT_REFNAME',
_get_git_branch_name()))
version_suffix = _get_git_next_version_suffix(branch_name)
version = "%s~%s" % (base_version, version_suffix)
write_versioninfo(projectname, version)
return version
else:
version = read_versioninfo(projectname)
return version
def get_post_version(projectname):
"""Return a version which is equal to the tag that's on the current
revision if there is one, or tag plus number of additional revisions
if the current revision has no tag."""
if os.path.isdir('.git'):
version = _get_git_post_version()
write_versioninfo(projectname, version)
return version
return read_versioninfo(projectname)
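# Usage sketch (illustrative, not part of the original module): a project's
# setup.py would typically import these helpers -- the module path
# "myproject.openstack.common.setup" is an assumption -- and wire them into
# setuptools roughly like this:
#
#   import setuptools
#   from myproject.openstack.common.setup import (
#       get_cmdclass, get_post_version, parse_dependency_links,
#       parse_requirements)
#
#   setuptools.setup(
#       name='myproject',
#       version=get_post_version('myproject'),
#       cmdclass=get_cmdclass(),
#       install_requires=parse_requirements(),
#       dependency_links=parse_dependency_links(),
#       packages=setuptools.find_packages(exclude=['bin']),
#   )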
|
|
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for list_pager."""
import unittest2
from apitools.base.py import list_pager
from apitools.base.py.testing import mock
from samples.fusiontables_sample.fusiontables_v1 \
import fusiontables_v1_client as fusiontables
from samples.fusiontables_sample.fusiontables_v1 \
import fusiontables_v1_messages as messages
from samples.iam_sample.iam_v1 import iam_v1_client as iam_client
from samples.iam_sample.iam_v1 import iam_v1_messages as iam_messages
class ListPagerTest(unittest2.TestCase):
def _AssertInstanceSequence(self, results, n):
counter = 0
for instance in results:
self.assertEqual(instance.name, 'c' + str(counter))
counter += 1
self.assertEqual(counter, n)
def setUp(self):
self.mocked_client = mock.Client(fusiontables.FusiontablesV1)
self.mocked_client.Mock()
self.addCleanup(self.mocked_client.Unmock)
def testYieldFromList(self):
self.mocked_client.column.List.Expect(
messages.FusiontablesColumnListRequest(
maxResults=100,
pageToken=None,
tableId='mytable',
),
messages.ColumnList(
items=[
messages.Column(name='c0'),
messages.Column(name='c1'),
messages.Column(name='c2'),
messages.Column(name='c3'),
],
nextPageToken='x',
))
self.mocked_client.column.List.Expect(
messages.FusiontablesColumnListRequest(
maxResults=100,
pageToken='x',
tableId='mytable',
),
messages.ColumnList(
items=[
messages.Column(name='c4'),
messages.Column(name='c5'),
messages.Column(name='c6'),
messages.Column(name='c7'),
],
))
client = fusiontables.FusiontablesV1(get_credentials=False)
request = messages.FusiontablesColumnListRequest(tableId='mytable')
results = list_pager.YieldFromList(client.column, request)
self._AssertInstanceSequence(results, 8)
def testYieldNoRecords(self):
client = fusiontables.FusiontablesV1(get_credentials=False)
request = messages.FusiontablesColumnListRequest(tableId='mytable')
results = list_pager.YieldFromList(client.column, request, limit=False)
self.assertEqual(0, len(list(results)))
def testYieldFromListPartial(self):
self.mocked_client.column.List.Expect(
messages.FusiontablesColumnListRequest(
maxResults=6,
pageToken=None,
tableId='mytable',
),
messages.ColumnList(
items=[
messages.Column(name='c0'),
messages.Column(name='c1'),
messages.Column(name='c2'),
messages.Column(name='c3'),
],
nextPageToken='x',
))
self.mocked_client.column.List.Expect(
messages.FusiontablesColumnListRequest(
maxResults=2,
pageToken='x',
tableId='mytable',
),
messages.ColumnList(
items=[
messages.Column(name='c4'),
messages.Column(name='c5'),
messages.Column(name='c6'),
messages.Column(name='c7'),
],
))
client = fusiontables.FusiontablesV1(get_credentials=False)
request = messages.FusiontablesColumnListRequest(tableId='mytable')
results = list_pager.YieldFromList(client.column, request, limit=6)
self._AssertInstanceSequence(results, 6)
def testYieldFromListPaging(self):
self.mocked_client.column.List.Expect(
messages.FusiontablesColumnListRequest(
maxResults=5,
pageToken=None,
tableId='mytable',
),
messages.ColumnList(
items=[
messages.Column(name='c0'),
messages.Column(name='c1'),
messages.Column(name='c2'),
messages.Column(name='c3'),
messages.Column(name='c4'),
],
nextPageToken='x',
))
self.mocked_client.column.List.Expect(
messages.FusiontablesColumnListRequest(
maxResults=4,
pageToken='x',
tableId='mytable',
),
messages.ColumnList(
items=[
messages.Column(name='c5'),
messages.Column(name='c6'),
messages.Column(name='c7'),
messages.Column(name='c8'),
],
))
client = fusiontables.FusiontablesV1(get_credentials=False)
request = messages.FusiontablesColumnListRequest(tableId='mytable')
results = list_pager.YieldFromList(client.column,
request,
limit=9,
batch_size=5)
self._AssertInstanceSequence(results, 9)
def testYieldFromListBatchSizeNone(self):
self.mocked_client.column.List.Expect(
messages.FusiontablesColumnListRequest(
maxResults=None,
pageToken=None,
tableId='mytable',
),
messages.ColumnList(
items=[
messages.Column(name='c0'),
messages.Column(name='c1'),
messages.Column(name='c2'),
messages.Column(name='c3'),
messages.Column(name='c4'),
messages.Column(name='c5'),
messages.Column(name='c6'),
],
nextPageToken='x',
))
client = fusiontables.FusiontablesV1(get_credentials=False)
request = messages.FusiontablesColumnListRequest(tableId='mytable')
results = list_pager.YieldFromList(client.column,
request,
limit=5,
batch_size=None)
self._AssertInstanceSequence(results, 5)
def testYieldFromListEmpty(self):
self.mocked_client.column.List.Expect(
messages.FusiontablesColumnListRequest(
maxResults=6,
pageToken=None,
tableId='mytable',
),
messages.ColumnList())
client = fusiontables.FusiontablesV1(get_credentials=False)
request = messages.FusiontablesColumnListRequest(tableId='mytable')
results = list_pager.YieldFromList(client.column, request, limit=6)
self._AssertInstanceSequence(results, 0)
def testYieldFromListWithPredicate(self):
self.mocked_client.column.List.Expect(
messages.FusiontablesColumnListRequest(
maxResults=100,
pageToken=None,
tableId='mytable',
),
messages.ColumnList(
items=[
messages.Column(name='c0'),
messages.Column(name='bad0'),
messages.Column(name='c1'),
messages.Column(name='bad1'),
],
nextPageToken='x',
))
self.mocked_client.column.List.Expect(
messages.FusiontablesColumnListRequest(
maxResults=100,
pageToken='x',
tableId='mytable',
),
messages.ColumnList(
items=[
messages.Column(name='c2'),
],
))
client = fusiontables.FusiontablesV1(get_credentials=False)
request = messages.FusiontablesColumnListRequest(tableId='mytable')
results = list_pager.YieldFromList(
client.column, request, predicate=lambda x: 'c' in x.name)
self._AssertInstanceSequence(results, 3)
class ListPagerAttributeTest(unittest2.TestCase):
def setUp(self):
self.mocked_client = mock.Client(iam_client.IamV1)
self.mocked_client.Mock()
self.addCleanup(self.mocked_client.Unmock)
def testYieldFromListWithAttributes(self):
self.mocked_client.iamPolicies.GetPolicyDetails.Expect(
iam_messages.GetPolicyDetailsRequest(
pageSize=100,
pageToken=None,
fullResourcePath='myresource',
),
iam_messages.GetPolicyDetailsResponse(
policies=[
iam_messages.PolicyDetail(fullResourcePath='c0'),
iam_messages.PolicyDetail(fullResourcePath='c1'),
],
nextPageToken='x',
))
self.mocked_client.iamPolicies.GetPolicyDetails.Expect(
iam_messages.GetPolicyDetailsRequest(
pageSize=100,
pageToken='x',
fullResourcePath='myresource',
),
iam_messages.GetPolicyDetailsResponse(
policies=[
iam_messages.PolicyDetail(fullResourcePath='c2'),
],
))
client = iam_client.IamV1(get_credentials=False)
request = iam_messages.GetPolicyDetailsRequest(
fullResourcePath='myresource')
results = list_pager.YieldFromList(
client.iamPolicies, request,
batch_size_attribute='pageSize',
method='GetPolicyDetails', field='policies')
i = 0
for i, instance in enumerate(results):
            self.assertEqual('c{0}'.format(i), instance.fullResourcePath)
        self.assertEqual(2, i)
def testYieldFromListWithNoBatchSizeAttribute(self):
self.mocked_client.iamPolicies.GetPolicyDetails.Expect(
iam_messages.GetPolicyDetailsRequest(
pageToken=None,
fullResourcePath='myresource',
),
iam_messages.GetPolicyDetailsResponse(
policies=[
iam_messages.PolicyDetail(fullResourcePath='c0'),
iam_messages.PolicyDetail(fullResourcePath='c1'),
],
))
client = iam_client.IamV1(get_credentials=False)
request = iam_messages.GetPolicyDetailsRequest(
fullResourcePath='myresource')
results = list_pager.YieldFromList(
client.iamPolicies, request,
batch_size_attribute=None,
method='GetPolicyDetails', field='policies')
i = 0
for i, instance in enumerate(results):
            self.assertEqual('c{0}'.format(i), instance.fullResourcePath)
        self.assertEqual(1, i)
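# Usage sketch (illustrative): outside these mock-based tests, YieldFromList
# is driven the same way -- pass a service object (e.g. client.column), a
# request message, and optional paging controls. A real call needs live
# credentials and a reachable API, so this is shown as a sketch only:
#
#   client = fusiontables.FusiontablesV1()  # real credentials required
#   request = messages.FusiontablesColumnListRequest(tableId='mytable')
#   for column in list_pager.YieldFromList(
#           client.column, request, limit=9, batch_size=5,
#           predicate=lambda col: 'c' in col.name):
#       print(column.name)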
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions called by the generated code to execute an eager-mode op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from autograd import core as ag_core
import six
from google.protobuf import text_format
from tensorflow.core.framework import tensor_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.util import compat
def execute(op_name, num_outputs, inputs, attrs=None, name=None):
"""Execute a TensorFlow operation.
Args:
op_name: Name of the TensorFlow operation (see REGISTER_OP in C++ code) to
execute.
num_outputs: The number of outputs of the operation to fetch.
(Explicitly provided instead of being inferred for performance
reasons).
inputs: A list of inputs to the operation. Each entry should be a Tensor, or
a value which can be passed to the Tensor constructor to create one.
attrs: A tuple with alternating string attr names and attr values for this
operation.
name: Customized name for the operation.
Returns:
None if there are no outputs, a single Tensor object if there is one output
and a list of Tensor objects if there are multiple outputs.
Raises:
An exception on error.
"""
ctx = context.get_default_context()
# TODO(apassos) move this to convert_to_tensor
inputs = [ag_core.getval(x) for x in inputs]
# pylint: disable=protected-access
input_handles = [c._handle for c in inputs]
device_name = ctx.device_name
try:
outh = pywrap_tensorflow.TFE_Py_Execute(ctx._handle, device_name,
str(op_name), input_handles, attrs,
num_outputs)
# pylint: enable=protected-access
except core._NotOkStatusException as e: # pylint: disable=protected-access
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
raise core._status_to_exception(e.code, message) # pylint: disable=protected-access
# pylint: enable=protected-access
tensors = [tensor._tensor_from_handle(x) for x in outh] # pylint: disable=protected-access
# TODO(alive, cais): Use the execution callback mechanism.
if core.active_trace() is not None:
trace_name = name if name else op_name
for t in tensors:
# pylint: disable=protected-access
core.active_trace().record_tensor(trace_name,
ops.tensor_id(t),
t._device_name(),
t.shape.num_elements())
# pylint: enable=protected-access
# TODO(cais): Optimize this, perhaps by replacing this execute function with
# a different one when there are execution callback(s).
for callback in ctx.post_execution_callbacks:
callback(op_name, name, attrs, inputs, tensors)
return tensors
def record_gradient(unused_op_name, unused_inputs, unused_attrs, results,
unused_name):
"""Import backprop if you want gradients recorded."""
return results
def make_float(v, arg_name):
if not isinstance(v, compat.real_types):
raise TypeError("Expected float for argument '%s' not %s." %
(arg_name, repr(v)))
return float(v)
def make_int(v, arg_name):
if isinstance(v, six.string_types):
raise TypeError("Expected int for argument '%s' not %s." %
(arg_name, repr(v)))
try:
return int(v)
except (ValueError, TypeError):
raise TypeError("Expected int for argument '%s' not %s." %
(arg_name, repr(v)))
def make_str(v, arg_name):
if not isinstance(v, compat.bytes_or_text_types):
raise TypeError("Expected string for argument '%s' not %s." %
(arg_name, repr(v)))
return compat.as_bytes(v) # Convert unicode strings to bytes.
def make_bool(v, arg_name):
if not isinstance(v, bool):
raise TypeError("Expected bool for argument '%s' not %s." %
(arg_name, repr(v)))
return v
def make_type(v, arg_name):
try:
v = dtypes.as_dtype(v).base_dtype
except TypeError:
raise TypeError("Expected DataType for argument '%s' not %s." %
(arg_name, repr(v)))
i = v.as_datatype_enum
return i
def make_shape(v, arg_name):
"""Convert v into a list."""
# Args:
# v: A TensorShapeProto, a list of ints, or a tensor_shape.TensorShape.
# arg_name: String, for error messages.
# Returns:
# None if the rank is unknown, otherwise a list of ints (or Nones in the
# position where the dimension is unknown).
try:
shape = tensor_shape.as_shape(v)
except TypeError as e:
raise TypeError("Error converting %s to a TensorShape: %s" % (arg_name, e))
except ValueError as e:
raise ValueError("Error converting %s to a TensorShape: %s" % (arg_name, e))
if shape.ndims is None:
return None
else:
return shape.as_list()
def make_tensor(v, arg_name):
"""Ensure v is a TensorProto."""
if isinstance(v, tensor_pb2.TensorProto):
return v
elif isinstance(v, six.string_types):
pb = tensor_pb2.TensorProto()
text_format.Merge(v, pb)
return pb
raise TypeError(
"Don't know how to convert %s to a TensorProto for argument '%s'" %
(repr(v), arg_name))
def args_to_matching_eager(l, default_dtype=None):
"""Convert sequence `l` to eager same-type Tensors."""
# TODO(josh11b): Could we do a better job if we also passed in the
# allowed dtypes when that was known?
# Is some input already a Tensor with a dtype?
dtype = None
for t in l:
if isinstance(ag_core.getval(t), tensor.Tensor):
dtype = t.dtype
break
if dtype is None:
# Infer a dtype based on the first value, and use that dtype for the
# remaining values.
ret = []
for t in l:
ret.append(ops.convert_to_tensor(t, dtype, preferred_dtype=default_dtype))
if dtype is None:
dtype = ret[-1].dtype
else:
ret = [ops.convert_to_tensor(t, dtype) for t in l]
return dtype, ret
def convert_to_mixed_eager_tensors(values):
v = [t if isinstance(ag_core.getval(t), tensor.Tensor) else tensor.Tensor(t)
for t in values]
types = [t.dtype for t in v]
return types, v
def args_to_mixed_eager_tensors(lists):
"""Converts a list of same-length lists of values to eager tensors."""
assert len(lists) > 1
# Generate an error if len(lists[i]) is not the same for all i.
lists_ret = []
for l in lists[1:]:
if len(l) != len(lists[0]):
raise ValueError(
"Expected list arguments to be the same length: %d != %d (%r vs. %r)"
% (len(lists[0]), len(l), lists[0], l))
lists_ret.append([])
# Convert the first element of each list first, then the second element, etc.
types = []
for i in range(len(lists[0])):
dtype = None
# If any list has a Tensor, use that dtype
for l in lists:
if isinstance(ag_core.getval(l[i]), tensor.Tensor):
dtype = l[i].dtype
break
if dtype is None:
# Convert the first one and use its dtype.
lists_ret[0].append(ops.convert_to_tensor(lists[0][i]))
dtype = lists_ret[0][i].dtype
for j in range(1, len(lists)):
lists_ret[j].append(
ops.convert_to_tensor(lists[j][i], dtype=dtype))
else:
# Convert everything to the found dtype.
for j in range(len(lists)):
lists_ret[j].append(
ops.convert_to_tensor(lists[j][i], dtype=dtype))
types.append(dtype)
return types, lists_ret
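# Illustrative sketch (assumes this module is importable as
# tensorflow.python.eager.execute in a TensorFlow build of this era): the
# make_* helpers coerce plain Python values into the forms expected for op
# attributes.
#
#   from tensorflow.python.eager import execute as execute_lib
#   execute_lib.make_int(3, "axis")                 # -> 3
#   execute_lib.make_bool(True, "keep_dims")        # -> True
#   execute_lib.make_str("NHWC", "data_format")     # -> b'NHWC'
#   execute_lib.make_shape([2, None, 5], "shape")   # -> [2, None, 5]
#   execute_lib.make_type("float32", "T")           # -> DT_FLOAT enum value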
|
|
from __future__ import unicode_literals
import datetime
from django.contrib.admin import ModelAdmin, TabularInline
from django.contrib.admin.helpers import InlineAdminForm
from django.contrib.admin.tests import AdminSeleniumWebDriverTestCase
from django.contrib.auth.models import Permission, User
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.test import RequestFactory, TestCase, override_settings
from .admin import InnerInline, site as admin_site
from .models import (
Author, BinaryTree, Book, Chapter, Child, ChildModel1, ChildModel2,
Fashionista, FootNote, Holder, Holder2, Holder3, Holder4, Inner, Inner2,
Inner3, Inner4Stacked, Inner4Tabular, Novel, OutfitItem, Parent,
ParentModelWithCustomPk, Person, Poll, Profile, ProfileCollection,
Question, Sighting, SomeChildModel, SomeParentModel, Teacher,
)
INLINE_CHANGELINK_HTML = 'class="inlinechangelink">Change</a>'
class TestDataMixin(object):
@classmethod
def setUpTestData(cls):
# password = "secret"
User.objects.create(
pk=100, username='super', first_name='Super', last_name='User', email='[email protected]',
password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158', is_active=True, is_superuser=True,
is_staff=True, last_login=datetime.datetime(2007, 5, 30, 13, 20, 10),
date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_inlines.urls")
class TestInline(TestDataMixin, TestCase):
def setUp(self):
holder = Holder(dummy=13)
holder.save()
Inner(dummy=42, holder=holder).save()
result = self.client.login(username='super', password='secret')
self.assertEqual(result, True)
self.factory = RequestFactory()
def test_can_delete(self):
"""
can_delete should be passed to inlineformset factory.
"""
holder = Holder.objects.get(dummy=13)
response = self.client.get(
reverse('admin:admin_inlines_holder_change', args=(holder.id,))
)
inner_formset = response.context['inline_admin_formsets'][0].formset
expected = InnerInline.can_delete
actual = inner_formset.can_delete
self.assertEqual(expected, actual, 'can_delete must be equal')
def test_readonly_stacked_inline_label(self):
"""Bug #13174."""
holder = Holder.objects.create(dummy=42)
Inner.objects.create(holder=holder, dummy=42, readonly='')
response = self.client.get(
reverse('admin:admin_inlines_holder_change', args=(holder.id,))
)
self.assertContains(response, '<label>Inner readonly label:</label>')
def test_many_to_many_inlines(self):
"Autogenerated many-to-many inlines are displayed correctly (#13407)"
response = self.client.get(reverse('admin:admin_inlines_author_add'))
# The heading for the m2m inline block uses the right text
self.assertContains(response, '<h2>Author-book relationships</h2>')
# The "add another" label is correct
self.assertContains(response, 'Add another Author\\u002Dbook relationship')
# The '+' is dropped from the autogenerated form prefix (Author_books+)
self.assertContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_primary(self):
person = Person.objects.create(firstname='Imelda')
item = OutfitItem.objects.create(name='Shoes')
# Imelda likes shoes, but can't carry her own bags.
data = {
'shoppingweakness_set-TOTAL_FORMS': 1,
'shoppingweakness_set-INITIAL_FORMS': 0,
'shoppingweakness_set-MAX_NUM_FORMS': 0,
'_save': 'Save',
'person': person.id,
'max_weight': 0,
'shoppingweakness_set-0-item': item.id,
}
response = self.client.post(reverse('admin:admin_inlines_fashionista_add'), data)
self.assertEqual(response.status_code, 302)
self.assertEqual(len(Fashionista.objects.filter(person__firstname='Imelda')), 1)
def test_tabular_non_field_errors(self):
"""
Ensure that non_field_errors are displayed correctly, including the
right value for colspan. Refs #13510.
"""
data = {
'title_set-TOTAL_FORMS': 1,
'title_set-INITIAL_FORMS': 0,
'title_set-MAX_NUM_FORMS': 0,
'_save': 'Save',
'title_set-0-title1': 'a title',
'title_set-0-title2': 'a different title',
}
response = self.client.post(reverse('admin:admin_inlines_titlecollection_add'), data)
# Here colspan is "4": two fields (title1 and title2), one hidden field and the delete checkbox.
self.assertContains(
response,
'<tr><td colspan="4"><ul class="errorlist nonfield">'
'<li>The two titles must be the same</li></ul></td></tr>'
)
def test_no_parent_callable_lookup(self):
"""Admin inline `readonly_field` shouldn't invoke parent ModelAdmin callable"""
# Identically named callable isn't present in the parent ModelAdmin,
# rendering of the add view shouldn't explode
response = self.client.get(reverse('admin:admin_inlines_novel_add'))
self.assertEqual(response.status_code, 200)
# View should have the child inlines section
self.assertContains(response, '<div class="inline-group" id="chapter_set-group">')
def test_callable_lookup(self):
"""Admin inline should invoke local callable when its name is listed in readonly_fields"""
response = self.client.get(reverse('admin:admin_inlines_poll_add'))
self.assertEqual(response.status_code, 200)
# Add parent object view should have the child inlines section
self.assertContains(response, '<div class="inline-group" id="question_set-group">')
# The right callable should be used for the inline readonly_fields
# column cells
self.assertContains(response, '<p>Callable in QuestionInline</p>')
def test_help_text(self):
"""
Ensure that the inlines' model field help texts are displayed when
using both the stacked and tabular layouts.
Ref #8190.
"""
response = self.client.get(reverse('admin:admin_inlines_holder4_add'))
self.assertContains(response, '<p class="help">Awesome stacked help text is awesome.</p>', 4)
self.assertContains(
response,
'<img src="/static/admin/img/icon-unknown.svg" '
'class="help help-tooltip" width="10" height="10" '
'alt="(Awesome tabular help text is awesome.)" '
'title="Awesome tabular help text is awesome." />',
1
)
# ReadOnly fields
response = self.client.get(reverse('admin:admin_inlines_capofamiglia_add'))
self.assertContains(
response,
'<img src="/static/admin/img/icon-unknown.svg" '
'class="help help-tooltip" width="10" height="10" '
'alt="(Help text for ReadOnlyInline)" '
'title="Help text for ReadOnlyInline" />',
1
)
def test_inline_hidden_field_no_column(self):
"""#18263 -- Make sure hidden fields don't get a column in tabular inlines"""
parent = SomeParentModel.objects.create(name='a')
SomeChildModel.objects.create(name='b', position='0', parent=parent)
SomeChildModel.objects.create(name='c', position='1', parent=parent)
response = self.client.get(reverse('admin:admin_inlines_someparentmodel_change', args=(parent.pk,)))
self.assertNotContains(response, '<td class="field-position">')
self.assertContains(response, (
'<input id="id_somechildmodel_set-1-position" '
'name="somechildmodel_set-1-position" type="hidden" value="1" />'))
def test_non_related_name_inline(self):
"""
Ensure that multiple inlines with related_name='+' have correct form
prefixes. Bug #16838.
"""
response = self.client.get(reverse('admin:admin_inlines_capofamiglia_add'))
self.assertContains(response,
'<input type="hidden" name="-1-0-id" id="id_-1-0-id" />', html=True)
self.assertContains(response,
'<input type="hidden" name="-1-0-capo_famiglia" id="id_-1-0-capo_famiglia" />', html=True)
self.assertContains(response,
'<input id="id_-1-0-name" type="text" class="vTextField" '
'name="-1-0-name" maxlength="100" />', html=True)
self.assertContains(response,
'<input type="hidden" name="-2-0-id" id="id_-2-0-id" />', html=True)
self.assertContains(response,
'<input type="hidden" name="-2-0-capo_famiglia" id="id_-2-0-capo_famiglia" />', html=True)
self.assertContains(response,
'<input id="id_-2-0-name" type="text" class="vTextField" '
'name="-2-0-name" maxlength="100" />', html=True)
@override_settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True)
def test_localize_pk_shortcut(self):
"""
Ensure that the "View on Site" link is correct for locales that use
thousand separators
"""
holder = Holder.objects.create(pk=123456789, dummy=42)
inner = Inner.objects.create(pk=987654321, holder=holder, dummy=42, readonly='')
response = self.client.get(reverse('admin:admin_inlines_holder_change', args=(holder.id,)))
inner_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(inner).pk, inner.pk)
self.assertContains(response, inner_shortcut)
def test_custom_pk_shortcut(self):
"""
Ensure that the "View on Site" link is correct for models with a
custom primary key field. Bug #18433.
"""
parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo")
child1 = ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent)
child2 = ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent)
response = self.client.get(reverse('admin:admin_inlines_parentmodelwithcustompk_change', args=('foo',)))
child1_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(child1).pk, child1.pk)
child2_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(child2).pk, child2.pk)
self.assertContains(response, child1_shortcut)
self.assertContains(response, child2_shortcut)
def test_create_inlines_on_inherited_model(self):
"""
Ensure that an object can be created with inlines when it inherits
another class. Bug #19524.
"""
data = {
'name': 'Martian',
'sighting_set-TOTAL_FORMS': 1,
'sighting_set-INITIAL_FORMS': 0,
'sighting_set-MAX_NUM_FORMS': 0,
'sighting_set-0-place': 'Zone 51',
'_save': 'Save',
}
response = self.client.post(reverse('admin:admin_inlines_extraterrestrial_add'), data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Sighting.objects.filter(et__name='Martian').count(), 1)
def test_custom_get_extra_form(self):
bt_head = BinaryTree.objects.create(name="Tree Head")
BinaryTree.objects.create(name="First Child", parent=bt_head)
# The maximum number of forms should respect 'get_max_num' on the
# ModelAdmin
max_forms_input = (
'<input id="id_binarytree_set-MAX_NUM_FORMS" '
'name="binarytree_set-MAX_NUM_FORMS" type="hidden" value="%d" />'
)
# The total number of forms will remain the same in either case
total_forms_hidden = (
'<input id="id_binarytree_set-TOTAL_FORMS" '
'name="binarytree_set-TOTAL_FORMS" type="hidden" value="2" />'
)
response = self.client.get(reverse('admin:admin_inlines_binarytree_add'))
self.assertContains(response, max_forms_input % 3)
self.assertContains(response, total_forms_hidden)
response = self.client.get(reverse('admin:admin_inlines_binarytree_change', args=(bt_head.id,)))
self.assertContains(response, max_forms_input % 2)
self.assertContains(response, total_forms_hidden)
def test_min_num(self):
"""
Ensure that min_num and extra determine number of forms.
"""
class MinNumInline(TabularInline):
model = BinaryTree
min_num = 2
extra = 3
modeladmin = ModelAdmin(BinaryTree, admin_site)
modeladmin.inlines = [MinNumInline]
min_forms = (
'<input id="id_binarytree_set-MIN_NUM_FORMS" '
'name="binarytree_set-MIN_NUM_FORMS" type="hidden" value="2" />'
)
total_forms = (
'<input id="id_binarytree_set-TOTAL_FORMS" '
'name="binarytree_set-TOTAL_FORMS" type="hidden" value="5" />'
)
request = self.factory.get(reverse('admin:admin_inlines_binarytree_add'))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request)
self.assertContains(response, min_forms)
self.assertContains(response, total_forms)
def test_custom_min_num(self):
"""
Ensure that get_min_num is called and used correctly.
"""
bt_head = BinaryTree.objects.create(name="Tree Head")
BinaryTree.objects.create(name="First Child", parent=bt_head)
class MinNumInline(TabularInline):
model = BinaryTree
extra = 3
def get_min_num(self, request, obj=None, **kwargs):
if obj:
return 5
return 2
modeladmin = ModelAdmin(BinaryTree, admin_site)
modeladmin.inlines = [MinNumInline]
min_forms = (
'<input id="id_binarytree_set-MIN_NUM_FORMS" '
'name="binarytree_set-MIN_NUM_FORMS" type="hidden" value="%d" />'
)
total_forms = (
'<input id="id_binarytree_set-TOTAL_FORMS" '
'name="binarytree_set-TOTAL_FORMS" type="hidden" value="%d" />'
)
request = self.factory.get(reverse('admin:admin_inlines_binarytree_add'))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request)
self.assertContains(response, min_forms % 2)
self.assertContains(response, total_forms % 5)
request = self.factory.get(reverse('admin:admin_inlines_binarytree_change', args=(bt_head.id,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(bt_head.id))
self.assertContains(response, min_forms % 5)
self.assertContains(response, total_forms % 8)
def test_inline_nonauto_noneditable_pk(self):
response = self.client.get(reverse('admin:admin_inlines_author_add'))
self.assertContains(
response,
'<input id="id_nonautopkbook_set-0-rand_pk" '
'name="nonautopkbook_set-0-rand_pk" type="hidden" />',
html=True
)
self.assertContains(
response,
'<input id="id_nonautopkbook_set-2-0-rand_pk" '
'name="nonautopkbook_set-2-0-rand_pk" type="hidden" />',
html=True
)
def test_inline_editable_pk(self):
response = self.client.get(reverse('admin:admin_inlines_author_add'))
self.assertContains(
response,
'<input class="vIntegerField" id="id_editablepkbook_set-0-manual_pk" '
'name="editablepkbook_set-0-manual_pk" type="text" />',
html=True, count=1
)
self.assertContains(
response,
'<input class="vIntegerField" id="id_editablepkbook_set-2-0-manual_pk" '
'name="editablepkbook_set-2-0-manual_pk" type="text" />',
html=True, count=1
)
def test_stacked_inline_edit_form_contains_has_original_class(self):
holder = Holder.objects.create(dummy=1)
holder.inner_set.create(dummy=1)
response = self.client.get(reverse('admin:admin_inlines_holder_change', args=(holder.pk,)))
self.assertContains(
response,
'<div class="inline-related has_original" id="inner_set-0">',
count=1
)
self.assertContains(
response,
'<div class="inline-related" id="inner_set-1">',
count=1
)
def test_inlines_show_change_link_registered(self):
"Inlines `show_change_link` for registered models when enabled."
holder = Holder4.objects.create(dummy=1)
item1 = Inner4Stacked.objects.create(dummy=1, holder=holder)
item2 = Inner4Tabular.objects.create(dummy=1, holder=holder)
items = (
('inner4stacked', item1.pk),
('inner4tabular', item2.pk),
)
response = self.client.get(reverse('admin:admin_inlines_holder4_change', args=(holder.pk,)))
self.assertTrue(response.context['inline_admin_formset'].opts.has_registered_model)
for model, pk in items:
url = reverse('admin:admin_inlines_%s_change' % model, args=(pk,))
self.assertContains(response, '<a href="%s" %s' % (url, INLINE_CHANGELINK_HTML))
def test_inlines_show_change_link_unregistered(self):
"Inlines `show_change_link` disabled for unregistered models."
parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo")
ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent)
ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent)
response = self.client.get(reverse('admin:admin_inlines_parentmodelwithcustompk_change', args=('foo',)))
self.assertFalse(response.context['inline_admin_formset'].opts.has_registered_model)
self.assertNotContains(response, INLINE_CHANGELINK_HTML)
def test_tabular_inline_show_change_link_false_registered(self):
"Inlines `show_change_link` disabled by default."
poll = Poll.objects.create(name="New poll")
Question.objects.create(poll=poll)
response = self.client.get(reverse('admin:admin_inlines_poll_change', args=(poll.pk,)))
self.assertTrue(response.context['inline_admin_formset'].opts.has_registered_model)
self.assertNotContains(response, INLINE_CHANGELINK_HTML)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_inlines.urls")
class TestInlineMedia(TestDataMixin, TestCase):
def setUp(self):
result = self.client.login(username='super', password='secret')
self.assertEqual(result, True)
def test_inline_media_only_base(self):
holder = Holder(dummy=13)
holder.save()
Inner(dummy=42, holder=holder).save()
change_url = reverse('admin:admin_inlines_holder_change', args=(holder.id,))
response = self.client.get(change_url)
self.assertContains(response, 'my_awesome_admin_scripts.js')
def test_inline_media_only_inline(self):
holder = Holder3(dummy=13)
holder.save()
Inner3(dummy=42, holder=holder).save()
change_url = reverse('admin:admin_inlines_holder3_change', args=(holder.id,))
response = self.client.get(change_url)
self.assertContains(response, 'my_awesome_inline_scripts.js')
def test_all_inline_media(self):
holder = Holder2(dummy=13)
holder.save()
Inner2(dummy=42, holder=holder).save()
change_url = reverse('admin:admin_inlines_holder2_change', args=(holder.id,))
response = self.client.get(change_url)
self.assertContains(response, 'my_awesome_admin_scripts.js')
self.assertContains(response, 'my_awesome_inline_scripts.js')
@override_settings(ROOT_URLCONF="admin_inlines.urls")
class TestInlineAdminForm(TestCase):
def test_immutable_content_type(self):
"""Regression for #9362
The problem depends only on InlineAdminForm and its "original"
argument, so we can safely set the other arguments to None/{}. We just
need to check that the content_type argument of Child isn't altered by
the internals of the inline form."""
sally = Teacher.objects.create(name='Sally')
john = Parent.objects.create(name='John')
joe = Child.objects.create(name='Joe', teacher=sally, parent=john)
iaf = InlineAdminForm(None, None, {}, {}, joe)
parent_ct = ContentType.objects.get_for_model(Parent)
self.assertEqual(iaf.original.content_type, parent_ct)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_inlines.urls")
class TestInlineProtectedOnDelete(TestDataMixin, TestCase):
def setUp(self):
result = self.client.login(username='super', password='secret')
self.assertEqual(result, True)
def test_deleting_inline_with_protected_delete_does_not_validate(self):
lotr = Novel.objects.create(name='Lord of the rings')
chapter = Chapter.objects.create(novel=lotr, name='Many Meetings')
foot_note = FootNote.objects.create(chapter=chapter, note='yadda yadda')
change_url = reverse('admin:admin_inlines_novel_change', args=(lotr.id,))
response = self.client.get(change_url)
data = {
'name': lotr.name,
'chapter_set-TOTAL_FORMS': 1,
'chapter_set-INITIAL_FORMS': 1,
'chapter_set-MAX_NUM_FORMS': 1000,
'_save': 'Save',
'chapter_set-0-id': chapter.id,
'chapter_set-0-name': chapter.name,
'chapter_set-0-novel': lotr.id,
'chapter_set-0-DELETE': 'on'
}
response = self.client.post(change_url, data)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Deleting chapter %s would require deleting "
"the following protected related objects: foot note %s"
% (chapter, foot_note))
@override_settings(ROOT_URLCONF="admin_inlines.urls")
class TestInlinePermissions(TestCase):
"""
Make sure the admin respects permissions for objects that are edited
inline. Refs #8060.
"""
def setUp(self):
self.user = User(username='admin')
self.user.is_staff = True
self.user.is_active = True
self.user.set_password('secret')
self.user.save()
self.author_ct = ContentType.objects.get_for_model(Author)
self.holder_ct = ContentType.objects.get_for_model(Holder2)
self.book_ct = ContentType.objects.get_for_model(Book)
self.inner_ct = ContentType.objects.get_for_model(Inner2)
# User always has permissions to add and change Authors, and Holders,
# the main (parent) models of the inlines. Permissions on the inlines
# vary per test.
permission = Permission.objects.get(codename='add_author', content_type=self.author_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_author', content_type=self.author_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='add_holder2', content_type=self.holder_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_holder2', content_type=self.holder_ct)
self.user.user_permissions.add(permission)
author = Author.objects.create(pk=1, name='The Author')
book = author.books.create(name='The inline Book')
self.author_change_url = reverse('admin:admin_inlines_author_change', args=(author.id,))
# Get the ID of the automatically created intermediate model for the Author-Book m2m
author_book_auto_m2m_intermediate = Author.books.through.objects.get(author=author, book=book)
self.author_book_auto_m2m_intermediate_id = author_book_auto_m2m_intermediate.pk
holder = Holder2.objects.create(dummy=13)
inner2 = Inner2.objects.create(dummy=42, holder=holder)
self.holder_change_url = reverse('admin:admin_inlines_holder2_change', args=(holder.id,))
self.inner2_id = inner2.id
self.assertEqual(
self.client.login(username='admin', password='secret'),
True)
def test_inline_add_m2m_noperm(self):
response = self.client.get(reverse('admin:admin_inlines_author_add'))
# No change permission on books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author\\u002DBook Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_add_fk_noperm(self):
response = self.client.get(reverse('admin:admin_inlines_holder2_add'))
# No permissions on Inner2s, so no inline
self.assertNotContains(response, '<h2>Inner2s</h2>')
self.assertNotContains(response, 'Add another Inner2')
self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')
def test_inline_change_m2m_noperm(self):
response = self.client.get(self.author_change_url)
# No change permission on books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author\\u002DBook Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_change_fk_noperm(self):
response = self.client.get(self.holder_change_url)
# No permissions on Inner2s, so no inline
self.assertNotContains(response, '<h2>Inner2s</h2>')
self.assertNotContains(response, 'Add another Inner2')
self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')
def test_inline_add_m2m_add_perm(self):
permission = Permission.objects.get(codename='add_book', content_type=self.book_ct)
self.user.user_permissions.add(permission)
response = self.client.get(reverse('admin:admin_inlines_author_add'))
# No change permission on Books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author\\u002DBook Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_add_fk_add_perm(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(reverse('admin:admin_inlines_holder2_add'))
# Add permission on inner2s, so we get the inline
self.assertContains(response, '<h2>Inner2s</h2>')
self.assertContains(response, 'Add another Inner2')
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="3" name="inner2_set-TOTAL_FORMS" />', html=True)
def test_inline_change_m2m_add_perm(self):
permission = Permission.objects.get(codename='add_book', content_type=self.book_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.author_change_url)
# No change permission on books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author\\u002DBook Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
self.assertNotContains(response, 'id="id_Author_books-0-DELETE"')
def test_inline_change_m2m_change_perm(self):
permission = Permission.objects.get(codename='change_book', content_type=self.book_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.author_change_url)
# We have change perm on books, so we can add/change/delete inlines
self.assertContains(response, '<h2>Author-book relationships</h2>')
self.assertContains(response, 'Add another Author\\u002Dbook relationship')
self.assertContains(response, '<input type="hidden" id="id_Author_books-TOTAL_FORMS" '
'value="4" name="Author_books-TOTAL_FORMS" />', html=True)
self.assertContains(
response,
'<input type="hidden" id="id_Author_books-0-id" value="%i" '
'name="Author_books-0-id" />' % self.author_book_auto_m2m_intermediate_id,
html=True
)
self.assertContains(response, 'id="id_Author_books-0-DELETE"')
def test_inline_change_fk_add_perm(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Add permission on inner2s, so we can add but not modify existing
self.assertContains(response, '<h2>Inner2s</h2>')
self.assertContains(response, 'Add another Inner2')
# 3 extra forms only, not the existing instance form
self.assertContains(
response,
'<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="3" '
'name="inner2_set-TOTAL_FORMS" />',
html=True
)
self.assertNotContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
def test_inline_change_fk_change_perm(self):
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Change permission on inner2s, so we can change existing but not add new
self.assertContains(response, '<h2>Inner2s</h2>')
# Just the one form for existing instances
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="1" name="inner2_set-TOTAL_FORMS" />', html=True)
self.assertContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
# max-num 0 means we can't add new ones
self.assertContains(response, '<input type="hidden" id="id_inner2_set-MAX_NUM_FORMS" '
'value="0" name="inner2_set-MAX_NUM_FORMS" />', html=True)
def test_inline_change_fk_add_change_perm(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Add/change perm, so we can add new and change existing
self.assertContains(response, '<h2>Inner2s</h2>')
# One form for existing instance and three extra for new
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="4" name="inner2_set-TOTAL_FORMS" />', html=True)
self.assertContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
def test_inline_change_fk_change_del_perm(self):
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Change/delete perm on inner2s, so we can change/delete existing
self.assertContains(response, '<h2>Inner2s</h2>')
# One form for existing instance only, no new
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="1" name="inner2_set-TOTAL_FORMS" />', html=True)
self.assertContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
self.assertContains(response, 'id="id_inner2_set-0-DELETE"')
def test_inline_change_fk_all_perms(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# All perms on inner2s, so we can add/change/delete
self.assertContains(response, '<h2>Inner2s</h2>')
# One form for the existing instance, plus three extra for new ones
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="4" name="inner2_set-TOTAL_FORMS" />', html=True)
self.assertContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
self.assertContains(response, 'id="id_inner2_set-0-DELETE"')
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_inlines.urls")
class SeleniumFirefoxTests(AdminSeleniumWebDriverTestCase):
available_apps = ['admin_inlines'] + AdminSeleniumWebDriverTestCase.available_apps
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def setUp(self):
# The password hash below corresponds to the plain-text password "secret"
User.objects.create(
pk=100, username='super', first_name='Super', last_name='User', email='[email protected]',
password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158', is_active=True, is_superuser=True,
is_staff=True, last_login=datetime.datetime(2007, 5, 30, 13, 20, 10),
date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
def test_add_stackeds(self):
"""
Ensure that the "Add another XXX" link correctly adds items to the
stacked formset.
"""
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:admin_inlines_holder4_add')))
inline_id = '#inner4stacked_set-group'
rows_length = lambda: len(self.selenium.find_elements_by_css_selector(
'%s .dynamic-inner4stacked_set' % inline_id))
self.assertEqual(rows_length(), 3)
add_button = self.selenium.find_element_by_link_text(
'Add another Inner4 stacked')
add_button.click()
self.assertEqual(rows_length(), 4)
def test_delete_stackeds(self):
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:admin_inlines_holder4_add')))
inline_id = '#inner4stacked_set-group'
rows_length = lambda: len(self.selenium.find_elements_by_css_selector(
'%s .dynamic-inner4stacked_set' % inline_id))
self.assertEqual(rows_length(), 3)
add_button = self.selenium.find_element_by_link_text(
'Add another Inner4 stacked')
add_button.click()
add_button.click()
self.assertEqual(rows_length(), 5, msg="sanity check")
for delete_link in self.selenium.find_elements_by_css_selector(
'%s .inline-deletelink' % inline_id):
delete_link.click()
self.assertEqual(rows_length(), 3)
def test_add_inlines(self):
"""
Ensure that the "Add another XXX" link correctly adds items to the
inline form.
"""
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:admin_inlines_profilecollection_add')))
# Check that there's only one inline to start with and that it has the
# correct ID.
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')), 1)
self.assertEqual(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')[0].get_attribute('id'),
'profile_set-0')
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-0 input[name=profile_set-0-first_name]')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-0 input[name=profile_set-0-last_name]')), 1)
# Add an inline
self.selenium.find_element_by_link_text('Add another Profile').click()
# Check that the inline has been added, that it has the right id, and
# that it contains the right fields.
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')), 2)
self.assertEqual(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')[1].get_attribute('id'), 'profile_set-1')
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-1 input[name=profile_set-1-first_name]')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-1 input[name=profile_set-1-last_name]')), 1)
# Let's add another one to be sure
self.selenium.find_element_by_link_text('Add another Profile').click()
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')), 3)
self.assertEqual(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')[2].get_attribute('id'), 'profile_set-2')
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-2 input[name=profile_set-2-first_name]')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-2 input[name=profile_set-2-last_name]')), 1)
# Enter some data and click 'Save'
self.selenium.find_element_by_name('profile_set-0-first_name').send_keys('0 first name 1')
self.selenium.find_element_by_name('profile_set-0-last_name').send_keys('0 last name 2')
self.selenium.find_element_by_name('profile_set-1-first_name').send_keys('1 first name 1')
self.selenium.find_element_by_name('profile_set-1-last_name').send_keys('1 last name 2')
self.selenium.find_element_by_name('profile_set-2-first_name').send_keys('2 first name 1')
self.selenium.find_element_by_name('profile_set-2-last_name').send_keys('2 last name 2')
self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
self.wait_page_loaded()
# Check that the objects have been created in the database
self.assertEqual(ProfileCollection.objects.all().count(), 1)
self.assertEqual(Profile.objects.all().count(), 3)
def test_delete_inlines(self):
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:admin_inlines_profilecollection_add')))
# Add a few inlines
self.selenium.find_element_by_link_text('Add another Profile').click()
self.selenium.find_element_by_link_text('Add another Profile').click()
self.selenium.find_element_by_link_text('Add another Profile').click()
self.selenium.find_element_by_link_text('Add another Profile').click()
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'#profile_set-group table tr.dynamic-profile_set')), 5)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-0')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-1')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-2')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-3')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-4')), 1)
# Click on a few delete buttons
self.selenium.find_element_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-1 td.delete a').click()
self.selenium.find_element_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-2 td.delete a').click()
# Verify that they're gone and that the IDs have been re-sequenced
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'#profile_set-group table tr.dynamic-profile_set')), 3)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-0')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-1')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-2')), 1)
def test_alternating_rows(self):
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
reverse('admin:admin_inlines_profilecollection_add')))
# Add a few inlines
self.selenium.find_element_by_link_text('Add another Profile').click()
self.selenium.find_element_by_link_text('Add another Profile').click()
row_selector = 'form#profilecollection_form tr.dynamic-profile_set'
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
"%s.row1" % row_selector)), 2, msg="Expect two row1 styled rows")
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
"%s.row2" % row_selector)), 1, msg="Expect one row2 styled row")
class SeleniumChromeTests(SeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class SeleniumIETests(SeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
|
|
from __future__ import absolute_import, unicode_literals
import collections
import json
import mock
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.utils import override_settings
from wagtail.api.v2 import signal_handlers
from wagtail.tests.demosite import models
from wagtail.tests.testapp.models import StreamPage
from wagtail.wagtailcore.models import Page
def get_total_page_count():
# Need to take away 1 as the root page is invisible over the API
return Page.objects.live().public().count() - 1
class TestPageListing(TestCase):
fixtures = ['demosite.json']
def get_response(self, **params):
return self.client.get(reverse('wagtailapi_v2:pages:listing'), params)
def get_page_id_list(self, content):
return [page['id'] for page in content['items']]
# BASIC TESTS
def test_basic(self):
response = self.get_response()
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-type'], 'application/json')
# Will crash if the JSON is invalid
content = json.loads(response.content.decode('UTF-8'))
# Check that the meta section is there
self.assertIn('meta', content)
self.assertIsInstance(content['meta'], dict)
# Check that the total count is there and correct
self.assertIn('total_count', content['meta'])
self.assertIsInstance(content['meta']['total_count'], int)
self.assertEqual(content['meta']['total_count'], get_total_page_count())
# Check that the items section is there
self.assertIn('items', content)
self.assertIsInstance(content['items'], list)
# Check that each page has a meta section with type, detail_url, html_url, slug and first_published_at attributes
for page in content['items']:
self.assertIn('meta', page)
self.assertIsInstance(page['meta'], dict)
self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url', 'html_url', 'slug', 'first_published_at'})
def test_unpublished_pages_dont_appear_in_list(self):
total_count = get_total_page_count()
page = models.BlogEntryPage.objects.get(id=16)
page.unpublish()
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(content['meta']['total_count'], total_count - 1)
def test_private_pages_dont_appear_in_list(self):
total_count = get_total_page_count()
page = models.BlogIndexPage.objects.get(id=5)
page.view_restrictions.create(password='test')
new_total_count = get_total_page_count()
self.assertNotEqual(total_count, new_total_count)
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(content['meta']['total_count'], new_total_count)
# TYPE FILTER
def test_type_filter_items_are_all_blog_entries(self):
response = self.get_response(type='demosite.BlogEntryPage')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(page['meta']['type'], 'demosite.BlogEntryPage')
# No specific fields available by default
self.assertEqual(set(page.keys()), {'id', 'meta', 'title'})
def test_type_filter_total_count(self):
response = self.get_response(type='demosite.BlogEntryPage')
content = json.loads(response.content.decode('UTF-8'))
# Total count must be reduced as this filters the results
self.assertEqual(content['meta']['total_count'], 3)
def test_type_filter_multiple(self):
response = self.get_response(type='demosite.BlogEntryPage,demosite.EventPage')
content = json.loads(response.content.decode('UTF-8'))
blog_page_seen = False
event_page_seen = False
for page in content['items']:
self.assertIn(page['meta']['type'], ['demosite.BlogEntryPage', 'demosite.EventPage'])
if page['meta']['type'] == 'demosite.BlogEntryPage':
blog_page_seen = True
elif page['meta']['type'] == 'demosite.EventPage':
event_page_seen = True
# Only generic fields available
self.assertEqual(set(page.keys()), {'id', 'meta', 'title'})
self.assertTrue(blog_page_seen, "No blog pages were found in the items")
self.assertTrue(event_page_seen, "No event pages were found in the items")
def test_non_existant_type_gives_error(self):
response = self.get_response(type='demosite.IDontExist')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "type doesn't exist"})
def test_non_page_type_gives_error(self):
response = self.get_response(type='auth.User')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "type doesn't exist"})
# FIELDS
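# The tests below exercise the ?fields= query parameter as used in this suite:
# comma-separated names add fields, a leading '-' removes a field, '*' (first
# position only) selects all fields, '_' drops all default fields, and
# parentheses select nested fields, e.g. fields='feed_image(width,height)'.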
def test_fields_default(self):
response = self.get_response(type='demosite.BlogEntryPage')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'title'})
self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url', 'html_url', 'slug', 'first_published_at'})
def test_fields(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='title,date,feed_image')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'title', 'date', 'feed_image'})
def test_remove_fields(self):
response = self.get_response(fields='-title')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta'})
def test_remove_meta_fields(self):
response = self.get_response(fields='-html_url')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'title'})
self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url', 'slug', 'first_published_at'})
def test_remove_all_meta_fields(self):
response = self.get_response(fields='-type,-detail_url,-slug,-first_published_at,-html_url')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'title'})
def test_remove_id_field(self):
response = self.get_response(fields='-id')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'meta', 'title'})
def test_all_fields(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='*')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'title', 'date', 'related_links', 'tags', 'carousel_items', 'body', 'feed_image'})
self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url', 'show_in_menus', 'first_published_at', 'seo_title', 'slug', 'html_url', 'search_description'})
def test_all_fields_then_remove_something(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='*,-title,-date,-seo_title')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'related_links', 'tags', 'carousel_items', 'body', 'feed_image'})
self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url', 'show_in_menus', 'first_published_at', 'slug', 'html_url', 'search_description'})
def test_remove_all_fields(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='_,id,type')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta'})
self.assertEqual(set(page['meta'].keys()), {'type'})
def test_nested_fields(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='feed_image(width,height)')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page['feed_image'].keys()), {'id', 'meta', 'title', 'width', 'height'})
def test_remove_nested_fields(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='feed_image(-title)')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page['feed_image'].keys()), {'id', 'meta'})
def test_all_nested_fields(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='feed_image(*)')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page['feed_image'].keys()), {'id', 'meta', 'title', 'width', 'height'})
def test_remove_all_nested_fields(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='feed_image(_,id)')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page['feed_image'].keys()), {'id'})
def test_nested_nested_fields(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='carousel_items(image(width,height))')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
for carousel_item in page['carousel_items']:
# Note: inline objects default to displaying all fields
self.assertEqual(set(carousel_item.keys()), {'id', 'meta', 'image', 'embed_url', 'caption', 'link'})
self.assertEqual(set(carousel_item['image'].keys()), {'id', 'meta', 'title', 'width', 'height'})
def test_fields_child_relation(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='title,related_links')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'title', 'related_links'})
self.assertIsInstance(page['related_links'], list)
def test_fields_foreign_key(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='title,date,feed_image')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
feed_image = page['feed_image']
if feed_image is not None:
self.assertIsInstance(feed_image, dict)
self.assertEqual(set(feed_image.keys()), {'id', 'meta', 'title'})
self.assertIsInstance(feed_image['id'], int)
self.assertIsInstance(feed_image['meta'], dict)
self.assertEqual(set(feed_image['meta'].keys()), {'type', 'detail_url'})
self.assertEqual(feed_image['meta']['type'], 'wagtailimages.Image')
self.assertEqual(feed_image['meta']['detail_url'], 'http://localhost/api/v2beta/images/%d/' % feed_image['id'])
def test_fields_tags(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='tags')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'tags', 'title'})
self.assertIsInstance(page['tags'], list)
def test_fields_ordering(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='date,title,feed_image,related_links')
# Will crash if the JSON is invalid
content = json.loads(response.content.decode('UTF-8'))
# Test field order
content = json.JSONDecoder(object_pairs_hook=collections.OrderedDict).decode(response.content.decode('UTF-8'))
field_order = [
'id',
'meta',
'title',
'date',
'feed_image',
'related_links',
]
self.assertEqual(list(content['items'][0].keys()), field_order)
def test_star_in_wrong_position_gives_error(self):
response = self.get_response(fields='title,*')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "fields error: '*' must be in the first position"})
def test_unknown_nested_fields_give_error(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='feed_image(123,title,abc)')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "unknown fields: 123, abc"})
def test_parent_field_gives_error(self):
# parent field isn't allowed in listings
response = self.get_response(fields='parent')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "unknown fields: parent"})
def test_fields_without_type_gives_error(self):
response = self.get_response(fields='title,related_links')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "unknown fields: related_links"})
def test_fields_which_are_not_in_api_fields_gives_error(self):
response = self.get_response(fields='path')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "unknown fields: path"})
def test_fields_unknown_field_gives_error(self):
response = self.get_response(fields='123,title,abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "unknown fields: 123, abc"})
def test_fields_remove_unknown_field_gives_error(self):
response = self.get_response(fields='-123,-title,-abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "unknown fields: 123, abc"})
def test_nested_fields_on_non_relational_field_gives_error(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='title(foo,bar)')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "'title' does not support nested fields"})
# FILTERING
def test_filtering_exact_filter(self):
response = self.get_response(title='Home page')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [2])
def test_filtering_exact_filter_on_specific_field(self):
response = self.get_response(type='demosite.BlogEntryPage', date='2013-12-02')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [16])
def test_filtering_on_id(self):
response = self.get_response(id=16)
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [16])
def test_filtering_on_boolean(self):
response = self.get_response(show_in_menus='false')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [8, 9, 16, 18, 19, 17])
def test_filtering_doesnt_work_on_specific_fields_without_type(self):
response = self.get_response(date='2013-12-02')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "query parameter is not an operation or a recognised field: date"})
def test_filtering_tags(self):
response = self.get_response(type='demosite.BlogEntryPage', tags='wagtail')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [16, 18])
def test_filtering_multiple_tags(self):
response = self.get_response(type='demosite.BlogEntryPage', tags='wagtail,bird')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [16])
def test_filtering_unknown_field_gives_error(self):
response = self.get_response(not_a_field='abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "query parameter is not an operation or a recognised field: not_a_field"})
def test_filtering_int_validation(self):
response = self.get_response(id='abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "field filter error. 'abc' is not a valid value for id (invalid literal for int() with base 10: 'abc')"})
def test_filtering_boolean_validation(self):
response = self.get_response(show_in_menus='abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "field filter error. 'abc' is not a valid value for show_in_menus (expected 'true' or 'false', got 'abc')"})
# CHILD OF FILTER
def test_child_of_filter(self):
response = self.get_response(child_of=5)
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [16, 18, 19])
def test_child_of_root(self):
# "root" gets children of the homepage of the current site
response = self.get_response(child_of='root')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [4, 5, 6, 20, 12])
def test_child_of_with_type(self):
response = self.get_response(type='demosite.EventPage', child_of=5)
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [])
def test_child_of_unknown_page_gives_error(self):
response = self.get_response(child_of=1000)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "parent page doesn't exist"})
def test_child_of_not_integer_gives_error(self):
response = self.get_response(child_of='abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "child_of must be a positive integer"})
def test_child_of_page_thats_not_in_same_site_gives_error(self):
# Root page is not in any site, so pretend it doesn't exist
response = self.get_response(child_of=1)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "parent page doesn't exist"})
# DESCENDANT OF FILTER
def test_descendant_of_filter(self):
response = self.get_response(descendant_of=6)
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [10, 15, 17, 21, 22, 23])
def test_descendant_of_root(self):
# "root" gets decendants of the homepage of the current site
# Basically returns every page except the homepage
response = self.get_response(descendant_of='root')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [4, 8, 9, 5, 16, 18, 19, 6, 10, 15, 17, 21, 22, 23, 20, 13, 14, 12])
def test_descendant_of_with_type(self):
response = self.get_response(type='tests.EventPage', descendant_of=6)
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [])
def test_descendant_of_unknown_page_gives_error(self):
response = self.get_response(descendant_of=1000)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "ancestor page doesn't exist"})
def test_descendant_of_not_integer_gives_error(self):
response = self.get_response(descendant_of='abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "descendant_of must be a positive integer"})
def test_descendant_of_page_thats_not_in_same_site_gives_error(self):
# Root page is not in any site, so pretend it doesn't exist
response = self.get_response(descendant_of=1)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "ancestor page doesn't exist"})
def test_descendant_of_when_filtering_by_child_of_gives_error(self):
response = self.get_response(descendant_of=6, child_of=5)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "filtering by descendant_of with child_of is not supported"})
# ORDERING
def test_ordering_default(self):
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [2, 4, 8, 9, 5, 16, 18, 19, 6, 10, 15, 17, 21, 22, 23, 20, 13, 14, 12])
def test_ordering_by_title(self):
response = self.get_response(order='title')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [21, 22, 19, 23, 5, 16, 18, 12, 14, 8, 9, 4, 2, 13, 20, 17, 6, 10, 15])
def test_ordering_by_title_backwards(self):
response = self.get_response(order='-title')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [15, 10, 6, 17, 20, 13, 2, 4, 9, 8, 14, 12, 18, 16, 5, 23, 19, 22, 21])
def test_ordering_by_random(self):
response_1 = self.get_response(order='random')
content_1 = json.loads(response_1.content.decode('UTF-8'))
page_id_list_1 = self.get_page_id_list(content_1)
response_2 = self.get_response(order='random')
content_2 = json.loads(response_2.content.decode('UTF-8'))
page_id_list_2 = self.get_page_id_list(content_2)
self.assertNotEqual(page_id_list_1, page_id_list_2)
def test_ordering_by_random_backwards_gives_error(self):
response = self.get_response(order='-random')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "cannot order by 'random' (unknown field)"})
def test_ordering_by_random_with_offset_gives_error(self):
response = self.get_response(order='random', offset=10)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "random ordering with offset is not supported"})
def test_ordering_default_with_type(self):
response = self.get_response(type='demosite.BlogEntryPage')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [16, 18, 19])
def test_ordering_by_title_with_type(self):
response = self.get_response(type='demosite.BlogEntryPage', order='title')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [19, 16, 18])
def test_ordering_by_specific_field_with_type(self):
response = self.get_response(type='demosite.BlogEntryPage', order='date')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [16, 18, 19])
def test_ordering_by_unknown_field_gives_error(self):
response = self.get_response(order='not_a_field')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "cannot order by 'not_a_field' (unknown field)"})
# LIMIT
def test_limit_only_two_items_returned(self):
response = self.get_response(limit=2)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(len(content['items']), 2)
def test_limit_total_count(self):
response = self.get_response(limit=2)
content = json.loads(response.content.decode('UTF-8'))
# The total count must not be affected by "limit"
self.assertEqual(content['meta']['total_count'], get_total_page_count())
def test_limit_not_integer_gives_error(self):
response = self.get_response(limit='abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "limit must be a positive integer"})
def test_limit_too_high_gives_error(self):
response = self.get_response(limit=1000)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "limit cannot be higher than 20"})
@override_settings(WAGTAILAPI_LIMIT_MAX=10)
def test_limit_maximum_can_be_changed(self):
response = self.get_response(limit=20)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "limit cannot be higher than 10"})
@override_settings(WAGTAILAPI_LIMIT_MAX=2)
def test_limit_default_changes_with_max(self):
# The default limit is 20. If WAGTAILAPI_LIMIT_MAX is less than that,
# the default should change accordingly.
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(len(content['items']), 2)
# OFFSET
def test_offset_5_usually_appears_5th_in_list(self):
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list.index(5), 4)
def test_offset_5_moves_after_offset(self):
response = self.get_response(offset=4)
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list.index(5), 0)
def test_offset_total_count(self):
response = self.get_response(offset=10)
content = json.loads(response.content.decode('UTF-8'))
# The total count must not be affected by "offset"
self.assertEqual(content['meta']['total_count'], get_total_page_count())
def test_offset_not_integer_gives_error(self):
response = self.get_response(offset='abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "offset must be a positive integer"})
# SEARCH
def test_search_for_blog(self):
response = self.get_response(search='blog')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
# Check that the items are the blog index and three blog pages
self.assertEqual(set(page_id_list), set([5, 16, 18, 19]))
def test_search_with_type(self):
response = self.get_response(type='demosite.BlogEntryPage', search='blog')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(set(page_id_list), set([16, 18, 19]))
def test_search_with_order(self):
response = self.get_response(search='blog', order='title')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [19, 5, 16, 18])
@override_settings(WAGTAILAPI_SEARCH_ENABLED=False)
def test_search_when_disabled_gives_error(self):
response = self.get_response(search='blog')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "search is disabled"})
def test_search_when_filtering_by_tag_gives_error(self):
response = self.get_response(type='demosite.BlogEntryPage', search='blog', tags='wagtail')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "filtering by tag with a search query is not supported"})
def test_search_operator_and(self):
response = self.get_response(type='demosite.BlogEntryPage', search='blog again', search_operator='and')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(set(page_id_list), set([18]))
def test_search_operator_or(self):
response = self.get_response(type='demosite.BlogEntryPage', search='blog again', search_operator='or')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(set(page_id_list), set([16, 18, 19]))
class TestPageDetail(TestCase):
fixtures = ['demosite.json']
def get_response(self, page_id, **params):
return self.client.get(reverse('wagtailapi_v2:pages:detail', args=(page_id, )), params)
def test_basic(self):
response = self.get_response(16)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-type'], 'application/json')
# Will crash if the JSON is invalid
content = json.loads(response.content.decode('UTF-8'))
# Check the id field
self.assertIn('id', content)
self.assertEqual(content['id'], 16)
# Check that the meta section is there
self.assertIn('meta', content)
self.assertIsInstance(content['meta'], dict)
# Check the meta type
self.assertIn('type', content['meta'])
self.assertEqual(content['meta']['type'], 'demosite.BlogEntryPage')
# Check the meta detail_url
self.assertIn('detail_url', content['meta'])
self.assertEqual(content['meta']['detail_url'], 'http://localhost/api/v2beta/pages/16/')
# Check the meta html_url
self.assertIn('html_url', content['meta'])
self.assertEqual(content['meta']['html_url'], 'http://localhost/blog-index/blog-post/')
# Check the parent field
self.assertIn('parent', content['meta'])
self.assertIsInstance(content['meta']['parent'], dict)
self.assertEqual(set(content['meta']['parent'].keys()), {'id', 'meta', 'title'})
self.assertEqual(content['meta']['parent']['id'], 5)
self.assertIsInstance(content['meta']['parent']['meta'], dict)
self.assertEqual(set(content['meta']['parent']['meta'].keys()), {'type', 'detail_url', 'html_url'})
self.assertEqual(content['meta']['parent']['meta']['type'], 'demosite.BlogIndexPage')
self.assertEqual(content['meta']['parent']['meta']['detail_url'], 'http://localhost/api/v2beta/pages/5/')
self.assertEqual(content['meta']['parent']['meta']['html_url'], 'http://localhost/blog-index/')
# Check that the custom fields are included
self.assertIn('date', content)
self.assertIn('body', content)
self.assertIn('tags', content)
self.assertIn('feed_image', content)
self.assertIn('related_links', content)
self.assertIn('carousel_items', content)
# Check that the date was serialised properly
self.assertEqual(content['date'], '2013-12-02')
# Check that the tags were serialised properly
self.assertEqual(content['tags'], ['bird', 'wagtail'])
# Check that the feed image was serialised properly
self.assertIsInstance(content['feed_image'], dict)
self.assertEqual(set(content['feed_image'].keys()), {'id', 'meta', 'title'})
self.assertEqual(content['feed_image']['id'], 7)
self.assertIsInstance(content['feed_image']['meta'], dict)
self.assertEqual(set(content['feed_image']['meta'].keys()), {'type', 'detail_url'})
self.assertEqual(content['feed_image']['meta']['type'], 'wagtailimages.Image')
self.assertEqual(content['feed_image']['meta']['detail_url'], 'http://localhost/api/v2beta/images/7/')
# Check that the child relations were serialised properly
self.assertEqual(content['related_links'], [])
for carousel_item in content['carousel_items']:
self.assertEqual(set(carousel_item.keys()), {'id', 'meta', 'embed_url', 'link', 'caption', 'image'})
self.assertEqual(set(carousel_item['meta'].keys()), {'type'})
def test_meta_parent_id_doesnt_show_root_page(self):
# Root page isn't in the site, so don't show it when the user is looking at the home page
response = self.get_response(2)
content = json.loads(response.content.decode('UTF-8'))
self.assertIsNone(content['meta']['parent'])
def test_field_ordering(self):
response = self.get_response(16)
# Will crash if the JSON is invalid
content = json.loads(response.content.decode('UTF-8'))
# Test field order
content = json.JSONDecoder(object_pairs_hook=collections.OrderedDict).decode(response.content.decode('UTF-8'))
field_order = [
'id',
'meta',
'title',
'body',
'tags',
'date',
'feed_image',
'carousel_items',
'related_links',
]
self.assertEqual(list(content.keys()), field_order)
def test_null_foreign_key(self):
models.BlogEntryPage.objects.filter(id=16).update(feed_image_id=None)
response = self.get_response(16)
content = json.loads(response.content.decode('UTF-8'))
self.assertIn('related_links', content)
self.assertEqual(content['feed_image'], None)
# FIELDS
def test_remove_fields(self):
response = self.get_response(16, fields='-title')
content = json.loads(response.content.decode('UTF-8'))
self.assertIn('id', set(content.keys()))
self.assertNotIn('title', set(content.keys()))
def test_remove_meta_fields(self):
response = self.get_response(16, fields='-html_url')
content = json.loads(response.content.decode('UTF-8'))
self.assertIn('detail_url', set(content['meta'].keys()))
self.assertNotIn('html_url', set(content['meta'].keys()))
def test_remove_all_meta_fields(self):
response = self.get_response(16, fields='-type,-detail_url,-slug,-first_published_at,-html_url,-search_description,-show_in_menus,-parent,-seo_title')
content = json.loads(response.content.decode('UTF-8'))
self.assertIn('id', set(content.keys()))
self.assertNotIn('meta', set(content.keys()))
def test_remove_id_field(self):
response = self.get_response(16, fields='-id')
content = json.loads(response.content.decode('UTF-8'))
self.assertIn('title', set(content.keys()))
self.assertNotIn('id', set(content.keys()))
def test_remove_all_fields(self):
response = self.get_response(16, fields='_,id,type')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(set(content.keys()), {'id', 'meta'})
self.assertEqual(set(content['meta'].keys()), {'type'})
def test_nested_fields(self):
response = self.get_response(16, fields='feed_image(width,height)')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(set(content['feed_image'].keys()), {'id', 'meta', 'title', 'width', 'height'})
def test_remove_nested_fields(self):
response = self.get_response(16, fields='feed_image(-title)')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(set(content['feed_image'].keys()), {'id', 'meta'})
def test_all_nested_fields(self):
response = self.get_response(16, fields='feed_image(*)')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(set(content['feed_image'].keys()), {'id', 'meta', 'title', 'width', 'height'})
def test_remove_all_nested_fields(self):
response = self.get_response(16, fields='feed_image(_,id)')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(set(content['feed_image'].keys()), {'id'})
def test_nested_nested_fields(self):
response = self.get_response(16, fields='carousel_items(image(width,height))')
content = json.loads(response.content.decode('UTF-8'))
for carousel_item in content['carousel_items']:
# Note: inline objects default to displaying all fields
self.assertEqual(set(carousel_item.keys()), {'id', 'meta', 'image', 'embed_url', 'caption', 'link'})
self.assertEqual(set(carousel_item['image'].keys()), {'id', 'meta', 'title', 'width', 'height'})
def test_fields_child_relation_is_list(self):
response = self.get_response(16)
content = json.loads(response.content.decode('UTF-8'))
self.assertIsInstance(content['related_links'], list)
def test_fields_foreign_key(self):
response = self.get_response(16)
content = json.loads(response.content.decode('UTF-8'))
feed_image = content['feed_image']
self.assertIsInstance(feed_image, dict)
self.assertEqual(set(feed_image.keys()), {'id', 'meta', 'title'})
self.assertIsInstance(feed_image['id'], int)
self.assertIsInstance(feed_image['meta'], dict)
self.assertEqual(set(feed_image['meta'].keys()), {'type', 'detail_url'})
self.assertEqual(feed_image['meta']['type'], 'wagtailimages.Image')
self.assertEqual(feed_image['meta']['detail_url'], 'http://localhost/api/v2beta/images/%d/' % feed_image['id'])
def test_star_in_wrong_position_gives_error(self):
response = self.get_response(16, fields='title,*')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "fields error: '*' must be in the first position"})
def test_unknown_nested_fields_give_error(self):
response = self.get_response(16, fields='feed_image(123,title,abc)')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "unknown fields: 123, abc"})
def test_fields_which_are_not_in_api_fields_gives_error(self):
response = self.get_response(16, fields='path')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "unknown fields: path"})
def test_fields_unknown_field_gives_error(self):
response = self.get_response(16, fields='123,title,abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "unknown fields: 123, abc"})
def test_fields_remove_unknown_field_gives_error(self):
response = self.get_response(16, fields='-123,-title,-abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "unknown fields: 123, abc"})
def test_nested_fields_on_non_relational_field_gives_error(self):
response = self.get_response(16, fields='title(foo,bar)')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "'title' does not support nested fields"})
class TestPageDetailWithStreamField(TestCase):
fixtures = ['test.json']
def setUp(self):
self.homepage = Page.objects.get(url_path='/home/')
def make_stream_page(self, body):
stream_page = StreamPage(
title='stream page',
slug='stream-page',
body=body
)
return self.homepage.add_child(instance=stream_page)
def test_can_fetch_streamfield_content(self):
stream_page = self.make_stream_page('[{"type": "text", "value": "foo"}]')
response_url = reverse('wagtailapi_v2:pages:detail', args=(stream_page.id, ))
response = self.client.get(response_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['content-type'], 'application/json')
content = json.loads(response.content.decode('utf-8'))
self.assertIn('id', content)
self.assertEqual(content['id'], stream_page.id)
self.assertIn('body', content)
self.assertEqual(content['body'], [{'type': 'text', 'value': 'foo'}])
def test_image_block(self):
stream_page = self.make_stream_page('[{"type": "image", "value": 1}]')
response_url = reverse('wagtailapi_v2:pages:detail', args=(stream_page.id, ))
response = self.client.get(response_url)
content = json.loads(response.content.decode('utf-8'))
# ForeignKeys in a StreamField shouldn't be translated into dictionary representation
self.assertEqual(content['body'], [{'type': 'image', 'value': 1}])
@override_settings(
WAGTAILFRONTENDCACHE={
'varnish': {
'BACKEND': 'wagtail.contrib.wagtailfrontendcache.backends.HTTPBackend',
'LOCATION': 'http://localhost:8000',
},
},
WAGTAILAPI_BASE_URL='http://api.example.com',
)
@mock.patch('wagtail.contrib.wagtailfrontendcache.backends.HTTPBackend.purge')
class TestPageCacheInvalidation(TestCase):
fixtures = ['demosite.json']
@classmethod
def setUpClass(cls):
super(TestPageCacheInvalidation, cls).setUpClass()
signal_handlers.register_signal_handlers()
@classmethod
def tearDownClass(cls):
super(TestPageCacheInvalidation, cls).tearDownClass()
signal_handlers.unregister_signal_handlers()
def test_republish_page_purges(self, purge):
Page.objects.get(id=2).save_revision().publish()
purge.assert_any_call('http://api.example.com/api/v2beta/pages/2/')
def test_unpublish_page_purges(self, purge):
Page.objects.get(id=2).unpublish()
purge.assert_any_call('http://api.example.com/api/v2beta/pages/2/')
def test_delete_page_purges(self, purge):
Page.objects.get(id=16).delete()
purge.assert_any_call('http://api.example.com/api/v2beta/pages/16/')
def test_save_draft_doesnt_purge(self, purge):
Page.objects.get(id=2).save_revision()
purge.assert_not_called()
|
|
# Copyright (c) 2013, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""CPUID register decoding."""
import bits
from bits.platformbase import CPUID, cpuidfield
import struct
EAX = 0
EBX = 1
ECX = 2
EDX = 3
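# Register indices passed as the first argument to cpuidfield(reg, msb, lsb, ...)
# below: each field selects bits [msb:lsb] of that register for its leaf.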
class LEAF_0(CPUID):
"""Basic CPUID information including vendor and max supported basic leaf."""
leaf = 0x0
max_leaf = cpuidfield(EAX, 31, 0, doc="Highest value the CPUID instruction recognizes for returning basic processor information")
@property
def vendor(self):
"""Vendor identification string"""
return struct.pack('III', self.regs.ebx, self.regs.edx, self.regs.ecx)
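# Illustration (well-known layout): on Intel parts the concatenation of
# EBX, EDX, ECX spells "GenuineIntel" -- EBX="Genu", EDX="ineI", ECX="ntel".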
class LEAF_1(CPUID):
"""Basic CPUID Information
Contains version information (type, family, model, and stepping ID); brand index; CLFLUSH
line size; maximum number of addressable IDs for logical processors in the
physical package; initial APIC ID; and feature information"""
leaf = 0x1
stepping = cpuidfield(EAX, 3, 0, doc="Stepping ID")
model = cpuidfield(EAX, 7, 4, doc="Model")
family = cpuidfield(EAX, 11, 8, doc="Family ID")
processor_type = cpuidfield(EAX, 13, 12, doc="Processor Type")
ext_model = cpuidfield(EAX, 19, 16, doc="Extended Model ID")
ext_family = cpuidfield(EAX, 27, 20, doc="Extended Family ID")
brand_index = cpuidfield(EBX, 7, 0, doc="Brand index")
CLFLUSH_line_size = cpuidfield(EBX, 15, 8, doc="CLFLUSH instruction cache line size (in 8-byte words)")
max_logical_processor_ids = cpuidfield(EBX, 23, 16, doc="The maximum number of addressable IDs for logical processors in the physical package.")
initial_apic_id = cpuidfield(EBX, 31, 24, doc="Initial APIC ID")
sse3 = cpuidfield(ECX, 0, 0)
pclmulqdq = cpuidfield(ECX, 1, 1)
dtes64 = cpuidfield(ECX, 2, 2)
monitor = cpuidfield(ECX, 3, 3)
ds_cpl = cpuidfield(ECX, 4, 4)
vmx = cpuidfield(ECX, 5, 5)
smx = cpuidfield(ECX, 6, 6)
est = cpuidfield(ECX, 7, 7)
tm2 = cpuidfield(ECX, 8, 8)
ssse3 = cpuidfield(ECX, 9, 9)
cnxt_id = cpuidfield(ECX, 10, 10)
fma = cpuidfield(ECX, 12, 12)
cmpxchg16b = cpuidfield(ECX, 13, 13)
xtpr = cpuidfield(ECX, 14, 14)
pdcm = cpuidfield(ECX, 15, 15)
pcid = cpuidfield(ECX, 17, 17)
dca = cpuidfield(ECX, 18, 18)
sse4_1 = cpuidfield(ECX, 19, 19)
sse4_2 = cpuidfield(ECX, 20, 20)
x2apic = cpuidfield(ECX, 21, 21)
movbe = cpuidfield(ECX, 22, 22)
popcnt = cpuidfield(ECX, 23, 23)
tsc_deadline = cpuidfield(ECX, 24, 24)
aes = cpuidfield(ECX, 25, 25)
xsave = cpuidfield(ECX, 26, 26)
osxsave = cpuidfield(ECX, 27, 27)
avx = cpuidfield(ECX, 28, 28)
f16c = cpuidfield(ECX, 29, 29)
rdrand = cpuidfield(ECX, 30, 30)
fpu = cpuidfield(EDX, 0, 0)
vme = cpuidfield(EDX, 1, 1)
de = cpuidfield(EDX, 2, 2)
pse = cpuidfield(EDX, 3, 3)
tsc = cpuidfield(EDX, 4, 4)
msr = cpuidfield(EDX, 5, 5)
pae = cpuidfield(EDX, 6, 6)
mce = cpuidfield(EDX, 7, 7)
cx8 = cpuidfield(EDX, 8, 8)
apic = cpuidfield(EDX, 9, 9)
sep = cpuidfield(EDX, 11, 11)
mtrr = cpuidfield(EDX, 12, 12)
pge = cpuidfield(EDX, 13, 13)
mca = cpuidfield(EDX, 14, 14)
cmov = cpuidfield(EDX, 15, 15)
pat = cpuidfield(EDX, 16, 16)
pse36 = cpuidfield(EDX, 17, 17)
psn = cpuidfield(EDX, 18, 18)
clfsh = cpuidfield(EDX, 19, 19)
ds = cpuidfield(EDX, 21, 21)
acpi = cpuidfield(EDX, 22, 22)
mmx = cpuidfield(EDX, 23, 23)
fxsr = cpuidfield(EDX, 24, 24)
sse = cpuidfield(EDX, 25, 25)
sse2 = cpuidfield(EDX, 26, 26)
ss = cpuidfield(EDX, 27, 27)
htt = cpuidfield(EDX, 28, 28)
tm = cpuidfield(EDX, 29, 29)
pbe = cpuidfield(EDX, 31, 31)
@property
def display_family(self):
if self.family == 0xf:
return self.ext_family + self.family
return self.family
@property
def display_model(self):
if self.family == 0xf or self.family == 0x6:
# "<<" binds less tightly than "+", so the shift must be parenthesized
return (self.ext_model << 4) + self.model
return self.model
class LEAF_2(CPUID):
"""TLB, Cache, and Prefetch Information"""
leaf = 0x2
times_to_run = cpuidfield(EAX, 7, 0, doc="Number of times CPUID must be executed with EAX = 2 to retrieve a complete description of the processor's TLB, Cache, and Prefetch hardware")
class LEAF_4(CPUID):
"""Deterministic cache parameters
Returns encoded data that describes a set of deterministic cache parameters
for the cache level associated in ECX"""
leaf = 0x4
cache_type = cpuidfield(EAX, 4, 0, doc="Cache Type Field")
cache_level = cpuidfield(EAX, 7, 5, doc="Cache Level")
self_initializing = cpuidfield(EAX, 8, 8, doc="Self Initializing Cache Level")
fully_associative = cpuidfield(EAX, 9, 9, doc="Fully Associative Cache")
max_logical_processors_sharing_cache_z = cpuidfield(EAX, 25, 14, doc="Max number of addressable IDs for logical processors sharing this cache (zero based)")
max_cores_sharing_cache_z = cpuidfield(EAX, 31, 26, doc="Max number of addressable IDs for processor cores in the physical package (zero based)")
line_size_z = cpuidfield(EBX, 11, 0, doc="System Coherency Line Size (zero-based)")
partitions_z = cpuidfield(EBX, 21, 12, doc="Physical Line Partitions (zero-based)")
ways_z = cpuidfield(EBX, 31, 22, doc="Ways of associativity (zero-based)")
sets_z = cpuidfield(ECX, 31, 0, doc="Sets (zero-based)")
write_back_invalidate = cpuidfield(EDX, 0, 0, doc="Write-back Invalidate/Invalidate")
cache_inclusiveness = cpuidfield(EDX, 1, 1, doc="Cache Inclusiveness")
complex_cache_indexing = cpuidfield(EDX, 2, 2, doc="Complex Cache indexing")
@property
def max_logical_processors_sharing_cache(self):
"""Maximum number of addressable IDs for logical processors sharing this cache"""
return self.max_logical_processors_sharing_cache_z + 1
@property
def max_cores_sharing_cache(self):
"""Maximum number of addressable IDs for processor cores in the physical pacakge"""
return self.max_cores_sharing_cache_z + 1
@property
def partitions(self):
"""Number of physical line partitions"""
return self.partitions_z + 1
@property
def line_size(self):
"""System Coherency line size"""
return self.line_size_z + 1
@property
def ways(self):
"""Ways of associativity"""
return self.ways_z + 1
@property
def sets(self):
"""Number of sets"""
return self.sets_z + 1
@property
def cache_size(self):
"""Cache size in bytes"""
return self.ways * self.partitions * self.line_size * self.sets
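# Worked example for the cache_size formula above (hypothetical register values,
# not read from hardware): a cache reporting ways=8, partitions=1, line_size=64
# and sets=8192 decodes to 8 * 1 * 64 * 8192 = 4194304 bytes (4 MiB).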
class LEAF_5(CPUID):
"""MONITOR/MWAIT Leaf
Returns information about features available to MONITOR/MWAIT instructions"""
leaf = 0x5
smallest_monitor_line_size = cpuidfield(EAX, 15, 0, doc="Smallest monitor-line size in bytes")
largest_monitor_line_size = cpuidfield(EBX, 15, 0, doc="Largest monitor-line size in bytes")
monitor_mwait_supported = cpuidfield(ECX, 0, 0, doc="Enumeration of MONITOR/MWAIT extensions supported")
interrupt_break_event_supported = cpuidfield(ECX, 1, 1, doc="Supports treating interrupts as break-events for MWAIT, even when interrupts disabled")
c0 = cpuidfield(EDX, 3, 0, doc="Number of C0 sub C-states supported using MWAIT")
c1 = cpuidfield(EDX, 7, 4, doc="Number of C1 sub C-states supported using MWAIT")
c2 = cpuidfield(EDX, 11, 8, doc="Number of C2 sub C-states supported using MWAIT")
c3 = cpuidfield(EDX, 15, 12, doc="Number of C3 sub C-states supported using MWAIT")
c4 = cpuidfield(EDX, 19, 16, doc="Number of C4 sub C-states supported using MWAIT")
class LEAF_6(CPUID):
"""Thermal and Power Management leaf
Returns information about the maximum input values for sub-leaves that contain extended feature flags."""
leaf = 0x6
digital_temperature_sensor_supported = cpuidfield(EAX, 0, 0, doc = "Digital temperature sensor is supported if set")
turbo_boost_available = cpuidfield(EAX, 1, 1, doc = "Intel Turbo Boost technology available")
arat_supported = cpuidfield(EAX, 2, 2, doc = "APIC-Timer-always-running feature is supported if set")
pln_supported = cpuidfield(EAX, 4, 4, doc = "Power limit notification controls are supported if set")
ecmd_supported = cpuidfield(EAX, 5, 5, doc = "Clock modulation duty cycle extension is supported if set")
package_thermal_management_supported = cpuidfield(EAX, 6, 6, doc = "Package thermal management is supported if set")
num_interrupt_thresholds = cpuidfield(EBX, 3, 0, doc="Number of interrupt thresholds in digital thermal sensor")
hardware_coordination_feedback_capability = cpuidfield(ECX, 0, 0, doc="Hardware coordination feedback capability")
performance_energy_bias = cpuidfield(ECX, 3, 3, doc="Performance-energy bias preference support")
class LEAF_7(CPUID):
"""Structured Extended Feature Flags Enumeration Leaf
Returns information about the maximum input value for sub-leaves that contain
extended feature flags"""
leaf = 0x7
max_input_values = cpuidfield(EAX, 31, 0, doc="Reports the maximum input value for supported leaf 7 sub-leaves")
fsgsbase = cpuidfield(EBX, 0, 0, doc="Supports RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE if 1")
ia32_tsc_adjust_msr = cpuidfield(EBX, 1, 1, doc="IA32_TSC_ADJUST MSR is supported if 1")
bmi1 = cpuidfield(EBX, 3, 3)
hle = cpuidfield(EBX, 4, 4)
avx2 = cpuidfield(EBX, 5, 5)
smep = cpuidfield(EBX, 7, 7, doc="Supports Supervisor Mode Execution Protection if 1")
bmi2 = cpuidfield(EBX, 8, 8)
erms = cpuidfield(EBX, 9, 9, doc="Supports Enhanced REP MOVSB/STOSB if 1")
invpcid = cpuidfield(EBX, 10, 10, doc="Supports INVPCID instruction for system software that manages process-context identifiers if 1")
rtm = cpuidfield(EBX, 11, 11)
qm = cpuidfield(EBX, 12, 12, doc="Supports Quality of Service Monitoring capability if 1")
deprecate_fpu = cpuidfield(EBX, 13, 13, doc="Deprecates FPU CS and FPU DS values if 1")
rdseed = cpuidfield(EBX, 18, 18)
adx = cpuidfield(EBX, 19, 19)
smap = cpuidfield(EBX, 20, 20)
class LEAF_9(CPUID):
"""Direct Cache Access Information leaf
Returns information about Direct Cache Access capabilities"""
leaf = 0x9
platform_dca_cap = cpuidfield(EAX, 31, 0, doc="Value of bits of IA32_PLATFORM_DCA_CAP MSR (address 1F8H)")
class LEAF_A(CPUID):
"""Architectural Performance Monitoring Leaf
Returns information about support for architectural performance monitoring capabilities"""
leaf = 0xA
architectural_performance_monitor_version_id = cpuidfield(EAX, 7, 0, doc="Version ID of architectural performance monitoring")
gp_performance_monitor_counters = cpuidfield(EAX, 15, 8, doc="Number of general-purpose performance monitoring counter per logical processor")
gp_performance_counter_width = cpuidfield(EAX, 23, 16, doc="Bit width of general-purpose, performance monitoring counter")
ebx_bit_vector_length = cpuidfield(EAX, 31, 24, doc="Length of EBX bit vector to enumerate architectural performance monitoring events")
core_cycle_event = cpuidfield(EBX, 0, 0, doc="Core cycle event not available if 1")
instruction_retired_event = cpuidfield(EBX, 1, 1, doc="Instruction retired event not available if 1")
reference_cycles_event = cpuidfield(EBX, 2, 2, doc="Reference cycles event not available if 1")
llc_ref_event = cpuidfield(EBX, 3, 3, doc="Last-level cache reference event not available if 1")
llc_misses_event = cpuidfield(EBX, 4, 4, doc= "Last-level cache misses event not available if 1")
branch_instruction_retired_event = cpuidfield(EBX, 5, 5, doc="Branch instruction retired event not available if 1")
branch_mispredict_retired_event = cpuidfield(EBX, 6, 6, doc="Branch mispredict retired event not available if 1")
ff_performance_counters = cpuidfield(EDX, 4, 0, doc="Number of fixed-function performance counters")
ff_performance_counter_width = cpuidfield(EDX, 12, 5, doc="Bit width of fixed-function performance counters")
class LEAF_B(CPUID):
"""Extended Topology Enumeration Leaf
Returns information about extended topology enumeration data"""
leaf = 0xB
num_bit_shift = cpuidfield(EAX, 4, 0, doc="Number of bits to shift right on x2APIC ID to get a unique topology ID of the next level type")
logical_processors_at_level = cpuidfield(EBX, 15, 0, doc="Number of logical processors at this level type.")
level_number = cpuidfield(ECX, 7, 0, doc="Level number")
level_type = cpuidfield(ECX, 15, 8, doc="Level type")
x2apic_id = cpuidfield(EDX, 31, 0, doc="x2APIC ID of the current logical processor")
class LEAF_D(CPUID):
"""Processor Extended State Enumeration Main Leaf and Sub-Leaves.
Returns information about the bit-vector representation of all processor
state extensions that are supported in the processor, and storage size
requirements of the XSAVE/XRSTOR area. Output depends on initial value of ECX."""
leaf = 0xD
valid_bits_xcr0_lower = cpuidfield(EAX, 31, 0, doc="Reports the valid bit fields of the lower 32 bits of XCR0. If a bit is 0, the corresponding bit field in XCR0 is reserved")
legacy_x87 = cpuidfield(EAX, 0, 0, doc="legacy x87")
sse_128_bit = cpuidfield(EAX, 1, 1, doc="128-bit SSE")
avx_256_bit = cpuidfield(EAX, 2, 2, doc="256-bit AVX")
max_size_enabled_xcr0 = cpuidfield(EBX, 31, 0, doc="Maximum size (bytes, from the beginning of the XSAVE/XRSTOR save area) required by enabled features in XCR0. May be different than ECX if some features at the end of the XSAVE save area are not enabled.")
max_size_supported_xcr0 = cpuidfield(ECX, 31, 0, doc="Maximum size (bytes, from the beginning of the XSAVE/XRSTOR save area) of the XSAVE/XRSTOR save area required by all supported features in the processor, i.e all the valid bit fields in XCR0.")
valid_bits_xcr0_upper = cpuidfield(EDX, 31, 0, doc="The valid bit fields of the upper 32 bits of XCR0. If a bit is 0, the corresponding bit field in XCR0 is reserved.")
def __getitem__(self, subleaf):
if subleaf == 0:
return self.read(self.apicid, subleaf)
elif subleaf == 1:
return LEAF_D_1.read(self.apicid, subleaf)
return LEAF_D_n.read(self.apicid, subleaf)
class LEAF_D_1(CPUID):
"""Processor Extended State Enumeration Main Leaf and Sub-Leaves.
Returns information about the bit-vector representation of all processor
state extensions that are supported in the processor, and storage size
requirements of the XSAVE/XRSTOR area. Output depends on initial value of ECX."""
leaf = 0xD
xsaveopt = cpuidfield(EAX, 0, 0, doc="XSAVEOPT is available")
class LEAF_D_n(CPUID):
"""Processor Extended State Enumeration Main Leaf and Sub-Leaves.
Returns information about the bit-vector representation of all processor
state extensions that are supported in the processor, and storage size
requirements of the XSAVE/XRSTOR area. Output depends on initial value of ECX."""
leaf = 0xD
size = cpuidfield(EAX, 31, 0, doc="The size in bytes (from the offset specified in EBX) of the save area for an extended state feature associated with a valid sub-leaf index")
offset = cpuidfield(EBX, 31, 0, doc="The offset in bytes of this extended state component's save area from the beginning of the XSAVE/XRSTOR area.")
class LEAF_F(CPUID):
"""Quality of Service Resource Type Enumeration Sub-Leaf and L3 Cache QoS Capability Enumeration Sub-leaf. Depends on value of ECX
Returns Quality of Service (QoS) Enumeration Information."""
leaf = 0xF
def __getitem__(self, subleaf):
if subleaf == 0:
return self.read(self.apicid, subleaf)
elif subleaf == 1:
return LEAF_F_1.read(self.apicid, subleaf)
return LEAF_F_n.read(self.apicid, subleaf)
max_range_rmid_z = cpuidfield(EBX, 31, 0, doc="Maximum range (zero-based) of RMID within this physical processor of all types.")
l3_cache_qos = cpuidfield(EDX, 1, 1, doc="Supports L3 Cache QoS if 1")
@property
def max_range_rmid(self):
"""Maximum range of RMID within this physical processor of all types."""
return self.max_range_rmid_z + 1
class LEAF_F_1(CPUID):
"""Quality of Service Resource Type Enumeration Sub-Leaf and L3 Cache QoS Capability Enumeration Sub-leaf. Depends on value of ECX
Returns L3 Cache QoS Capability Enumeration Information."""
leaf = 0xF
qm_ctr_conversion_factor = cpuidfield(EBX, 31, 0, doc="Conversion factor from reported IA32_QM_CTR value to occupancy metric (bytes).")
l3_occupancy_monitoring = cpuidfield(EDX, 0, 0, doc="Supports L3 occupancy monitoring if 1")
max_range_rmid_z = cpuidfield(ECX, 31, 0, doc="Maximum range (zero-based) of RMID of this resource type")
@property
def max_range_rmid(self):
"""Maximum range of RMID of this resource type"""
return self.max_range_rmid_z + 1
class LEAF_F_n(CPUID):
"""Quality of Service Resource Type Enumeration Sub-Leaf and L3 Cache QoS Capability Enumeration Sub-leaf. Depends on value of ECX
Returns Quality of Service (QoS) Enumeration Information."""
leaf = 0xF
class LEAF_80000000(CPUID):
"""Extended Function CPUID Information"""
leaf = 0x80000000
max_extended_leaf = cpuidfield(EAX, 31, 0, doc="Highest extended function input value understood by CPUID")
class LEAF_80000001(CPUID):
"""Extended Function CPUID Information"""
leaf = 0x80000001
ext_signature_feature_bits = cpuidfield(EAX, 31, 0, doc="Extended processor signature and feature bits")
lahf_sahf_64 = cpuidfield(ECX, 0, 0, doc="LAHF/SAHF available in 64-bit mode")
lzcnt = cpuidfield(ECX, 5, 5)
syscall_sysret_64 = cpuidfield(EDX, 11, 11, doc="SYSCALL/SYSRET available in 64-bit mode")
execute_disable = cpuidfield(EDX, 20, 20, doc="Execute Disable Bit available")
gbyte_pages = cpuidfield(EDX, 26, 26, doc="GByte pages are available if 1")
rdtscp_ia32_tsc_aux = cpuidfield(EDX, 27, 27, doc="RDTSCP and IA32_TSC_AUX are available if 1")
intel_64 = cpuidfield(EDX, 29, 29, doc="Intel(R) 64 Architecture available if 1")
class LEAF_80000002(CPUID):
"""Extended Function CPUID Information
Processor Brand String"""
leaf = 0x80000002
@property
def brandstring(self):
"""Processor Brand String"""
return struct.pack('IIII', self.regs.eax, self.regs.ebx, self.regs.ecx, self.regs.edx).rstrip("\x00")
class LEAF_80000003(CPUID):
"""Extended Function CPUID Information
Processor Brand String Continued"""
leaf = 0x80000003
@property
def brandstring(self):
"""Processor Brand String"""
return struct.pack('IIII', self.regs.eax, self.regs.ebx, self.regs.ecx, self.regs.edx).rstrip("\x00")
class LEAF_80000004(CPUID):
"""Extended Function CPUID Information
Processor Brand String Continued"""
leaf = 0x80000004
@property
def brandstring(self):
"""Processor Brand String"""
return struct.pack('IIII', self.regs.eax, self.regs.ebx, self.regs.ecx, self.regs.edx).rstrip("\x00")
class LEAF_80000006(CPUID):
"""Extended Function CPUID Information"""
leaf = 0x80000006
cache_line_size = cpuidfield(ECX, 7, 0, doc="Cache Line size in bytes")
l2_associativity = cpuidfield(ECX, 15, 12, doc="L2 Associativity field")
cache_size_k = cpuidfield(ECX, 31, 16, doc="Cache size in 1K units")
class LEAF_80000007(CPUID):
"""Misc Feature Flags"""
leaf = 0x80000007
invariant_tsc = cpuidfield(EDX, 8, 8, doc="Invariant TSC available if 1")
class LEAF_80000008(CPUID):
"""Returns linear/physical address size"""
leaf = 0x80000008
physical_address_bits = cpuidfield(EAX, 7, 0, doc="# Physical Address bits")
linear_address_bits = cpuidfield(EAX, 15, 8, doc="# Linear Address bits")
def generate_cpuids(apicid):
max_leaf = LEAF_0.read(apicid).max_leaf
max_extended_leaf = LEAF_80000000.read(apicid).max_extended_leaf
leafs = [
LEAF_0,
LEAF_1,
LEAF_2,
LEAF_4,
LEAF_5,
LEAF_6,
LEAF_7,
LEAF_9,
LEAF_A,
LEAF_B,
LEAF_D,
LEAF_F,
LEAF_80000000,
LEAF_80000001,
LEAF_80000002,
LEAF_80000003,
LEAF_80000004,
LEAF_80000006,
LEAF_80000007,
LEAF_80000008,
]
return [l for l in leafs if l.leaf <= max_leaf or (0x80000000 <= l.leaf <= max_extended_leaf)]
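# Illustrative sketch only (not part of the original module): one way the leaf
# classes above might be decoded.  LEAF_*.read() and generate_cpuids() are the
# APIs defined in this file; the helper name is hypothetical, and the APIC ID
# is assumed to come from the surrounding BITS environment (e.g. bits.cpus()).
def _demo_dump_cpuid(apicid):
    """Print a few decoded CPUID fields for one logical processor (sketch)."""
    leaf1 = LEAF_1.read(apicid)
    print("vendor={} family={:#x} model={:#x} stepping={:#x}".format(
        LEAF_0.read(apicid).vendor, leaf1.display_family,
        leaf1.display_model, leaf1.stepping))
    # Walk every leaf supported by this processor and print it
    for leaf_cls in generate_cpuids(apicid):
        print(leaf_cls.read(apicid))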
|
|
'''
Stack Layout
============
.. only:: html
.. image:: images/stacklayout.gif
:align: right
.. only:: latex
.. image:: images/stacklayout.png
:align: right
.. versionadded:: 1.0.5
:class:`StackLayout` arranges children vertically or horizontally, as many
as the layout can fit.
.. warning::
This is experimental and subject to change as long as this warning notice is
present.
'''
__all__ = ('StackLayout', )
from kivy.uix.layout import Layout
from kivy.properties import NumericProperty, OptionProperty, \
ReferenceListProperty, VariableListProperty
class StackLayout(Layout):
'''Stack layout class. See module documentation for more information.
'''
spacing = VariableListProperty([0, 0], length=2)
'''Spacing between children: [spacing_horizontal, spacing_vertical].
spacing also accepts a one argument form [spacing].
:data:`spacing` is a
:class:`~kivy.properties.VariableListProperty`, default to [0, 0].
'''
padding = VariableListProperty([0, 0, 0, 0])
'''Padding between layout box and children: [padding_left, padding_top,
padding_right, padding_bottom].
padding also accepts a two argument form [padding_horizontal,
padding_vertical] and a one argument form [padding].
.. versionchanged:: 1.7.0
Replaced NumericProperty with VariableListProperty.
:data:`padding` is a
:class:`~kivy.properties.VariableListProperty`, default to [0, 0,
0, 0].
'''
orientation = OptionProperty('lr-tb', options=(
'lr-tb', 'tb-lr', 'rl-tb', 'tb-rl', 'lr-bt', 'bt-lr', 'rl-bt', 'bt-rl'))
'''Orientation of the layout.
:data:`orientation` is an :class:`~kivy.properties.OptionProperty`, default
to 'lr-tb'.
Valid orientations are: 'lr-tb', 'tb-lr', 'rl-tb', 'tb-rl', 'lr-bt',
'bt-lr', 'rl-bt', 'bt-rl'
.. versionchanged:: 1.5.0
:data:`orientation` now correctly handles all valid combinations of
'lr','rl','tb','bt'. Before this version only 'lr-tb' and
'tb-lr' were supported, and 'tb-lr' was misnamed and placed
widgets from bottom to top and from right to left (reversed compared
to what was expected).
.. note::
lr means Left to Right.
rl means Right to Left.
tb means Top to Bottom.
bt means Bottom to Top.
'''
minimum_width = NumericProperty(0)
'''Minimum width needed to contain all children.
.. versionadded:: 1.0.8
:data:`minimum_width` is a :class:`kivy.properties.NumericProperty`, default
to 0.
'''
minimum_height = NumericProperty(0)
'''Minimum height needed to contain all children.
.. versionadded:: 1.0.8
:data:`minimum_height` is a :class:`kivy.properties.NumericProperty`,
default to 0.
'''
minimum_size = ReferenceListProperty(minimum_width, minimum_height)
'''Minimum size needed to contain all children.
.. versionadded:: 1.0.8
:data:`minimum_size` is a :class:`~kivy.properties.ReferenceListProperty` of
(:data:`minimum_width`, :data:`minimum_height`) properties.
'''
def __init__(self, **kwargs):
super(StackLayout, self).__init__(**kwargs)
self.bind(
padding=self._trigger_layout,
spacing=self._trigger_layout,
children=self._trigger_layout,
orientation=self._trigger_layout,
size=self._trigger_layout,
pos=self._trigger_layout)
def do_layout(self, *largs):
# optimize layout by preventing looking at the same attribute in a loop
selfpos = self.pos
selfsize = self.size
orientation = self.orientation.split('-')
padding_left = self.padding[0]
padding_top = self.padding[1]
padding_right = self.padding[2]
padding_bottom = self.padding[3]
padding_x = padding_left + padding_right
padding_y = padding_top + padding_bottom
spacing_x, spacing_y = self.spacing
lc = []
# Determine which direction and in what order to place the widgets
posattr = [0] * 2
posdelta = [0] * 2
posstart = [0] * 2
for i in (0, 1):
posattr[i] = 1 * (orientation[i] in ('tb', 'bt'))
k = posattr[i]
if orientation[i] == 'lr':
# left to right
posdelta[i] = 1
posstart[i] = selfpos[k] + padding_left
elif orientation[i] == 'bt':
# bottom to top
posdelta[i] = 1
posstart[i] = selfpos[k] + padding_bottom
elif orientation[i] == 'rl':
# right to left
posdelta[i] = -1
posstart[i] = selfpos[k] + selfsize[k] - padding_right
else:
# top to bottom
posdelta[i] = -1
posstart[i] = selfpos[k] + selfsize[k] - padding_top
innerattr, outerattr = posattr
ustart, vstart = posstart
deltau, deltav = posdelta
del posattr, posdelta, posstart
u = ustart # inner loop position variable
v = vstart # outer loop position variable
# space calculation, used for determining when a row or column is full
if orientation[0] in ('lr', 'rl'):
lu = self.size[innerattr] - padding_x
sv = padding_y # size in v-direction, for minimum_size property
su = padding_x # size in h-direction
spacing_u = spacing_x
spacing_v = spacing_y
else:
lu = self.size[innerattr] - padding_y
sv = padding_x # size in v-direction, for minimum_size property
su = padding_y # size in u-direction
spacing_u = spacing_y
spacing_v = spacing_x
# space calculation, row height or column width, for arranging widgets
lv = 0
urev = (deltau < 0)
vrev = (deltav < 0)
for c in reversed(self.children):
if c.size_hint[0]:
c.width = c.size_hint[0] * (selfsize[0] - padding_x)
if c.size_hint[1]:
c.height = c.size_hint[1] * (selfsize[1] - padding_y)
# does the widget fit in the row/column?
if lu - c.size[innerattr] >= 0:
lc.append(c)
lu -= c.size[innerattr] + spacing_u
lv = max(lv, c.size[outerattr])
continue
# push the line
sv += lv + spacing_v
for c2 in lc:
if urev:
u -= c2.size[innerattr]
c2.pos[innerattr] = u
pos_outer = v
if vrev:
# v position is actually the top/right side of the widget
# when going from high to low coordinate values,
# we need to subtract the height/width from the position.
pos_outer -= c2.size[outerattr]
c2.pos[outerattr] = pos_outer
if urev:
u -= spacing_u
else:
u += c2.size[innerattr] + spacing_u
v += deltav * lv
v += deltav * spacing_v
lc = [c]
lv = c.size[outerattr]
lu = selfsize[innerattr] - su - c.size[innerattr] - spacing_u
u = ustart
if lc:
# push the last (incomplete) line
sv += lv + spacing_v
for c2 in lc:
if urev:
u -= c2.size[innerattr]
c2.pos[innerattr] = u
pos_outer = v
if vrev:
pos_outer -= c2.size[outerattr]
c2.pos[outerattr] = pos_outer
if urev:
u -= spacing_u
else:
u += c2.size[innerattr] + spacing_u
self.minimum_size[outerattr] = sv
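# Minimal usage sketch (not part of this module; assumes a standard Kivy
# install).  Fixed-size buttons are laid out left-to-right and wrap onto a
# new row once the current row is full:
#
#     from kivy.app import App
#     from kivy.uix.button import Button
#     from kivy.uix.stacklayout import StackLayout
#
#     class DemoApp(App):
#         def build(self):
#             root = StackLayout(orientation='lr-tb', spacing=[4, 4],
#                                padding=[10, 10, 10, 10])
#             for i in range(12):
#                 root.add_widget(Button(text=str(i), size_hint=(None, None),
#                                        size=(100, 50)))
#             return root
#
#     if __name__ == '__main__':
#         DemoApp().run()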
|
|
"""
Tests for L{klein.plating}.
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
import json
from klein import Plating
from twisted.web.template import tags, slot
from twisted.web.error import FlattenerError, MissingRenderMethod
from klein.test.test_resource import requestMock, _render
from klein.test.util import TestCase
from klein import Klein
page = Plating(
defaults={
"title": "default title unchanged",
Plating.CONTENT: "NEVER MIND THE CONTENT",
},
tags=tags.html(
tags.head(tags.title(slot("title"))),
tags.body(
tags.h1(slot("title")),
tags.div(slot(Plating.CONTENT),
Class="content")
)
),
)
element = Plating(
defaults={
"a": "NO VALUE FOR A",
"b": "NO VALUE FOR B",
},
tags=tags.div(tags.span("a: ", slot("a")),
tags.span("b: ", slot("b"))),
)
@element.widgeted
def enwidget(a, b):
"""
Provide some values for the L{widget} template.
"""
return {"a": a, "b": b}
class PlatingTests(TestCase):
"""
Tests for L{Plating}.
"""
def setUp(self):
"""
Create an app and a resource wrapping that app for this test.
"""
self.app = Klein()
self.kr = self.app.resource()
def get(self, uri):
"""
Issue a virtual GET request to the given path that is expected to
succeed synchronously, and return the generated request object and
written bytes.
"""
request = requestMock(uri)
d = _render(self.kr, request)
self.successResultOf(d)
return request, request.getWrittenData()
def test_template_html(self):
"""
Rendering a L{Plating.routed} decorated route results in templated
HTML.
"""
@page.routed(self.app.route("/"),
tags.span(slot("ok")))
def plateMe(request):
return {"ok": "test-data-present"}
request, written = self.get(b"/")
self.assertIn(b'<span>test-data-present</span>', written)
self.assertIn(b'<title>default title unchanged</title>', written)
def test_template_json(self):
"""
Rendering a L{Plating.routed} decorated route with a query parameter
asking for JSON will yield JSON instead.
"""
@page.routed(self.app.route("/"),
tags.span(slot("ok")))
def plateMe(request):
return {"ok": "an-plating-test"}
request, written = self.get(b"/?json=true")
self.assertEqual(
request.responseHeaders.getRawHeaders(b'content-type')[0],
b'text/json; charset=utf-8'
)
self.assertEquals({"ok": "an-plating-test",
"title": "default title unchanged"},
json.loads(written.decode('utf-8')))
def test_template_numbers(self):
"""
Data returned from a plated method may include numeric types (integers,
floats, and possibly longs), which although they are not normally
serializable by twisted.web.template, will be converted by plating into
their decimal representation.
"""
@page.routed(self.app.route("/"),
tags.div(tags.span(slot("anInteger")),
tags.i(slot("anFloat")),
tags.b(slot("anLong")),
))
def plateMe(result):
return {"anInteger": 7,
"anFloat": 3.2,
"anLong": 0x10000000000000001}
request, written = self.get(b"/")
self.assertIn(b"<span>7</span>", written)
self.assertIn(b"<i>3.2</i>", written)
self.assertIn(b"<b>18446744073709551617</b>", written)
def test_render_list(self):
"""
The C{:list} renderer suffix will render the slot named by the renderer
as a list, filling each slot.
"""
@page.routed(self.app.route("/"),
tags.ul(tags.li(slot("item"),
render="subplating:list")))
def rsrc(request):
return {"subplating": [1, 2, 3]}
request, written = self.get(b"/")
self.assertIn(b'<ul><li>1</li><li>2</li><li>3</li></ul>', written)
self.assertIn(b'<title>default title unchanged</title>', written)
def test_widget_html(self):
"""
When L{Plating.widgeted} is applied as a decorator, it gives the
decorated function a C{widget} attribute which is a version of the
function with a modified return type that turns it into a renderable
HTML sub-element that may fill a slot.
"""
@page.routed(self.app.route("/"),
tags.div(slot("widget")))
def rsrc(request):
return {"widget": enwidget.widget(3, 4)}
request, written = self.get(b"/")
self.assertIn(b"<span>a: 3</span>", written)
self.assertIn(b"<span>b: 4</span>", written)
def test_widget_json(self):
"""
When L{Plating.widgeted} is applied as a decorator, and the result is
serialized to JSON, it appears the same as the returned value despite
the HTML-friendly wrapping described above.
"""
@page.routed(self.app.route("/"),
tags.div(slot("widget")))
def rsrc(request):
return {"widget": enwidget.widget(3, 4)}
request, written = self.get(b"/?json=1")
self.assertEqual(json.loads(written.decode('utf-8')),
{"widget": {"a": 3, "b": 4},
"title": "default title unchanged"})
def test_prime_directive_return(self):
"""
Nothing within these Articles Of Federation shall authorize the United
Federation of Planets to alter the return value of a callable by
applying a decorator to it...
"""
exact_result = {"ok": "some nonsense value"}
@page.routed(self.app.route("/"),
tags.span(slot("ok")))
def plateMe(request):
return exact_result
self.assertIdentical(plateMe(None), exact_result)
def test_prime_directive_arguments(self):
"""
... or shall require the function to modify its signature under these
Articles Of Federation.
"""
@page.routed(self.app.route("/"),
tags.span(slot("ok")))
def plateMe(request, one, two, three):
return (one, two, three)
exact_one = {"one": "and"}
exact_two = {"two": "and"}
exact_three = {"three": "and"}
result_one, result_two, result_three = plateMe(
None, exact_one, exact_two, three=exact_three
)
self.assertIdentical(result_one, exact_one)
self.assertIdentical(result_two, exact_two)
self.assertIdentical(result_three, exact_three)
def test_presentation_only_json(self):
"""
Slots marked as "presentation only" will not be reflected in the
output.
"""
plating = Plating(tags=tags.span(slot("title")),
presentation_slots={"title"})
@plating.routed(self.app.route("/"),
tags.span(slot("data")))
def justJson(request):
return {"title": "uninteresting", "data": "interesting"}
request, written = self.get(b"/?json=1")
self.assertEqual(json.loads(written.decode("utf-8")),
{"data": "interesting"})
def test_missing_renderer(self):
"""
Missing renderers will result in an exception during rendering.
"""
def test(missing):
plating = Plating(tags=tags.span(slot(Plating.CONTENT)))
@plating.routed(self.app.route("/"),
tags.span(tags.span(render=missing)))
def no(request):
return {}
self.get(b"/")
[fe] = self.flushLoggedErrors(FlattenerError)
self.assertIsInstance(fe.value.args[0], MissingRenderMethod)
test("garbage")
test("garbage:missing")
def test_json_serialize_unknown_type(self):
"""
The JSON serializer will raise a L{TypeError} when it can't find an
appropriate type.
"""
from klein._plating import json_serialize
class reprish(object):
def __repr__(self):
return '<blub>'
te = self.assertRaises(TypeError, json_serialize, {"an": reprish()})
self.assertIn("<blub>", str(te))
|
|
# -*- coding: utf-8 -*-
r"""
sphinx.ext.inheritance_diagram
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Defines a docutils directive for inserting inheritance diagrams.
Provide the directive with one or more classes or modules (separated
by whitespace). For modules, all of the classes in that module will
be used.
Example::
Given the following classes:
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
class E(B): pass
.. inheritance-diagram:: D E
Produces a graph like the following:
A
/ \
B C
/ \ /
E D
The graph is inserted as a PNG+image map into HTML and a PDF in
LaTeX.
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
Extended by Michael Goerz:
* take into account classes exported via __all__ (cls_is_in_module)
* add flag `cluster_modules`
* link to svg files
"""
import inspect
import re
import sys
from hashlib import md5
from collections import defaultdict
import os
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from six import text_type
from six.moves import builtins
import sphinx
from graphviz_ext import render_dot_html, render_dot_latex, \
render_dot_texinfo, figure_wrapper
from sphinx.pycode import ModuleAnalyzer
from sphinx.util import force_decode
if False:
# For type annotation
from typing import Any, Dict, List, Tuple, Optional # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
module_sig_re = re.compile(r'''^(?:([\w.]*)\.)? # module names
(\w+) \s* $ # class/final module name
''', re.VERBOSE)
def try_import(objname):
# type: (unicode) -> Any
"""Import a object or module using *name* and *currentmodule*.
*name* should be a relative name from *currentmodule* or
a fully-qualified name.
Returns imported object or module. If failed, returns None value.
"""
try:
__import__(objname)
return sys.modules.get(objname) # type: ignore
except (ImportError, ValueError): # ValueError,py27 -> ImportError,py3
matched = module_sig_re.match(objname) # type: ignore
if not matched:
return None
modname, attrname = matched.groups()
if modname is None:
return None
try:
__import__(modname)
return getattr(sys.modules.get(modname), attrname, None)
except (ImportError, ValueError): # ValueError,py27 -> ImportError,py3
return None
def cls_is_in_module(cls, mod):
if cls.__module__ == mod.__name__:
return True
elif cls.__name__ in mod.__dict__.get('__all__', ()):
return True
else:
return False
def import_classes(name, currmodule):
# type: (unicode, unicode) -> Any
"""Import a class using its fully-qualified *name*."""
target = None
# import class or module using currmodule
if currmodule:
target = try_import(currmodule + '.' + name)
# import class or module without currmodule
if target is None:
target = try_import(name)
if target is None:
raise InheritanceException(
'Could not import class or module %r specified for '
'inheritance diagram' % name)
if inspect.isclass(target):
# If imported object is a class, just return it
return [target]
elif inspect.ismodule(target):
# If imported object is a module, return classes defined on it
classes = []
for cls in target.__dict__.values():
if inspect.isclass(cls) and cls_is_in_module(cls, mod=target):
classes.append(cls)
return classes
raise InheritanceException('%r specified for inheritance diagram is '
'not a class or module' % name)
class InheritanceException(Exception):
pass
class InheritanceGraph(object):
"""
Given a list of classes, determines the set of classes that they inherit
from all the way to the root "object", and then is able to generate a
graphviz dot graph from them.
"""
def __init__(self, class_names, currmodule, show_builtins=False,
private_bases=False, parts=0, aliases=None,
cluster_modules=False, top_classes=[]):
# type: (unicode, str, bool, bool, int, Optional[Dict[unicode, unicode]], bool, List[Any]) -> None # NOQA
"""*class_names* is a list of child classes to show bases from.
If *show_builtins* is True, then Python builtins will be shown
in the graph.
"""
self.class_names = class_names
classes = self._import_classes(class_names, currmodule)
self.class_info = self._class_info(classes, show_builtins,
private_bases, parts, aliases,
top_classes)
self.cluster_modules = cluster_modules
if not self.class_info:
raise InheritanceException('No classes found for '
'inheritance diagram')
def _import_classes(self, class_names, currmodule):
# type: (unicode, str) -> List[Any]
"""Import a list of classes."""
classes = [] # type: List[Any]
for name in class_names:
classes.extend(import_classes(name, currmodule))
return classes
def _class_info(self, classes, show_builtins, private_bases, parts, aliases, top_classes):
# type: (List[Any], bool, bool, int, Optional[Dict[unicode, unicode]], List[Any]) -> List[Tuple[unicode, unicode, List[unicode], unicode]] # NOQA
"""Return name and bases for all classes that are ancestors of
*classes*.
*parts* gives the number of dotted name parts that is removed from the
displayed node names.
*top_classes* gives the name(s) of the top most ancestor class to traverse
to. Multiple names can be specified separated by comma.
"""
all_classes = {}
py_builtins = vars(builtins).values()
def recurse(cls):
# type: (Any) -> None
if not show_builtins and cls in py_builtins:
return
if not private_bases and cls.__name__.startswith('_'):
return
nodename = self.class_name(cls, parts, aliases)
fullname = self.class_name(cls, 0, aliases)
# Use first line of docstring as tooltip, if available
tooltip = None
try:
if cls.__doc__:
enc = ModuleAnalyzer.for_module(cls.__module__).encoding
doc = cls.__doc__.strip().split("\n")[0]
if not isinstance(doc, text_type):
doc = force_decode(doc, enc)
if doc:
tooltip = '"%s"' % doc.replace('"', '\\"')
except Exception: # might raise AttributeError for strange classes
pass
baselist = [] # type: List[unicode]
all_classes[cls] = (nodename, fullname, baselist, tooltip)
if fullname in top_classes:
return
for base in cls.__bases__:
if not show_builtins and base in py_builtins:
continue
if not private_bases and base.__name__.startswith('_'):
continue
baselist.append(self.class_name(base, parts, aliases))
if base not in all_classes:
recurse(base)
for cls in classes:
recurse(cls)
return list(all_classes.values())
def class_name(self, cls, parts=0, aliases=None):
# type: (Any, int, Optional[Dict[unicode, unicode]]) -> unicode
"""Given a class object, return a fully-qualified name.
This works for things I've tested in matplotlib so far, but may not be
completely general.
"""
module = cls.__module__
if module in ('__builtin__', 'builtins'):
fullname = cls.__name__
else:
fullname = '%s.%s' % (module, cls.__name__)
if parts == 0:
result = fullname
else:
name_parts = fullname.split('.')
result = '.'.join(name_parts[-parts:])
if aliases is not None and result in aliases:
return aliases[result]
return result
def get_all_class_names(self):
# type: () -> List[unicode]
"""Get all of the class names involved in the graph."""
return [fullname for (_, fullname, _, _) in self.class_info] # type: ignore
# These are the default attrs for graphviz
default_graph_attrs = {
'rankdir': 'LR',
'size': '"8.0, 12.0"',
}
default_node_attrs = {
'shape': 'box',
'fontsize': 10,
'height': 0.25,
'fontname': '"Vera Sans, DejaVu Sans, Liberation Sans, '
'Arial, Helvetica, sans"',
'style': '"setlinewidth(0.5)"',
}
default_edge_attrs = {
'arrowsize': 0.5,
'style': '"setlinewidth(0.5)"',
}
default_cluster_attrs = {
'fontsize': 8,
'fontname': '"Vera Sans, DejaVu Sans, Liberation Sans, '
'Arial, Helvetica, sans"',
'style': 'filled',
'fontcolor': 'gray40',
'color': 'gray95',
}
def _format_node_attrs(self, attrs):
# type: (Dict) -> unicode
return ','.join(['%s=%s' % x for x in sorted(attrs.items())])
def _format_graph_attrs(self, attrs):
# type: (Dict) -> unicode
return ''.join(['%s=%s;\n' % x for x in sorted(attrs.items())])
def generate_dot(self, name, urls={}, env=None,
graph_attrs={}, node_attrs={}, edge_attrs={}):
# type: (unicode, Dict, BuildEnvironment, Dict, Dict, Dict) -> unicode
"""Generate a graphviz dot graph from the classes that were passed in
to __init__.
*name* is the name of the graph.
*urls* is a dictionary mapping class names to HTTP URLs.
*graph_attrs*, *node_attrs*, *edge_attrs* are dictionaries containing
key/value pairs to pass on as graphviz properties.
"""
g_attrs = self.default_graph_attrs.copy()
n_attrs = self.default_node_attrs.copy()
e_attrs = self.default_edge_attrs.copy()
c_attrs = self.default_cluster_attrs.copy()
g_attrs.update(graph_attrs)
n_attrs.update(node_attrs)
e_attrs.update(edge_attrs)
if env:
g_attrs.update(env.config.inheritance_graph_attrs)
n_attrs.update(env.config.inheritance_node_attrs)
e_attrs.update(env.config.inheritance_edge_attrs)
c_attrs.update(env.config.inheritance_cluster_attrs)
res = [] # type: List[unicode]
res.append('digraph %s {\n' % name)
res.append(self._format_graph_attrs(g_attrs))
subgraphs = defaultdict(list) # subgraph_name => list of node names
for name, fullname, bases, tooltip in sorted(self.class_info):
subgraph_name = ".".join(fullname.split(".")[:-1])
subgraphs[subgraph_name].append(name)
# Write the node
this_node_attrs = n_attrs.copy()
if fullname in urls:
this_node_attrs['URL'] = '"%s"' % urls[fullname]
this_node_attrs['target'] = '"_top"'
if tooltip:
this_node_attrs['tooltip'] = tooltip
res.append(' "%s" [%s];\n' %
(name, self._format_node_attrs(this_node_attrs)))
# Write the edges
for base_name in bases:
res.append(' "%s" -> "%s" [%s];\n' %
(base_name, name,
self._format_node_attrs(e_attrs)))
if self.cluster_modules:
for subgraph_name in subgraphs:
res.append('subgraph cluster_%s {\n'
% subgraph_name.replace('.', '_'))
res.append(' label="%s";\n' % subgraph_name)
res.append(' graph[' + self._format_node_attrs(c_attrs) +
"];\n")
res.append(' ' + "; ".join(subgraphs[subgraph_name]) + "\n")
res.append('}\n')
res.append('}\n')
return ''.join(res)
class inheritance_diagram(nodes.General, nodes.Element):
"""
A docutils node to use as a placeholder for the inheritance diagram.
"""
pass
class InheritanceDiagram(Directive):
"""
Run when the inheritance_diagram directive is first encountered.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {
'parts': int,
'private-bases': directives.flag,
'caption': directives.unchanged,
'top-classes': directives.unchanged_required,
'cluster_modules': directives.flag,
}
def run(self):
# type: () -> List[nodes.Node]
node = inheritance_diagram()
node.document = self.state.document
env = self.state.document.settings.env
class_names = self.arguments[0].split()
class_role = env.get_domain('py').role('class')
# Store the original content for use as a hash
node['parts'] = self.options.get('parts', 0)
node['content'] = ', '.join(class_names)
node['top-classes'] = []
for cls in self.options.get('top-classes', '').split(','):
cls = cls.strip()
if cls:
node['top-classes'].append(cls)
# Create a graph starting with the list of classes
try:
graph = InheritanceGraph(
class_names, env.ref_context.get('py:module'),
parts=node['parts'],
private_bases='private-bases' in self.options,
aliases=env.config.inheritance_alias,
cluster_modules='cluster_modules' in self.options,
top_classes=node['top-classes'])
except InheritanceException as err:
return [node.document.reporter.warning(err.args[0],
line=self.lineno)]
# Create xref nodes for each target of the graph's image map and
# add them to the doc tree so that Sphinx can resolve the
# references to real URLs later. These nodes will eventually be
# removed from the doctree after we're done with them.
for name in graph.get_all_class_names():
refnodes, x = class_role(
'class', ':class:`%s`' % name, name, 0, self.state)
node.extend(refnodes)
# Store the graph object so we can use it to generate the
# dot file later
node['graph'] = graph
# wrap the result in figure node
caption = self.options.get('caption')
if caption:
node = figure_wrapper(self, node, caption)
return [node]
def get_graph_hash(node):
# type: (inheritance_diagram) -> unicode
encoded = (node['content'] + str(node['parts'])).encode('utf-8')
return md5(encoded).hexdigest()[-10:]
def html_visit_inheritance_diagram(self, node):
# type: (nodes.NodeVisitor, inheritance_diagram) -> None
"""
Output the graph for HTML. This will insert a PNG with clickable
image map.
"""
graph = node['graph']
graph_hash = get_graph_hash(node)
name = 'inheritance%s' % graph_hash
# Create a mapping from fully-qualified class names to URLs.
graphviz_output_format = self.builder.env.config.graphviz_output_format.upper()
current_filename = self.builder.current_docname + self.builder.out_suffix
urls = {}
for child in node:
if child.get('refuri') is not None:
if graphviz_output_format == 'SVG':
urls[child['reftitle']] = os.path.join("..", child.get('refuri'))
else:
urls[child['reftitle']] = child.get('refuri')
elif child.get('refid') is not None:
if graphviz_output_format == 'SVG':
urls[child['reftitle']] = os.path.join('..', current_filename + '#' + child.get('refid'))
else:
urls[child['reftitle']] = '#' + child.get('refid')
dotcode = graph.generate_dot(name, urls, env=self.builder.env)
render_dot_html(
self, node, dotcode, {}, 'inheritance', 'inheritance',
alt='Inheritance diagram of ' + node['content'],
link_to_svg='<i class="fa fa-external-link" aria-hidden="true"></i>'' SVG')
raise nodes.SkipNode
def latex_visit_inheritance_diagram(self, node):
# type: (nodes.NodeVisitor, inheritance_diagram) -> None
"""
Output the graph for LaTeX. This will insert a PDF.
"""
graph = node['graph']
graph_hash = get_graph_hash(node)
name = 'inheritance%s' % graph_hash
dotcode = graph.generate_dot(name, env=self.builder.env,
graph_attrs={'size': '"6.0,6.0"'})
render_dot_latex(self, node, dotcode, {}, 'inheritance')
raise nodes.SkipNode
def texinfo_visit_inheritance_diagram(self, node):
# type: (nodes.NodeVisitor, inheritance_diagram) -> None
"""
Output the graph for Texinfo. This will insert a PNG.
"""
graph = node['graph']
graph_hash = get_graph_hash(node)
name = 'inheritance%s' % graph_hash
dotcode = graph.generate_dot(name, env=self.builder.env,
graph_attrs={'size': '"6.0,6.0"'})
render_dot_texinfo(self, node, dotcode, {}, 'inheritance')
raise nodes.SkipNode
def skip(self, node):
# type: (nodes.NodeVisitor, inheritance_diagram) -> None
raise nodes.SkipNode
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
app.setup_extension('graphviz_ext')
app.add_node(
inheritance_diagram,
latex=(latex_visit_inheritance_diagram, None),
html=(html_visit_inheritance_diagram, None),
text=(skip, None),
man=(skip, None),
texinfo=(texinfo_visit_inheritance_diagram, None))
app.add_directive('inheritance-diagram', InheritanceDiagram)
app.add_config_value('inheritance_graph_attrs', {}, False)
app.add_config_value('inheritance_node_attrs', {}, False)
app.add_config_value('inheritance_edge_attrs', {}, False)
app.add_config_value('inheritance_cluster_attrs', {}, False)
app.add_config_value('inheritance_alias', {}, False)
return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
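# Illustrative configuration sketch (assumptions: this file and the
# graphviz_ext module it imports are importable from the Sphinx project, and
# the extension name below matches whatever this file is saved as):
#
#     # conf.py
#     extensions = [
#         'graphviz_ext',
#         'inheritance_diagram_ext',   # hypothetical name for this module
#     ]
#     graphviz_output_format = 'svg'
#     inheritance_graph_attrs = {'rankdir': 'TB', 'size': '"7.0, 10.0"'}
#     inheritance_node_attrs = {'fontsize': 9}
#
# and in a reStructuredText source file:
#
#     .. inheritance-diagram:: mypackage.mymodule.SomeClass
#        :parts: 1
#        :cluster_modules: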
|
|
from sqlalchemy.test.testing import assert_raises, assert_raises_message
from sqlalchemy import *
from sqlalchemy import exc, schema
from sqlalchemy.test import *
from sqlalchemy.test import config, engines
from sqlalchemy.engine import ddl
from sqlalchemy.test.testing import eq_
from sqlalchemy.test.assertsql import AllOf, RegexSQL, ExactSQL, CompiledSQL
from sqlalchemy.dialects.postgresql import base as postgresql
class ConstraintTest(TestBase, AssertsExecutionResults, AssertsCompiledSQL):
def setup(self):
global metadata
metadata = MetaData(testing.db)
def teardown(self):
metadata.drop_all()
def test_constraint(self):
employees = Table('employees', metadata,
Column('id', Integer),
Column('soc', String(40)),
Column('name', String(30)),
PrimaryKeyConstraint('id', 'soc')
)
elements = Table('elements', metadata,
Column('id', Integer),
Column('stuff', String(30)),
Column('emp_id', Integer),
Column('emp_soc', String(40)),
PrimaryKeyConstraint('id', name='elements_primkey'),
ForeignKeyConstraint(['emp_id', 'emp_soc'], ['employees.id', 'employees.soc'])
)
metadata.create_all()
def test_double_fk_usage_raises(self):
f = ForeignKey('b.id')
Column('x', Integer, f)
assert_raises(exc.InvalidRequestError, Column, "y", Integer, f)
def test_circular_constraint(self):
a = Table("a", metadata,
Column('id', Integer, primary_key=True),
Column('bid', Integer),
ForeignKeyConstraint(["bid"], ["b.id"], name="afk")
)
b = Table("b", metadata,
Column('id', Integer, primary_key=True),
Column("aid", Integer),
ForeignKeyConstraint(["aid"], ["a.id"], use_alter=True, name="bfk")
)
metadata.create_all()
def test_circular_constraint_2(self):
a = Table("a", metadata,
Column('id', Integer, primary_key=True),
Column('bid', Integer, ForeignKey("b.id")),
)
b = Table("b", metadata,
Column('id', Integer, primary_key=True),
Column("aid", Integer, ForeignKey("a.id", use_alter=True, name="bfk")),
)
metadata.create_all()
@testing.fails_on('mysql', 'FIXME: unknown')
def test_check_constraint(self):
foo = Table('foo', metadata,
Column('id', Integer, primary_key=True),
Column('x', Integer),
Column('y', Integer),
CheckConstraint('x>y'))
bar = Table('bar', metadata,
Column('id', Integer, primary_key=True),
Column('x', Integer, CheckConstraint('x>7')),
Column('z', Integer)
)
metadata.create_all()
foo.insert().execute(id=1,x=9,y=5)
assert_raises(exc.SQLError, foo.insert().execute, id=2,x=5,y=9)
bar.insert().execute(id=1,x=10)
assert_raises(exc.SQLError, bar.insert().execute, id=2,x=5)
def test_unique_constraint(self):
foo = Table('foo', metadata,
Column('id', Integer, primary_key=True),
Column('value', String(30), unique=True))
bar = Table('bar', metadata,
Column('id', Integer, primary_key=True),
Column('value', String(30)),
Column('value2', String(30)),
UniqueConstraint('value', 'value2', name='uix1')
)
metadata.create_all()
foo.insert().execute(id=1, value='value1')
foo.insert().execute(id=2, value='value2')
bar.insert().execute(id=1, value='a', value2='a')
bar.insert().execute(id=2, value='a', value2='b')
assert_raises(exc.SQLError, foo.insert().execute, id=3, value='value1')
assert_raises(exc.SQLError, bar.insert().execute, id=3, value='a', value2='b')
def test_index_create(self):
employees = Table('employees', metadata,
Column('id', Integer, primary_key=True),
Column('first_name', String(30)),
Column('last_name', String(30)),
Column('email_address', String(30)))
employees.create()
i = Index('employee_name_index',
employees.c.last_name, employees.c.first_name)
i.create()
assert i in employees.indexes
i2 = Index('employee_email_index',
employees.c.email_address, unique=True)
i2.create()
assert i2 in employees.indexes
def test_index_create_camelcase(self):
"""test that mixed-case index identifiers are legal"""
employees = Table('companyEmployees', metadata,
Column('id', Integer, primary_key=True),
Column('firstName', String(30)),
Column('lastName', String(30)),
Column('emailAddress', String(30)))
employees.create()
i = Index('employeeNameIndex',
employees.c.lastName, employees.c.firstName)
i.create()
i = Index('employeeEmailIndex',
employees.c.emailAddress, unique=True)
i.create()
# Check that the table is usable. This is mostly for pg,
# which can be somewhat sticky with mixed-case identifiers
employees.insert().execute(firstName='Joe', lastName='Smith', id=0)
ss = employees.select().execute().fetchall()
assert ss[0].firstName == 'Joe'
assert ss[0].lastName == 'Smith'
def test_index_create_inline(self):
"""Test indexes defined with tables"""
events = Table('events', metadata,
Column('id', Integer, primary_key=True),
Column('name', String(30), index=True, unique=True),
Column('location', String(30), index=True),
Column('sport', String(30)),
Column('announcer', String(30)),
Column('winner', String(30)))
Index('sport_announcer', events.c.sport, events.c.announcer, unique=True)
Index('idx_winners', events.c.winner)
eq_(
set([ ix.name for ix in events.indexes ]),
set(['ix_events_name', 'ix_events_location', 'sport_announcer', 'idx_winners'])
)
self.assert_sql_execution(
testing.db,
lambda: events.create(testing.db),
RegexSQL("^CREATE TABLE events"),
AllOf(
ExactSQL('CREATE UNIQUE INDEX ix_events_name ON events (name)'),
ExactSQL('CREATE INDEX ix_events_location ON events (location)'),
ExactSQL('CREATE UNIQUE INDEX sport_announcer ON events (sport, announcer)'),
ExactSQL('CREATE INDEX idx_winners ON events (winner)')
)
)
# verify that the table is functional
events.insert().execute(id=1, name='hockey finals', location='rink',
sport='hockey', announcer='some canadian',
winner='sweden')
ss = events.select().execute().fetchall()
def test_too_long_idx_name(self):
dialect = testing.db.dialect.__class__()
for max_ident, max_index in [(22, None), (256, 22)]:
dialect.max_identifier_length = max_ident
dialect.max_index_name_length = max_index
for tname, cname, exp in [
('sometable', 'this_name_is_too_long', 'ix_sometable_t_09aa'),
('sometable', 'this_name_alsois_long', 'ix_sometable_t_3cf1'),
]:
t1 = Table(tname, MetaData(),
Column(cname, Integer, index=True),
)
ix1 = list(t1.indexes)[0]
self.assert_compile(
schema.CreateIndex(ix1),
"CREATE INDEX %s "
"ON %s (%s)" % (exp, tname, cname),
dialect=dialect
)
dialect.max_identifier_length = 22
dialect.max_index_name_length = None
t1 = Table('t', MetaData(), Column('c', Integer))
assert_raises(
exc.IdentifierError,
schema.CreateIndex(Index(
"this_other_name_is_too_long_for_what_were_doing",
t1.c.c)).compile,
dialect=dialect
)
class ConstraintCompilationTest(TestBase, AssertsCompiledSQL):
def _test_deferrable(self, constraint_factory):
t = Table('tbl', MetaData(),
Column('a', Integer),
Column('b', Integer),
constraint_factory(deferrable=True))
sql = str(schema.CreateTable(t).compile(bind=testing.db))
assert 'DEFERRABLE' in sql, sql
assert 'NOT DEFERRABLE' not in sql, sql
t = Table('tbl', MetaData(),
Column('a', Integer),
Column('b', Integer),
constraint_factory(deferrable=False))
sql = str(schema.CreateTable(t).compile(bind=testing.db))
assert 'NOT DEFERRABLE' in sql
t = Table('tbl', MetaData(),
Column('a', Integer),
Column('b', Integer),
constraint_factory(deferrable=True, initially='IMMEDIATE'))
sql = str(schema.CreateTable(t).compile(bind=testing.db))
assert 'NOT DEFERRABLE' not in sql
assert 'INITIALLY IMMEDIATE' in sql
t = Table('tbl', MetaData(),
Column('a', Integer),
Column('b', Integer),
constraint_factory(deferrable=True, initially='DEFERRED'))
sql = str(schema.CreateTable(t).compile(bind=testing.db))
assert 'NOT DEFERRABLE' not in sql
assert 'INITIALLY DEFERRED' in sql
def test_deferrable_pk(self):
factory = lambda **kw: PrimaryKeyConstraint('a', **kw)
self._test_deferrable(factory)
def test_deferrable_table_fk(self):
factory = lambda **kw: ForeignKeyConstraint(['b'], ['tbl.a'], **kw)
self._test_deferrable(factory)
def test_deferrable_column_fk(self):
t = Table('tbl', MetaData(),
Column('a', Integer),
Column('b', Integer,
ForeignKey('tbl.a', deferrable=True,
initially='DEFERRED')))
self.assert_compile(
schema.CreateTable(t),
"CREATE TABLE tbl (a INTEGER, b INTEGER, FOREIGN KEY(b) REFERENCES tbl (a) DEFERRABLE INITIALLY DEFERRED)",
)
def test_deferrable_unique(self):
factory = lambda **kw: UniqueConstraint('b', **kw)
self._test_deferrable(factory)
def test_deferrable_table_check(self):
factory = lambda **kw: CheckConstraint('a < b', **kw)
self._test_deferrable(factory)
def test_deferrable_column_check(self):
t = Table('tbl', MetaData(),
Column('a', Integer),
Column('b', Integer,
CheckConstraint('a < b',
deferrable=True,
initially='DEFERRED')))
self.assert_compile(
schema.CreateTable(t),
"CREATE TABLE tbl (a INTEGER, b INTEGER CHECK (a < b) DEFERRABLE INITIALLY DEFERRED)"
)
def test_use_alter(self):
m = MetaData()
t = Table('t', m,
Column('a', Integer),
)
t2 = Table('t2', m,
Column('a', Integer, ForeignKey('t.a', use_alter=True, name='fk_ta')),
Column('b', Integer, ForeignKey('t.a', name='fk_tb')), # to ensure create ordering ...
)
e = engines.mock_engine(dialect_name='postgresql')
m.create_all(e)
m.drop_all(e)
e.assert_sql([
'CREATE TABLE t (a INTEGER)',
'CREATE TABLE t2 (a INTEGER, b INTEGER, CONSTRAINT fk_tb FOREIGN KEY(b) REFERENCES t (a))',
'ALTER TABLE t2 ADD CONSTRAINT fk_ta FOREIGN KEY(a) REFERENCES t (a)',
'ALTER TABLE t2 DROP CONSTRAINT fk_ta',
'DROP TABLE t2',
'DROP TABLE t'
])
def test_add_drop_constraint(self):
m = MetaData()
t = Table('tbl', m,
Column('a', Integer),
Column('b', Integer)
)
t2 = Table('t2', m,
Column('a', Integer),
Column('b', Integer)
)
constraint = CheckConstraint('a < b',name="my_test_constraint",
deferrable=True,initially='DEFERRED', table=t)
# before we create an AddConstraint,
# the CONSTRAINT comes out inline
self.assert_compile(
schema.CreateTable(t),
"CREATE TABLE tbl ("
"a INTEGER, "
"b INTEGER, "
"CONSTRAINT my_test_constraint CHECK (a < b) DEFERRABLE INITIALLY DEFERRED"
")"
)
self.assert_compile(
schema.AddConstraint(constraint),
"ALTER TABLE tbl ADD CONSTRAINT my_test_constraint "
"CHECK (a < b) DEFERRABLE INITIALLY DEFERRED"
)
# once we make an AddConstraint,
# inline compilation of the CONSTRAINT
# is disabled
self.assert_compile(
schema.CreateTable(t),
"CREATE TABLE tbl ("
"a INTEGER, "
"b INTEGER"
")"
)
self.assert_compile(
schema.DropConstraint(constraint),
"ALTER TABLE tbl DROP CONSTRAINT my_test_constraint"
)
self.assert_compile(
schema.DropConstraint(constraint, cascade=True),
"ALTER TABLE tbl DROP CONSTRAINT my_test_constraint CASCADE"
)
constraint = ForeignKeyConstraint(["b"], ["t2.a"])
t.append_constraint(constraint)
self.assert_compile(
schema.AddConstraint(constraint),
"ALTER TABLE tbl ADD FOREIGN KEY(b) REFERENCES t2 (a)"
)
constraint = ForeignKeyConstraint([t.c.a], [t2.c.b])
t.append_constraint(constraint)
self.assert_compile(
schema.AddConstraint(constraint),
"ALTER TABLE tbl ADD FOREIGN KEY(a) REFERENCES t2 (b)"
)
constraint = UniqueConstraint("a", "b", name="uq_cst")
t2.append_constraint(constraint)
self.assert_compile(
schema.AddConstraint(constraint),
"ALTER TABLE t2 ADD CONSTRAINT uq_cst UNIQUE (a, b)"
)
constraint = UniqueConstraint(t2.c.a, t2.c.b, name="uq_cs2")
self.assert_compile(
schema.AddConstraint(constraint),
"ALTER TABLE t2 ADD CONSTRAINT uq_cs2 UNIQUE (a, b)"
)
assert t.c.a.primary_key is False
constraint = PrimaryKeyConstraint(t.c.a)
assert t.c.a.primary_key is True
self.assert_compile(
schema.AddConstraint(constraint),
"ALTER TABLE tbl ADD PRIMARY KEY (a)"
)
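# Standalone sketch of the ALTER-based DDL exercised above (illustrative only;
# compiles against the default dialect, no database connection needed):
#
#     from sqlalchemy import MetaData, Table, Column, Integer, CheckConstraint
#     from sqlalchemy import schema
#
#     m = MetaData()
#     t = Table('tbl', m, Column('a', Integer), Column('b', Integer))
#     ck = CheckConstraint('a < b', name='ck_demo', table=t)
#
#     print(schema.AddConstraint(ck))
#     # ALTER TABLE tbl ADD CONSTRAINT ck_demo CHECK (a < b)
#     print(schema.DropConstraint(ck))
#     # ALTER TABLE tbl DROP CONSTRAINT ck_demo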
|
|
#!/usr/bin/env python
'''
EC2 external inventory script
=================================
Generates inventory that Ansible can understand by making API requests to
AWS EC2 using the Boto library.
NOTE: This script assumes Ansible is being executed where the environment
variables needed for Boto have already been set:
export AWS_ACCESS_KEY_ID='AK123'
export AWS_SECRET_ACCESS_KEY='abc123'
This script also assumes there is an ec2.ini file alongside it. To specify a
different path to ec2.ini, define the EC2_INI_PATH environment variable:
export EC2_INI_PATH=/path/to/my_ec2.ini
If you're using eucalyptus you need to set the above variables and
you need to define:
export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus
For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html
When run against a specific host, this script returns the following variables:
- ec2_ami_launch_index
- ec2_architecture
- ec2_association
- ec2_attachTime
- ec2_attachment
- ec2_attachmentId
- ec2_client_token
- ec2_deleteOnTermination
- ec2_description
- ec2_deviceIndex
- ec2_dns_name
- ec2_eventsSet
- ec2_group_name
- ec2_hypervisor
- ec2_id
- ec2_image_id
- ec2_instanceState
- ec2_instance_type
- ec2_ipOwnerId
- ec2_ip_address
- ec2_item
- ec2_kernel
- ec2_key_name
- ec2_launch_time
- ec2_monitored
- ec2_monitoring
- ec2_networkInterfaceId
- ec2_ownerId
- ec2_persistent
- ec2_placement
- ec2_platform
- ec2_previous_state
- ec2_private_dns_name
- ec2_private_ip_address
- ec2_publicIp
- ec2_public_dns_name
- ec2_ramdisk
- ec2_reason
- ec2_region
- ec2_requester_id
- ec2_root_device_name
- ec2_root_device_type
- ec2_security_group_ids
- ec2_security_group_names
- ec2_shutdown_state
- ec2_sourceDestCheck
- ec2_spot_instance_request_id
- ec2_state
- ec2_state_code
- ec2_state_reason
- ec2_status
- ec2_subnet_id
- ec2_tenancy
- ec2_virtualization_type
- ec2_vpc_id
These variables are pulled out of a boto.ec2.instance object. There is a lack of
consistency with variable spellings (camelCase and underscores) since this
just loops through all variables the object exposes. It is preferred to use the
ones with underscores when multiple exist.
In addition, if an instance has AWS Tags associated with it, each tag is a new
variable named:
- ec2_tag_[Key] = [Value]
Security groups are comma-separated in 'ec2_security_group_ids' and
'ec2_security_group_names'.
'''
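# Illustrative usage (added for clarity, not part of the original script): the
# group names and addresses below are hypothetical, but the overall JSON shape
# follows _empty_inventory() and the push()/push_group() helpers defined below.
#
#   ./ec2.py --list               # full inventory grouped by id, region, type, tags, ...
#   ./ec2.py --host 203.0.113.10  # hostvars for a single instance
#   ./ec2.py --refresh-cache      # ignore the cache files and query AWS again
#
# Example --list output (abridged):
#
#   {
#     "us-east-1": ["203.0.113.10"],
#     "type_m1_small": ["203.0.113.10"],
#     "tag_Name_web": ["203.0.113.10"],
#     "_meta": {"hostvars": {"203.0.113.10": {"ec2_id": "i-...", "ec2_region": "us-east-1"}}}
#   }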
# (c) 2012, Peter Sankauskas
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import sys
import os
import argparse
import re
from time import time
import boto
from boto import ec2
from boto import rds
from boto import route53
from boto.ec2 import elb
import ConfigParser
from collections import defaultdict
try:
import json
except ImportError:
import simplejson as json
class Ec2Inventory(object):
def _empty_inventory(self):
return {"_meta" : {"hostvars" : {}}}
def __init__(self):
''' Main execution path '''
# Inventory grouped by instance IDs, tags, security groups, regions,
# and availability zones
self.inventory = self._empty_inventory()
# Index of hostname (address) to instance ID
self.index = {}
# Read settings and parse CLI arguments
self.read_settings()
self.parse_cli_args()
# Cache
if self.args.refresh_cache:
self.do_api_calls_update_cache()
elif not self.is_cache_valid():
self.do_api_calls_update_cache()
# Data to print
if self.args.host:
data_to_print = self.get_host_info()
elif self.args.list:
# Display list of instances for inventory
if self.inventory == self._empty_inventory():
data_to_print = self.get_inventory_from_cache()
else:
data_to_print = self.json_format_dict(self.inventory, True)
print data_to_print
def is_cache_valid(self):
        ''' Determines whether the cache files have expired or are still valid '''
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
return True
return False
def read_settings(self):
''' Reads the settings from the ec2.ini file '''
config = ConfigParser.SafeConfigParser()
ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini')
ec2_ini_path = os.environ.get('EC2_INI_PATH', ec2_default_ini_path)
config.read(ec2_ini_path)
# is eucalyptus?
self.eucalyptus_host = None
self.eucalyptus = False
if config.has_option('ec2', 'eucalyptus'):
self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'):
self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')
# Regions
self.regions = []
configRegions = config.get('ec2', 'regions')
configRegions_exclude = config.get('ec2', 'regions_exclude')
if (configRegions == 'all'):
if self.eucalyptus_host:
self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name)
else:
for regionInfo in ec2.regions():
if regionInfo.name not in configRegions_exclude:
self.regions.append(regionInfo.name)
else:
self.regions = configRegions.split(",")
# Destination addresses
self.destination_variable = config.get('ec2', 'destination_variable')
self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
# Route53
self.route53_enabled = config.getboolean('ec2', 'route53')
self.route53_excluded_zones = []
if config.has_option('ec2', 'route53_excluded_zones'):
self.route53_excluded_zones.extend(
config.get('ec2', 'route53_excluded_zones', '').split(','))
# ELB
self.elb_enabled = True
if config.has_option('ec2', 'elb'):
            self.elb_enabled = config.getboolean('ec2', 'elb')
# Include RDS instances?
self.rds_enabled = True
if config.has_option('ec2', 'rds'):
self.rds_enabled = config.getboolean('ec2', 'rds')
# Return all EC2 and RDS instances (if RDS is enabled)
if config.has_option('ec2', 'all_instances'):
self.all_instances = config.getboolean('ec2', 'all_instances')
else:
self.all_instances = False
if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled:
self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')
else:
self.all_rds_instances = False
# Cache related
cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
self.cache_path_cache = cache_dir + "/ansible-ec2.cache"
self.cache_path_index = cache_dir + "/ansible-ec2.index"
self.cache_max_age = config.getint('ec2', 'cache_max_age')
# Configure nested groups instead of flat namespace.
if config.has_option('ec2', 'nested_groups'):
self.nested_groups = config.getboolean('ec2', 'nested_groups')
else:
self.nested_groups = False
# Do we need to just include hosts that match a pattern?
try:
pattern_include = config.get('ec2', 'pattern_include')
if pattern_include and len(pattern_include) > 0:
self.pattern_include = re.compile(pattern_include)
else:
self.pattern_include = None
except ConfigParser.NoOptionError, e:
self.pattern_include = None
# Do we need to exclude hosts that match a pattern?
try:
            pattern_exclude = config.get('ec2', 'pattern_exclude')
if pattern_exclude and len(pattern_exclude) > 0:
self.pattern_exclude = re.compile(pattern_exclude)
else:
self.pattern_exclude = None
except ConfigParser.NoOptionError, e:
self.pattern_exclude = None
# Instance filters (see boto and EC2 API docs)
self.ec2_instance_filters = defaultdict(list)
if config.has_option('ec2', 'instance_filters'):
for x in config.get('ec2', 'instance_filters', '').split(','):
filter_key, filter_value = x.split('=')
self.ec2_instance_filters[filter_key].append(filter_value)
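        # For example (hypothetical ec2.ini entry, added for illustration):
        #   instance_filters = tag:Environment=production,instance-type=m1.small
        # becomes two boto filters applied in get_instances_by_region:
        #   {'tag:Environment': ['production']} and {'instance-type': ['m1.small']}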
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
self.args = parser.parse_args()
def do_api_calls_update_cache(self):
''' Do API calls to each region, and save data in cache files '''
if self.route53_enabled:
self.get_route53_records()
if self.elb_enabled:
self.get_elb_records()
for region in self.regions:
self.get_instances_by_region(region)
if self.rds_enabled:
self.get_rds_instances_by_region(region)
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
def get_instances_by_region(self, region):
''' Makes an AWS EC2 API call to the list of instances in a particular
region '''
try:
if self.eucalyptus:
conn = boto.connect_euca(host=self.eucalyptus_host)
conn.APIVersion = '2010-08-31'
else:
conn = ec2.connect_to_region(region)
# connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
if conn is None:
print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
sys.exit(1)
reservations = []
if self.ec2_instance_filters:
for filter_key, filter_values in self.ec2_instance_filters.iteritems():
reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values }))
else:
reservations = conn.get_all_instances()
for reservation in reservations:
for instance in reservation.instances:
self.add_instance(instance, region)
except boto.exception.BotoServerError, e:
if not self.eucalyptus:
print "Looks like AWS is down again:"
print e
sys.exit(1)
def get_rds_instances_by_region(self, region):
''' Makes an AWS API call to the list of RDS instances in a particular
region '''
try:
conn = rds.connect_to_region(region)
if conn:
instances = conn.get_all_dbinstances()
for instance in instances:
self.add_rds_instance(instance, region)
except boto.exception.BotoServerError, e:
if not e.reason == "Forbidden":
print "Looks like AWS RDS is down: "
print e
sys.exit(1)
def get_instance(self, region, instance_id):
''' Gets details about a specific instance '''
if self.eucalyptus:
conn = boto.connect_euca(self.eucalyptus_host)
conn.APIVersion = '2010-08-31'
else:
conn = ec2.connect_to_region(region)
# connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
if conn is None:
print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
sys.exit(1)
reservations = conn.get_all_instances([instance_id])
for reservation in reservations:
for instance in reservation.instances:
return instance
def add_instance(self, instance, region):
''' Adds an instance to the inventory and index, as long as it is
addressable '''
# Only want running instances unless all_instances is True
if not self.all_instances and instance.state != 'running':
return
# Select the best destination address
if instance.subnet_id:
dest = getattr(instance, self.vpc_destination_variable)
else:
dest = getattr(instance, self.destination_variable)
if not dest:
# Skip instances we cannot address (e.g. private VPC subnet)
return
# if we only want to include hosts that match a pattern, skip those that don't
if self.pattern_include and not self.pattern_include.match(dest):
return
# if we need to exclude hosts that match a pattern, skip those
if self.pattern_exclude and self.pattern_exclude.match(dest):
return
# Add to index
self.index[dest] = [region, instance.id]
# Inventory: Group by instance ID (always a group of 1)
self.inventory[instance.id] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', instance.id)
# Inventory: Group by region
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
else:
self.push(self.inventory, region, dest)
# Inventory: Group by availability zone
self.push(self.inventory, instance.placement, dest)
if self.nested_groups:
self.push_group(self.inventory, region, instance.placement)
# Inventory: Group by instance type
type_name = self.to_safe('type_' + instance.instance_type)
self.push(self.inventory, type_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by key pair
if instance.key_name:
key_name = self.to_safe('key_' + instance.key_name)
self.push(self.inventory, key_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'keys', key_name)
# Inventory: Group by security group
try:
for group in instance.groups:
key = self.to_safe("security_group_" + group.name)
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
except AttributeError:
print 'Package boto seems a bit older.'
print 'Please upgrade boto >= 2.3.0.'
sys.exit(1)
# Inventory: Group by tag keys
for k, v in instance.tags.iteritems():
key = self.to_safe("tag_" + k + "=" + v)
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
self.push_group(self.inventory, self.to_safe("tag_" + k), key)
# Inventory: Group by ELBs
if self.elb_enabled:
elb_names = self.get_instance_elb_names(instance)
for name in elb_names:
self.push(self.inventory, name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'elb', name)
# Inventory: Group by Route53 domain names if enabled
if self.route53_enabled:
route53_names = self.get_instance_route53_names(instance)
for name in route53_names:
self.push(self.inventory, name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'route53', name)
# Global Tag: tag all EC2 instances
self.push(self.inventory, 'ec2', dest)
self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
def add_rds_instance(self, instance, region):
''' Adds an RDS instance to the inventory and index, as long as it is
addressable '''
# Only want available instances unless all_rds_instances is True
if not self.all_rds_instances and instance.status != 'available':
return
# Select the best destination address
#if instance.subnet_id:
#dest = getattr(instance, self.vpc_destination_variable)
#else:
#dest = getattr(instance, self.destination_variable)
dest = instance.endpoint[0]
if not dest:
# Skip instances we cannot address (e.g. private VPC subnet)
return
# Add to index
self.index[dest] = [region, instance.id]
# Inventory: Group by instance ID (always a group of 1)
self.inventory[instance.id] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', instance.id)
# Inventory: Group by region
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
else:
self.push(self.inventory, region, dest)
# Inventory: Group by availability zone
self.push(self.inventory, instance.availability_zone, dest)
if self.nested_groups:
self.push_group(self.inventory, region, instance.availability_zone)
# Inventory: Group by instance type
type_name = self.to_safe('type_' + instance.instance_class)
self.push(self.inventory, type_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by security group
try:
if instance.security_group:
key = self.to_safe("security_group_" + instance.security_group.name)
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
except AttributeError:
print 'Package boto seems a bit older.'
print 'Please upgrade boto >= 2.3.0.'
sys.exit(1)
# Inventory: Group by engine
self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest)
if self.nested_groups:
self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine))
# Inventory: Group by parameter group
self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest)
if self.nested_groups:
self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name))
# Global Tag: all RDS instances
self.push(self.inventory, 'rds', dest)
self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
def get_route53_records(self):
''' Get and store the map of resource records to domain names that
point to them. '''
r53_conn = route53.Route53Connection()
all_zones = r53_conn.get_zones()
route53_zones = [ zone for zone in all_zones if zone.name[:-1]
not in self.route53_excluded_zones ]
self.route53_records = {}
for zone in route53_zones:
rrsets = r53_conn.get_all_rrsets(zone.id)
for record_set in rrsets:
record_name = record_set.name
if record_name.endswith('.'):
record_name = record_name[:-1]
for resource in record_set.resource_records:
self.route53_records.setdefault(resource, set())
self.route53_records[resource].add(record_name)
    def get_elb_records(self):
        ''' Get and store the map of instance id to ELB resource name '''
        self.elb_records = {}
        for region in self.regions:
            elb_conn = elb.connect_to_region(region)
            for lb in elb_conn.get_all_load_balancers():
                elb_name = lb.name
                for instance in lb.instances:
                    self.elb_records.setdefault(instance.id, set())
                    self.elb_records[instance.id].add(elb_name)
    def get_instance_elb_names(self, instance):
        ''' Check if an instance is referenced in the records we have from
        ELB. If it is, return the list of ELB names which serve the instance.
        If nothing points to it, return an empty list. '''
        try:
            value = getattr(instance, 'id')
        except AttributeError:
            return []
        return list(self.elb_records.get(value, []))
def get_instance_route53_names(self, instance):
''' Check if an instance is referenced in the records we have from
Route53. If it is, return the list of domain names pointing to said
instance. If nothing points to it, return an empty list. '''
instance_attributes = [ 'public_dns_name', 'private_dns_name',
'ip_address', 'private_ip_address' ]
name_list = set()
for attrib in instance_attributes:
try:
value = getattr(instance, attrib)
except AttributeError:
continue
if value in self.route53_records:
name_list.update(self.route53_records[value])
return list(name_list)
def get_host_info_dict_from_instance(self, instance):
instance_vars = {}
for key in vars(instance):
value = getattr(instance, key)
key = self.to_safe('ec2_' + key)
# Handle complex types
# state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518
if key == 'ec2__state':
instance_vars['ec2_state'] = instance.state or ''
instance_vars['ec2_state_code'] = instance.state_code
elif key == 'ec2__previous_state':
instance_vars['ec2_previous_state'] = instance.previous_state or ''
instance_vars['ec2_previous_state_code'] = instance.previous_state_code
elif type(value) in [int, bool]:
instance_vars[key] = value
elif type(value) in [str, unicode]:
instance_vars[key] = value.strip()
elif type(value) == type(None):
instance_vars[key] = ''
elif key == 'ec2_region':
instance_vars[key] = value.name
elif key == 'ec2__placement':
instance_vars['ec2_placement'] = value.zone
elif key == 'ec2_tags':
for k, v in value.iteritems():
key = self.to_safe('ec2_tag_' + k)
instance_vars[key] = v
elif key == 'ec2_groups':
group_ids = []
group_names = []
for group in value:
group_ids.append(group.id)
group_names.append(group.name)
instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids])
instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names])
else:
pass
# TODO Product codes if someone finds them useful
#print key
#print type(value)
#print value
return instance_vars
def get_host_info(self):
''' Get variables about a specific host '''
if len(self.index) == 0:
# Need to load index from cache
self.load_index_from_cache()
        if self.args.host not in self.index:
# try updating the cache
self.do_api_calls_update_cache()
            if self.args.host not in self.index:
                # host might not exist anymore
return self.json_format_dict({}, True)
(region, instance_id) = self.index[self.args.host]
instance = self.get_instance(region, instance_id)
return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)
def push(self, my_dict, key, element):
''' Push an element onto an array that may not have been defined in
the dict '''
group_info = my_dict.setdefault(key, [])
if isinstance(group_info, dict):
host_list = group_info.setdefault('hosts', [])
host_list.append(element)
else:
group_info.append(element)
def push_group(self, my_dict, key, element):
''' Push a group as a child of another group. '''
parent_group = my_dict.setdefault(key, {})
if not isinstance(parent_group, dict):
parent_group = my_dict[key] = {'hosts': parent_group}
child_groups = parent_group.setdefault('children', [])
if element not in child_groups:
child_groups.append(element)
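    # Illustrative sketch (added for clarity, values hypothetical): after
    #   self.push(inventory, 'us-east-1', '10.0.0.1')
    #   self.push_group(inventory, 'regions', 'us-east-1')
    # the inventory contains roughly
    #   {'us-east-1': ['10.0.0.1'], 'regions': {'children': ['us-east-1']}}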
def get_inventory_from_cache(self):
''' Reads the inventory from the cache file and returns it as a JSON
object '''
cache = open(self.cache_path_cache, 'r')
json_inventory = cache.read()
return json_inventory
def load_index_from_cache(self):
''' Reads the index from the cache file sets self.index '''
cache = open(self.cache_path_index, 'r')
json_index = cache.read()
self.index = json.loads(json_index)
def write_to_cache(self, data, filename):
''' Writes data in JSON format to a file '''
json_data = self.json_format_dict(data, True)
cache = open(filename, 'w')
cache.write(json_data)
cache.close()
def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be
used as Ansible groups '''
return re.sub("[^A-Za-z0-9\-]", "_", word)
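    # e.g. (hypothetical input) to_safe("tag_Name=web server") -> "tag_Name_web_server"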
def json_format_dict(self, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
# Run the script
Ec2Inventory()
|
|
import unittest
import textwrap
import antlr3
import antlr3.tree
import testbase
import sys
class T(testbase.ANTLRTest):
def setUp(self):
self.oldPath = sys.path[:]
sys.path.insert(0, self.baseDir)
def tearDown(self):
sys.path = self.oldPath
def parserClass(self, base):
class TParser(base):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._output = ""
def capture(self, t):
self._output += t
def traceIn(self, ruleName, ruleIndex):
self.traces.append('>'+ruleName)
def traceOut(self, ruleName, ruleIndex):
self.traces.append('<'+ruleName)
def recover(self, input, re):
# no error recovery yet, just crash!
raise
return TParser
def lexerClass(self, base):
class TLexer(base):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._output = ""
def capture(self, t):
self._output += t
def traceIn(self, ruleName, ruleIndex):
self.traces.append('>'+ruleName)
def traceOut(self, ruleName, ruleIndex):
self.traces.append('<'+ruleName)
def recover(self, input):
# no error recovery yet, just crash!
raise
return TLexer
def execParser(self, grammar, grammarEntry, slaves, input):
for slave in slaves:
parserName = self.writeInlineGrammar(slave)[0]
# slave parsers are imported as normal python modules
# to force reloading current version, purge module from sys.modules
if parserName + 'Parser' in sys.modules:
del sys.modules[parserName + 'Parser']
lexerCls, parserCls = self.compileInlineGrammar(grammar)
cStream = antlr3.StringStream(input)
lexer = lexerCls(cStream)
tStream = antlr3.CommonTokenStream(lexer)
parser = parserCls(tStream)
getattr(parser, grammarEntry)()
return parser._output
def execLexer(self, grammar, slaves, input):
for slave in slaves:
parserName = self.writeInlineGrammar(slave)[0]
# slave parsers are imported as normal python modules
# to force reloading current version, purge module from sys.modules
if parserName + 'Parser' in sys.modules:
del sys.modules[parserName + 'Parser']
lexerCls = self.compileInlineGrammar(grammar)
cStream = antlr3.StringStream(input)
lexer = lexerCls(cStream)
while True:
token = lexer.nextToken()
if token is None or token.type == antlr3.EOF:
break
lexer._output += token.text
return lexer._output
def testDelegatorInvokesDelegateRule(self):
slave = textwrap.dedent(
r'''
parser grammar S1;
options {
language=Python3;
}
@members {
def capture(self, t):
self.gM1.capture(t)
}
a : B { self.capture("S.a") } ;
''')
master = textwrap.dedent(
r'''
grammar M1;
options {
language=Python3;
}
import S1;
s : a ;
B : 'b' ; // defines B from inherited token space
WS : (' '|'\n') {self.skip()} ;
''')
found = self.execParser(
master, 's',
slaves=[slave],
input="b"
)
self.assertEqual("S.a", found)
def testDelegatorInvokesDelegateRuleWithArgs(self):
slave = textwrap.dedent(
r'''
parser grammar S2;
options {
language=Python3;
}
@members {
def capture(self, t):
self.gM2.capture(t)
}
a[x] returns [y] : B {self.capture("S.a"); $y="1000";} ;
''')
master = textwrap.dedent(
r'''
grammar M2;
options {
language=Python3;
}
import S2;
s : label=a[3] {self.capture($label.y);} ;
B : 'b' ; // defines B from inherited token space
WS : (' '|'\n') {self.skip()} ;
''')
found = self.execParser(
master, 's',
slaves=[slave],
input="b"
)
self.assertEqual("S.a1000", found)
def testDelegatorAccessesDelegateMembers(self):
slave = textwrap.dedent(
r'''
parser grammar S3;
options {
language=Python3;
}
@members {
def capture(self, t):
self.gM3.capture(t)
def foo(self):
self.capture("foo")
}
a : B ;
''')
master = textwrap.dedent(
r'''
grammar M3; // uses no rules from the import
options {
language=Python3;
}
import S3;
s : 'b' {self.gS3.foo();} ; // gS is import pointer
WS : (' '|'\n') {self.skip()} ;
''')
found = self.execParser(
master, 's',
slaves=[slave],
input="b"
)
self.assertEqual("foo", found)
def testDelegatorInvokesFirstVersionOfDelegateRule(self):
slave = textwrap.dedent(
r'''
parser grammar S4;
options {
language=Python3;
}
@members {
def capture(self, t):
self.gM4.capture(t)
}
a : b {self.capture("S.a");} ;
b : B ;
''')
slave2 = textwrap.dedent(
r'''
parser grammar T4;
options {
language=Python3;
}
@members {
def capture(self, t):
self.gM4.capture(t)
}
a : B {self.capture("T.a");} ; // hidden by S.a
''')
master = textwrap.dedent(
r'''
grammar M4;
options {
language=Python3;
}
import S4,T4;
s : a ;
B : 'b' ;
WS : (' '|'\n') {self.skip()} ;
''')
found = self.execParser(
master, 's',
slaves=[slave, slave2],
input="b"
)
self.assertEqual("S.a", found)
def testDelegatesSeeSameTokenType(self):
slave = textwrap.dedent(
r'''
parser grammar S5; // A, B, C token type order
options {
language=Python3;
}
tokens { A; B; C; }
@members {
def capture(self, t):
self.gM5.capture(t)
}
x : A {self.capture("S.x ");} ;
''')
slave2 = textwrap.dedent(
r'''
parser grammar T5;
options {
language=Python3;
}
tokens { C; B; A; } /// reverse order
@members {
def capture(self, t):
self.gM5.capture(t)
}
y : A {self.capture("T.y");} ;
''')
master = textwrap.dedent(
r'''
grammar M5;
options {
language=Python3;
}
import S5,T5;
s : x y ; // matches AA, which should be "aa"
B : 'b' ; // another order: B, A, C
A : 'a' ;
C : 'c' ;
WS : (' '|'\n') {self.skip()} ;
''')
found = self.execParser(
master, 's',
slaves=[slave, slave2],
input="aa"
)
self.assertEqual("S.x T.y", found)
def testDelegatorRuleOverridesDelegate(self):
slave = textwrap.dedent(
r'''
parser grammar S6;
options {
language=Python3;
}
@members {
def capture(self, t):
self.gM6.capture(t)
}
a : b {self.capture("S.a");} ;
b : B ;
''')
master = textwrap.dedent(
r'''
grammar M6;
options {
language=Python3;
}
import S6;
b : 'b'|'c' ;
WS : (' '|'\n') {self.skip()} ;
''')
found = self.execParser(
master, 'a',
slaves=[slave],
input="c"
)
self.assertEqual("S.a", found)
# LEXER INHERITANCE
def testLexerDelegatorInvokesDelegateRule(self):
slave = textwrap.dedent(
r'''
lexer grammar S7;
options {
language=Python3;
}
@members {
def capture(self, t):
self.gM7.capture(t)
}
A : 'a' {self.capture("S.A ");} ;
C : 'c' ;
''')
master = textwrap.dedent(
r'''
lexer grammar M7;
options {
language=Python3;
}
import S7;
B : 'b' ;
WS : (' '|'\n') {self.skip()} ;
''')
found = self.execLexer(
master,
slaves=[slave],
input="abc"
)
self.assertEqual("S.A abc", found)
def testLexerDelegatorRuleOverridesDelegate(self):
slave = textwrap.dedent(
r'''
lexer grammar S8;
options {
language=Python3;
}
@members {
def capture(self, t):
self.gM8.capture(t)
}
A : 'a' {self.capture("S.A")} ;
''')
master = textwrap.dedent(
r'''
lexer grammar M8;
options {
language=Python3;
}
import S8;
A : 'a' {self.capture("M.A ");} ;
WS : (' '|'\n') {self.skip()} ;
''')
found = self.execLexer(
master,
slaves=[slave],
input="a"
)
self.assertEqual("M.A a", found)
if __name__ == '__main__':
unittest.main()
|
|
# -*- coding: utf-8 -*-
""" Sahana Eden Fire Station Model
@copyright: 2009-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3FireStationModel"]
from gluon import *
from gluon.dal import Row
from gluon.storage import Storage
from ..s3 import *
# =============================================================================
class S3FireStationModel(S3Model):
"""
A Model to manage Fire Stations:
http://eden.sahanafoundation.org/wiki/Deployments/Bombeiros
"""
names = ["fire_station",
"fire_station_vehicle",
"fire_water_source",
"fire_hazard_point",
"fire_staff_on_duty"
]
def model(self):
T = current.T
db = current.db
request = current.request
person_id = self.pr_person_id
location_id = self.gis_location_id
organisation_id = self.org_organisation_id
human_resource_id = self.hrm_human_resource_id
ireport_id = self.irs_ireport_id
vehicle_id = self.vehicle_vehicle_id
s3_utc_represent = lambda dt: S3DateTime.datetime_represent(dt, utc=True)
add_component = self.add_component
crud_strings = current.response.s3.crud_strings
# =====================================================================
# Fire Station
#
fire_station_types = {
1: T("Fire Station"),
9: T("Unknown type of facility"),
}
tablename = "fire_station"
table = self.define_table(tablename,
self.super_link("site_id", "org_site"),
Field("name", notnull=True, length=64,
label = T("Name")),
Field("code", unique=True, length=64,
label = T("Code")),
Field("facility_type", "integer",
requires = IS_NULL_OR(IS_IN_SET(fire_station_types)),
default = 1,
label = T("Facility Type"),
represent = lambda opt: \
fire_station_types.get(opt, T("not specified"))),
organisation_id(),
location_id(),
Field("phone", label = T("Phone"),
requires = IS_NULL_OR(s3_phone_requires)),
Field("website", label=T("Website"),
#requires = IS_NULL_OR(IS_URL()),
represent = lambda url: s3_url_represent(url)),
Field("email", label = T("Email"),
#requires = IS_NULL_OR(IS_EMAIL())
),
Field("fax", label = T("Fax"),
requires = IS_NULL_OR(s3_phone_requires)),
s3_comments(),
*s3_meta_fields())
self.configure("fire_station",
super_entity="org_site")
station_id = S3ReusableField("station_id", table,
requires = IS_NULL_OR(IS_ONE_OF(db, "fire_station.id", "%(name)s")),
represent = lambda id: (id and [db.fire_station[id].name] or [NONE])[0],
label = T("Station"),
ondelete = "CASCADE")
# CRUD strings
ADD_FIRE_STATION = T("Add Fire Station")
crud_strings[tablename] = Storage(
title_create = ADD_FIRE_STATION,
title_display = T("Fire Station Details"),
title_list = T("Fire Stations"),
title_update = T("Edit Station Details"),
title_search = T("Search for Fire Station"),
title_upload = T("Upload Fire Stations List"),
title_map = T("Map of Fire Stations"),
subtitle_create = T("Add New Fire Station"),
label_list_button = T("List Fire Stations"),
label_create_button = ADD_FIRE_STATION,
label_delete_button = T("Delete Fire Station"),
msg_record_created = T("Fire Station added"),
msg_record_modified = T("Fire Station updated"),
msg_record_deleted = T("Fire Station deleted"),
msg_no_match = T("No Fire Stations could be found"),
msg_list_empty = T("No Fire Stations currently registered"))
add_component("vehicle_vehicle",
fire_station = Storage(link="fire_station_vehicle",
joinby="station_id",
key="vehicle_id",
actuate="replace"))
add_component("fire_shift",
fire_station = "station_id")
add_component("fire_shift_staff",
fire_station = "station_id")
# =====================================================================
# Vehicles of Fire stations
#
tablename = "fire_station_vehicle"
table = self.define_table(tablename,
station_id(),
vehicle_id())
# CRUD strings
ADD_VEHICLE = T("Add Vehicle")
crud_strings[tablename] = Storage(
title_create = ADD_VEHICLE,
title_display = T("Vehicle Details"),
title_list = T("Vehicles"),
title_update = T("Edit Vehicle Details"),
title_search = T("Search for Vehicles"),
title_upload = T("Upload Vehicles List"),
subtitle_create = T("Add New Vehicle"),
label_list_button = T("List Vehicles"),
label_create_button = ADD_VEHICLE,
label_delete_button = T("Delete Vehicle"),
msg_record_created = T("Vehicle added"),
msg_record_modified = T("Vehicle updated"),
msg_record_deleted = T("Vehicle deleted"),
msg_no_match = T("No Vehicles could be found"),
msg_list_empty = T("No Vehicles currently registered"))
self.set_method("fire", "station",
method="vehicle_report",
action=self.vehicle_report)
# =====================================================================
# Water Sources
#
tablename = "fire_water_source"
table = self.define_table(tablename,
Field("name", "string"),
location_id(),
#Field("good_for_human_usage", "boolean"),
#Field("fresh", "boolean"),
#Field("Salt", "boolean"),
#Field("toponymy", "string"),
#Field("parish", "string"),
#Field("type", "string"),
#Field("owner", "string"),
#person_id(),
#organisation_id(),
#Field("shape", "string"),
#Field("diameter", "string"),
#Field("depth", "string"),
#Field("volume", "integer"),
#Field("lenght", "integer"),
#Field("height", "integer"),
#Field("usefull_volume", "integer"),
#Field("catchment", "integer"),
#Field("area", "integer"),
#Field("date", "date"),
#Field("access_type", "string"),
#Field("previews_usage", "boolean"),
#Field("car_access", "string"),
#Field("mid_truck_access", "string"),
#Field("truck_access", "string"),
#Field("distance_from_trees", "integer"),
#Field("distance_from_buildings", "integer"),
#Field("helicopter_access", "string"),
#Field("previews_usage_air", "boolean"),
#Field("car_movment_conditions", "string"),
#Field("midtruck_movment_conditions", "string"),
#Field("truck_movment_conditions", "string"),
#Field("powerline_distance", "integer"),
#Field("distance_other_risks", "integer"),
#Field("anti_seismic_construction", "boolean"),
#Field("isolated_from_air", "boolean"),
#Field("hermetic", "boolean"),
s3_comments(),
*s3_meta_fields())
# =====================================================================
# Hazards
# - this is long-term hazards, not incidents
#
tablename = "fire_hazard_point"
table = self.define_table(tablename,
location_id(),
Field("name", "string"),
# What are the Org & Person for? Contacts?
organisation_id(),
person_id(),
s3_comments(),
*s3_meta_fields())
# =====================================================================
# Shifts
#
tablename = "fire_shift"
table = self.define_table(tablename,
station_id(),
Field("name"),
Field("start_time", "datetime",
requires = IS_UTC_DATETIME_IN_RANGE(),
widget = S3DateTimeWidget(),
default = request.utcnow,
represent = s3_utc_represent),
Field("end_time","datetime",
requires = IS_UTC_DATETIME_IN_RANGE(),
widget = S3DateTimeWidget(),
default = request.utcnow,
represent = s3_utc_represent),
*s3_meta_fields())
shift_id = S3ReusableField("shift_id", table,
requires = IS_NULL_OR(IS_ONE_OF(db, "fire_shift.id",
self.fire_shift_represent)),
represent = self.fire_shift_represent,
label = T("Shift"),
ondelete = "CASCADE")
# ---------------------------------------------------------------------
tablename = "fire_shift_staff"
table = self.define_table(tablename,
station_id(),
#shift_id(),
human_resource_id(),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass variables back to global scope (s3db.*)
#
return Storage(
# used by IRS
fire_staff_on_duty = self.fire_staff_on_duty
)
# -------------------------------------------------------------------------
@staticmethod
def fire_shift_represent(shift):
"""
"""
db = current.db
table = db.fire_shift
if not isinstance(shift, Row):
shift = db(table.id == shift).select(table.start_time,
table.end_time,
limitby=(0, 1)).first()
return "%s - %s" % (shift.start_time, shift.end_time)
# -------------------------------------------------------------------------
@staticmethod
def fire_staff_on_duty(station_id=None):
"""
Return a query for hrm_human_resource filtering
for entries which are linked to a current shift
"""
db = current.db
staff = db.hrm_human_resource
roster = db.fire_shift_staff
query = (staff.id == roster.human_resource_id) & \
(roster.deleted != True)
if station_id is not None:
query &= (roster.station_id == station_id)
return query
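    # Usage sketch (illustrative, not from the original code): the query is
    # exposed through s3db (see the Storage returned by model() above) and
    # could be used roughly like
    #   query = current.s3db.fire_staff_on_duty(station_id)
    #   rows = current.db(query).select(current.db.hrm_human_resource.ALL)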
# -------------------------------------------------------------------------
@staticmethod
def vehicle_report(r, **attr):
"""
Custom method to provide a report on Vehicle Deployment Times
- this is one of the main tools currently used to manage an Incident
"""
rheader = attr.get("rheader", None)
if rheader:
rheader = rheader(r)
station_id = r.id
if station_id:
s3db = current.s3db
dtable = s3db.irs_ireport_vehicle
vtable = s3db.vehicle_vehicle
stable = s3db.fire_station_vehicle
query = (stable.station_id == station_id) & \
(stable.vehicle_id == vtable.id) & \
(vtable.asset_id == dtable.asset_id)
current.response.s3.crud_strings["irs_ireport_vehicle"] = Storage(
title_report = "Vehicle Deployment Times"
)
req = current.manager.parse_request("irs", "ireport_vehicle",
args=["report"],
vars=Storage(
rows = "asset_id",
cols = "ireport_id",
fact = "minutes",
aggregate = "sum"
))
req.set_handler("report", S3Cube())
req.resource.add_filter(query)
return req(rheader=rheader)
# END =========================================================================
|
|
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transition-in controller parameters."""
from makani.analysis.control import simple_aero
from makani.config import mconfig
from makani.config.m600.control import trans_in_controllers
import numpy as np
@mconfig.Config(deps={
'phys': 'common.physical_constants',
'rotors': mconfig.WING_MODEL + '.rotors',
'tether': mconfig.WING_MODEL + '.tether',
'wing': mconfig.WING_MODEL + '.wing',
'wing_serial': 'common.wing_serial',
})
def MakeParams(params):
controllers = trans_in_controllers.GetControllers(params['wing_serial'])
flap_offset = np.deg2rad(-11.5)
simple_aero_model = {
'dCL_dalpha': 7.23857325,
'CL_0': 1.69398499,
'base_flaps': [flap_offset, flap_offset, flap_offset, flap_offset,
flap_offset, flap_offset, 0.0, 0.0],
'dCL_dflap': [0.31226199, 0.37013073, 0.37070369, 0.37013073,
0.36783890, 0.31111608, 0.54488286, 0.00229183],
'dCY_dbeta': -1.419025,
'CY_0': -0.051620,
'dCD_dalpha': 0.68301,
'CD_0': 0.076590,
}
simple_aero.CheckSimpleAeroModel(
'm600/m600_aswing_baseline_zero_angular_rate.json',
simple_aero_model, 1e-3, flap_offset=flap_offset)
# Maximum aerodynamic climb angle [rad] commanded by the
# longitudinal loop. This value is also used to provide a lower
# limit on the expected lift in lateral control.
max_aero_climb_angle_cmd = np.deg2rad(55.0)
# Compute the maximum CL.
max_delta_flap_cmd = np.deg2rad(10.0)
max_angle_of_attack_cmd = np.deg2rad(1.0)
# TODO: Consider whether trans-in should use the dCL_dflap that was
# added to the simple aero model.
CL_max = (simple_aero_model['CL_0']
+ controllers['dCL_dflap'] * max_delta_flap_cmd
+ simple_aero_model['dCL_dalpha'] * max_angle_of_attack_cmd)
# Calculate the angle [rad] between the body x-axis and the total
# thrust line.
thrust_axis_b = np.zeros((3,))
for i in range(len(params['rotors'])):
thrust_axis_b += params['rotors'][i]['axis']
thrust_axis_b /= len(params['rotors'])
assert thrust_axis_b[1] == 0.0
thrust_pitch = np.arctan2(-thrust_axis_b[2], thrust_axis_b[0])
return {
# Airspeed bias [m/s] and thresholds [m/s] for handling propeller inflow.
#
# We subtract a bias from the airspeed measurement to account for the
# effect of propeller inflow on the Pitot sensor.
'prop_inflow_airspeed_bias': 1.4,
'prop_inflow_low_airspeed': 20.0,
'prop_inflow_high_airspeed': 50.0,
# X position [m] at which to start an early turn.
      'turn_start_pos_ti_x': -params['tether']['length'] * np.cos(np.pi / 4.0),
# Turning radius [m] for the early turn.
'turn_radius': 200.0,
# Course angle [rad] at which to resume straight flight.
'turn_course_angle': -np.pi / 6.0,
'mode': {
# Minimum estimated dynamic pressure [Pa] before starting trans-in.
# TODO: This value sets a minimum airspeed above which
# we can reasonably trust the Pitot pressure sensors.
#
# This threshold was reduced to avoid entering trans-out.
'min_dynamic_pressure': 30.0,
# Minimum time [s] spent in kFlightModeHoverAccel before a
# transition is allowed.
'min_time_in_accel': 2.5,
# Measured acceleration [m/s^2] threshold and time to keep
# accelerating [s]. Transition to kFlightModeTransIn
# requires the measured specific force to drop below this
# value or for max_time_keep_accelerating time to elapse.
'acc_stopped_accelerating_threshold': params['phys']['g'] * 1.05,
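          # (Added note: with g ~= 9.81 m/s^2 this threshold works out to
          # roughly 10.3 m/s^2.)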
'max_time_keep_accelerating': 5.0,
# Minimum pitch angle [rad] before forcing a transition from
# kFlightModeHoverAccel to kFlightModeTransIn.
'min_pitch_angle': -0.5
},
'longitudinal': {
# Aerodynamic climb angle limits [rad].
'min_aero_climb_angle_cmd': np.deg2rad(-10.0),
'max_aero_climb_angle_cmd': max_aero_climb_angle_cmd,
# Angle [rad] between the body x-axis and the thrust line.
'thrust_pitch': thrust_pitch,
# Minimum airspeed [m/s]. Below this airspeed, a linear
# gain is applied to pitch the kite forward.
#
# TODO: This should be a dynamic pressure.
'min_airspeed': 24.5,
# Natural frequency [Hz] for the position loop.
'radial_tracking_freq_hz': 0.1,
# Damping ratio for the radial tracking loop.
'radial_tracking_damping_ratio': 1.25,
# Threshold [m] on the radial error below which additional
# normal force is applied to establish tension.
'tension_control_radial_error_threshold': 20.0,
# Threshold [rad] on the elevation angle above which additional
# normal force is applied to establish tension.
'tension_control_elevation_angle_threshold': 0.7,
# Desired tension [N] on the tether sphere.
'min_tension_cmd': 6000.0,
# Zero angle-of-attack lift coefficient [#], lift slope
# [#/rad] with respect to change in angle-of-attack and flap
# commands.
'CL_0': simple_aero_model['CL_0'],
'dCL_dalpha': simple_aero_model['dCL_dalpha'],
'dCL_dflap': controllers['dCL_dflap'],
# Minimum and maximum delta flap command [rad].
'min_delta_flap_cmd': np.deg2rad(-10.0),
'max_delta_flap_cmd': max_delta_flap_cmd,
# Minimum and maximum angle-of-attack command [rad].
'min_angle_of_attack_cmd': np.deg2rad(-6.0),
'max_angle_of_attack_cmd': max_angle_of_attack_cmd,
# Maximum absolute feed-forward pitch rate [rad/s] to be commanded.
'max_pitch_rate_b_cmd': 0.5,
# Maximum thrust [N].
#
# This thrust command attempts to saturate thrust assuming
# Rev3 propellers, an 800 N-m torque limit, a per-motor
# power limit of 108 kW and a total aerodynamic power limit
# of 730 kW.
'thrust_cmd': 28000.0,
},
'lateral': {
# Maximum expected lift coefficient [#].
'CL_max': CL_max,
'max_aero_climb_angle': max_aero_climb_angle_cmd,
# Reference length [m] for the lateral tracking loop.
#
# The desired lateral tracking bandwidth is given by
# airspeed / (2.0 * pi * lateral_tracking_ref_length).
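          # (Added check: at an airspeed of 30 m/s this gives
          # 30 / (2 * pi * 150) ~= 0.032 Hz, just under the cap below.)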
'lateral_tracking_ref_length': 150.0,
# Maximum bandwidth [Hz] for the lateral tracking loop.
'max_lateral_tracking_freq_hz': 0.035,
# Damping ratio [#] for the desired wing lateral position
# response.
'lateral_tracking_damping_ratio': 0.5,
# Maximum lateral position error [m].
'max_pos_ti_y_err': 30.0,
# Maximum feed-forward yaw-rate [rad/s] that can be commanded.
'max_yaw_rate_ti_cmd': 0.3,
# Maximum absolute roll angle command [rad].
'max_delta_roll_ti_cmd': 0.3,
# Trim angle-of-sideslip, roll angle and yaw angle [rad] in
# the transition-in frame.
'angle_of_sideslip_cmd': controllers['angle_of_sideslip_cmd'],
'roll_ti_cmd': controllers['roll_ti_cmd'],
'yaw_ti_cmd': controllers['yaw_ti_cmd']
},
'attitude': {
# Minimum feed-forward pitch moment [N-m] to carry-over from
# the final kFlightModeHoverAccel commands. The upper bound
# on this moment is zero.
'min_initial_pitch_moment': -5000.0,
# Maximum feed-forward yaw moment magnitude [N-m] to
# carry-over from the final kFlightModeHoverAccel commands.
'max_initial_yaw_moment': 5000.0,
# Maximum angular acceleration [rad/s^2] for the initial
# pitch-forward trajectory.
'pitch_forward_max_pitch_accel': 1.0,
# Maximum pitch rate [rad/s] for the initial pitch-forward
# trajectory.
'pitch_forward_max_pitch_rate': 0.5,
# Pitch angle error [rad] above which no explicit pitch
# forward maneuver is attempted.
'pitch_forward_max_pitch_error': np.deg2rad(-15.0),
# Maximum duration [s] allowed for the pitch-forward maneuver.
'pitch_forward_max_duration': 4.0,
# Elevator trim [rad] for zero angle-of-attack assuming a
# slack tether.
'delta_elevator_alpha_zero': controllers['delta_elevator_alpha_zero'],
# Change in elevator trim per angle-of-attack [rad/rad] assuming
# a slack tether.
'ddelta_elevator_dalpha': controllers['ddelta_elevator_dalpha'],
# Airspeed [m/s] and angle-of-attack [rad] thresholds for
# holding the integrator. The integrator is allowed to
# operate after the first moment where the pitch forward is
# complete, the airspeed is above the first threshold and
# the angle-of-attack is below the second threshold.
'int_release_airspeed_threshold': 15.0,
'int_release_alpha_threshold': 0.15,
# Maximum rate of change of the integrated angle-of-attack
# error [rad].
'max_int_angle_of_attack_rate': 0.1,
# Maximum integrated angle-of-attack error [rad-s].
'max_int_angle_of_attack': 0.25,
# Maximum integrated roll error [rad-s].
'max_int_roll': 0.2,
# Ratio [#] of delta_flap_cmd to apply to the midboard flaps.
'midboard_flap_ratio': 0.5,
# Tension cutoffs [N] for when to use gains for airplane-like
# control (low tension) and on-tether control (high-tension).
'low_tension': 10000.0,
'high_tension': 20000.0,
# Gain matrices for low and high tension. The lateral gains
# are given as:
#
# [motor yaw; ailerons; rudder] =
# K * [roll; yaw; p; r; angle_of_sideslip].
#
# and the longitudinal gains as:
#
# [motor pitch; elevator] = K * [pitch; q; int_angle_of_attack].
#
# The low tension gains are trimmed by placing the kite 20
          # meters from full tether extension and trimming to have
# 2.0 [deg] roll angle, 45 [deg] aerodynamic climb angle, in 0
# [m/s] wind using the m600_vsaero_zero_angular_rate.json
# database. For the high tension gains, the kite's trim
          # position was moved outward until the tether tension
# reached 12 [kN], and merge_databases was set to false.
'lat_gains_pitch_forward': controllers['lat_gains_pitch_forward'],
'lat_gains_low_tension': controllers['lat_gains_low_tension'],
'lat_gains_high_tension': controllers['lat_gains_high_tension'],
'long_gains_pitch_forward': [
[5.08e+03, 10.04e+03, 0.0],
[-0.35, -0.15, 0.0]
],
'long_gains_low_tension': controllers['long_gains_low_tension'],
'long_gains_high_tension': controllers['long_gains_high_tension'],
},
'output': {
# Thrust and moment weights [#] for the least squares solver
# used to set rotor velocities. The relatively high weight
# on roll is to avoid exciting the symmetric torsional mode;
# the trans-in controller always commands zero roll moment.
'thrust_moment_weights': {
'thrust': 1e-3,
'moment': [3.0, 1.0, 1.0]
},
# Flap offsets and lower and upper limits [rad] in the
# standard order: port aileron, center flap, starboard
# aileron, elevator, rudder. The aileron offsets are
# slightly up so there is still some room left for control
# during acceleration as the 0 degree flap position
# corresponds to maximum lift.
#
# The offsets are chosen based on trimming the kite to have
# 2 [deg] roll angle, 45 [deg] aerodynamic climb angle, in 0
# [m/s] wind using the database
# m600_vsaero_zero_angular_rate.json.
#
# Note that the elevator trim command is handled in the
# attitude control parameters.
'flap_offsets': controllers['flap_offsets'],
'lower_flap_limits': [
np.deg2rad(angle) for angle in
[-20.0, -20.0, -20.0, -20.0, -20.0, -20.0, -10.0, -22.0]
],
'upper_flap_limits': [
np.deg2rad(angle) for angle in
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 15.0, 22.0]
]
}
}
|
|
from functools import partial
from itertools import imap
import string
# Higher order
from standardize_address import expand_standardize_abbr
from standardize_address import lookup_standardize_abbr
from standardize_address import title_case_string
def lower_getter(getter, data):
"""
Lower cases the result of the getter given data.
"""
return string.lower(str(getter(data)))
def lower_list_getter(getter, data):
"""
Lower cases the items in the result of the getter given data.
"""
value = getter(data)
if isinstance(value, tuple):
return map(string.lower, map(str, value))
return string.lower(str(value))
def title_getter(getter, data):
"""
Title cases the result of the getter given data.
"""
return title_case_string(str(getter(data)))
def title_list_getter(getter, data):
"""
Title cases the items in the result of the getter given data.
"""
return map(title_case_string, map(str, getter(data)))
def number_getter(getter, data):
"""
Gets the leading digits from the result of the getter given data.
"""
return get_number_prefix(getter(data))
def join_stripped_gotten_value(sep, getters, data):
"""
Join the values, coerced to str and stripped of whitespace padding,
    from the entity, obtained with the collection of getters,
with the separator.
:param sep: :class: `str` Separator of values.
    :param getters: collection of callables, each taking the data and returning a value.
:param data: argument for the getters
"""
return sep.join(
filter(
None,
imap(string.strip,
imap(str,
filter(None, [getter(data) for getter in getters])))))
def join_stripped_values(sep, collection_getter, data):
"""
Join the values, coerced to str and stripped of whitespace padding,
    from the entity, obtained with the collection_getter,
with the separator.
:param sep: :class: `str` Separator of values.
    :param collection_getter: callable that takes the data and returns a collection.
:param data: argument for the collection_getter
"""
value = collection_getter(data)
if not isinstance(value, tuple):
value = (value,)
return sep.join(
filter(
None,
imap(string.strip,
imap(str, filter(None, value)))))
# High order
def get_full_name(name_parts_getter, data):
"""
Space join the non-empty values from data with the name parts getter.
"""
return join_stripped_values(' ', name_parts_getter, data)
def get_phone(phone_parts_getter, data):
"""
Dash join the non-empty values from data with the phone parts getter.
The phone_parts_getter should return
the area code, exchange and last four.
"""
return join_stripped_values('-', phone_parts_getter, data)
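# For example (hypothetical data, added for illustration):
#   get_phone(lambda d: (d['area'], d['exchange'], d['last4']),
#             {'area': ' 555', 'exchange': '867', 'last4': '5309'})
# returns '555-867-5309'; the parts are str()-coerced and stripped before joining.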
def get_zip(zip_parts_getter, data):
"""
Dash join non-empty values from data with the zip parts getter.
"""
return join_stripped_values('-', zip_parts_getter, data)
# Addresses
def get_number_prefix(number):
number = str(number)
if not number:
return ""
try:
number = str(int(number))
except (ValueError, TypeError), e:
digits = []
for digit in number:
if digit in string.digits:
digits.append(digit)
else:
break
number = "".join(digits)
return number
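# Added examples (derived from the code above):
#   get_number_prefix("123A") -> "123", get_number_prefix(" 42") -> "42",
# and digits after the first non-digit are dropped, e.g. "12B34" -> "12".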
def get_raw_address_label(address_parts_getter, data):
"""
Get the address label for use with in the geocoder.
Space join non-empty parts of the address label
from the data.
"""
return join_stripped_values(' ', address_parts_getter, data)
def get_geocodable_address_label(house_number_getter,
street_name_getter,
data):
"""
Get the address label for use with the geocoder
using separate getters. Space join non-empty parts
"""
value = title_case_string(
expand_standardize_abbr(join_stripped_gotten_value(
' ', (house_number_getter,
street_name_getter), data)))
if "'" in value:
return value.replace("'", "")
return value
def get_address_label(address_parts_getter, data):
return lookup_standardize_abbr(
get_raw_address_label(address_parts_getter, data))
def get_address(address_label_getter,
city_getter,
state_getter,
zip_parts_getter,
data):
"""
Get the address for use in the geocoder.
Comma-space join non-empty parts of the address
from the data.
"""
return [join_stripped_gotten_value(
', ', (
address_label_getter,
city_getter,
state_getter,
partial(get_zip, zip_parts_getter)
),
data)]
def get_separated_address(address_label_getter,
city_getter,
state_getter,
zip_parts_getter,
data):
"""
Get the address for use in the geocoder.
Comma-space join non-empty parts of the address
from the data.
"""
return (address_label_getter(data),
city_getter(data),
state_getter(data),
get_zip(zip_parts_getter, data))
# get_geocoder_address just passes in the get_raw_address_label
# get_full_address passes in address label
# getter that runs lookup_standardize_abbr on the value.
def get_unit(unit_parts_getter, data):
return join_stripped_values(' ', unit_parts_getter, data)
def get_zip_road(zip5_getter, road_getter, data):
return join_stripped_gotten_value('|', (zip5_getter, road_getter), data)
|
|
import unittest
from tempfile import mkdtemp
from os.path import join
from shutil import rmtree
from json import dumps, loads
import copy
from .settings import SPEC_DATA_DIR
from slyd.gitstorage.repoman import Repoman
def j(json):
return dumps(json, sort_keys=True, indent=4)
class RepomanTest(unittest.TestCase):
def setUp(self):
self.temp_repos_dir = mkdtemp(dir=SPEC_DATA_DIR,
prefix='test-run-')
Repoman.setup(
storage_backend='dulwich.fsrepo.FsRepo',
location=self.temp_repos_dir
)
def tearDown(self):
rmtree(self.temp_repos_dir)
def get_full_name(self, repo_name):
return join(self.temp_repos_dir, repo_name)
def test_create(self):
Repoman.create_repo(self.get_full_name('my_repo'))
self.assertTrue(Repoman.repo_exists(self.get_full_name('my_repo')))
def test_save_file(self):
repoman = Repoman.create_repo(self.get_full_name('my_repo'))
contents = j({'a': 1})
repoman.save_file('f1', contents, 'testbranch')
self.assertEqual(['f1'], repoman.list_files_for_branch('testbranch'))
self.assertEqual(
contents, repoman.file_contents_for_branch('f1', 'testbranch'))
def test_delete_file(self):
repoman = Repoman.create_repo(self.get_full_name('my_repo'))
contents = j({'a': 1})
repoman.save_file('f1', contents, 'testbranch')
repoman.delete_file('f1', 'testbranch')
self.assertEqual([], repoman.list_files_for_branch('testbranch'))
def test_branch_ops(self):
repoman = Repoman.create_repo(self.get_full_name('my_repo'))
repoman.create_branch('b1')
self.assertTrue(repoman.has_branch('b1'))
self.assertEqual(len(repoman.get_branch('b1')), 40)
repoman.delete_branch('b1')
self.assertFalse(repoman.has_branch('b1'))
def test_simple_publish(self):
repoman = Repoman.create_repo(self.get_full_name('my_repo'))
f1, f2, f3 = j({'a': 1}), j({'b': 2}), j({'c': 3})
repoman.save_file('f1', f1, 'b1')
repoman.save_file('f2', f2, 'b1')
repoman.save_file('x/f3', f3, 'b1')
repoman.save_file('f4', '{}', 'b1')
repoman.delete_file('f4', 'b1')
self.assertTrue(repoman.has_branch('b1'))
self.assertTrue(repoman.has_branch('master'))
self.assertEqual([], repoman.list_files_for_branch('master'))
self.assertTrue(repoman.publish_branch('b1'))
self.assertItemsEqual(['f1', 'f2', 'x/f3'],
repoman.list_files_for_branch('master'))
self.assertEqual([f1, f2, f3],
[repoman.file_contents_for_branch(x, 'b1')
for x in ('f1', 'f2', 'x/f3')])
self.assertEqual([f1, f2, f3],
[repoman.file_contents_for_branch(x, 'master')
for x in ('f1', 'f2', 'x/f3')])
# Only one published revision
self.assertEqual(len(repoman.get_published_revisions()), 1)
# 6 checkpoints, 1 per operation (5) + 1 for the original state.
self.assertEqual(len(repoman.get_branch_checkpoints('b1')), 6)
def test_sequential_publishes(self):
repoman = Repoman.create_repo(self.get_full_name('my_repo'))
f1, f2 = j({'a': 1}), j({'b': 2})
repoman.save_file('f1', f1, 'b1')
repoman.save_file('x/f2', f2, 'b1')
repoman.publish_branch('b1')
repoman.delete_branch('b1')
# f1 is modified in branch b2
f1 = j({'a': 3})
repoman.save_file('f1', f1, 'b2')
self.assertTrue(repoman.publish_branch('b2'))
self.assertEqual([f1, f2],
[repoman.file_contents_for_branch(x, 'master')
for x in ('f1', 'x/f2')])
self.assertEqual(len(repoman.get_published_revisions()), 2)
def test_two_interleaved_publishes_1(self):
repoman = Repoman.create_repo(self.get_full_name('my_repo'))
f1, f2 = j({'a': 1}), j({'b': 2})
repoman.save_file('f1', f1, 'b1')
repoman.save_file('x/f2', f2, 'b1')
# branch b2 modifies the same files concurrently
f1, f2 = j({'c': 3}), j({'d': 4})
repoman.save_file('f1', f1, 'b2')
repoman.save_file('x/f2', f2, 'b2')
# both publish their changes, but the automerge should solve conflicts
self.assertTrue(repoman.publish_branch('b1'))
self.assertTrue(repoman.publish_branch('b2'))
self.assertEqual(j({'a': 1, 'c': 3}),
repoman.file_contents_for_branch('f1', 'master'))
self.assertEqual(j({'b': 2, 'd': 4}),
repoman.file_contents_for_branch('x/f2', 'master'))
self.assertEqual(len(repoman.get_published_revisions()), 2)
def test_two_interleaved_publishes_2(self):
repoman = Repoman.create_repo(self.get_full_name('my_repo'))
f1 = j({'a': 1, 'c': 3})
repoman.save_file('f1', f1, 'b1')
self.assertTrue(repoman.publish_branch('b1'))
repoman.delete_branch('b1')
# b1 adds x/f2.
f2 = j({'b': 2})
repoman.save_file('x/f2', f2, 'b1')
# branch b2 adds a file with the same name but different content
f2 = j({'a': 2, 'c': {'d': 1}})
repoman.save_file('x/f2', f2, 'b2')
repoman.delete_file('f1', 'b2')
# both publish their changes, but the automerge should solve conflicts
self.assertTrue(repoman.publish_branch('b1'))
self.assertTrue(repoman.publish_branch('b2'))
self.assertEqual(j({'a': 2, 'b': 2, 'c': {'d': 1}}),
repoman.file_contents_for_branch('x/f2', 'master'))
self.assertEqual(len(repoman.get_published_revisions()), 3)
@unittest.skip('Broken, TODO check') # TODO
def test_two_interleaved_publishes_3(self):
repoman = Repoman.create_repo(self.get_full_name('my_repo'))
f1 = j({'a': 1, 'c': 3, 'd': 4, 'e': 5})
repoman.save_file('f1', f1, 'b1')
self.assertTrue(repoman.publish_branch('b1'))
repoman.delete_branch('b1')
# b1 heavily edits f1
repoman.save_file('f1', j({'b': 2, 'e': 5}), 'b1')
# this case is VERY tricky. branch 2 renames f1 to f2 and changes
# it a bit. The merge algorithm detects the rename and the merged
# output ends up containing all b1 changes + all b2 changes, and the
# file is stored under the name given by branch2
repoman.delete_file('f1', 'b2')
repoman.save_file('f2', j({'a': 1, 'c': 3, 'd': 4, 'e': 6}), 'b2')
# both publish their changes, but the automerge should solve conflicts
self.assertTrue(repoman.publish_branch('b1'))
self.assertTrue(repoman.publish_branch('b2'))
self.assertEqual(j({'b': 2, 'e': 6}),
repoman.file_contents_for_branch('f2', 'master'))
self.assertEqual(len(repoman.get_published_revisions()), 3)
def test_modify_delete(self):
# Although this is usually treated as a conflict, here we just keep the
# modified version and ignore the delete.
repoman = Repoman.create_repo(self.get_full_name('my_repo'))
repoman.save_file('f1', j({'a': 1}), 'b1')
self.assertTrue(repoman.publish_branch('b1'))
repoman.delete_branch('b1')
# b1 deletes f1 and b2 modifies it.
repoman.delete_file('f1', 'b1')
repoman.save_file('f1', j({'a': 2, 'c': 3}), 'b2')
self.assertTrue(repoman.publish_branch('b1'))
self.assertTrue(repoman.publish_branch('b2'))
# master has f1.
self.assertEqual(['f1'], repoman.list_files_for_branch('master'))
self.assertEqual(j({'a': 2, 'c': 3}),
repoman.file_contents_for_branch('f1', 'master'))
def test_unresolved_conflicts_both_modify(self):
repoman = Repoman.create_repo(self.get_full_name('my_repo'))
repoman.save_file('f1', j({'a': 1}), 'b1')
self.assertTrue(repoman.publish_branch('b1'))
repoman.delete_branch('b1')
# both branches update the same key of the same file with different
# values. This conflict must be manually resolved
repoman.save_file('f1', j({'a': 2}), 'b1')
repoman.save_file('f1', j({'a': 3}), 'b2')
self.assertTrue(repoman.publish_branch('b1'))
self.assertFalse(repoman.publish_branch('b2'))
# the file appears as published by b1 in the master branch
self.assertEqual(j({'a': 2}),
repoman.file_contents_for_branch('f1', 'master'))
# the file in b2 has an unresolved conflict
self.assertIn('__CONFLICT',
j(repoman.file_contents_for_branch('f1', 'b2')))
# b2 solves the conflict, saves again and forces the publish
repoman.save_file('f1', j({'a': 3}), 'b2')
self.assertTrue(repoman.publish_branch('b2', force=True))
self.assertEqual(j({'a': 3}),
repoman.file_contents_for_branch('f1', 'master'))
def test_unresolved_conflicts_both_add(self):
repoman = Repoman.create_repo(self.get_full_name('my_repo'))
# both add the same file with a conflicting key
repoman.save_file('f1', j({'a': 1}), 'b1')
repoman.save_file('f1', j({'a': 2}), 'b2')
self.assertTrue(repoman.publish_branch('b1'))
self.assertFalse(repoman.publish_branch('b2'))
# the file appears as published by b1 in the master branch
self.assertEqual(j({'a': 1}),
repoman.file_contents_for_branch('f1', 'master'))
# the file in b2 has an unresolved conflict
self.assertIn('__CONFLICT',
j(repoman.file_contents_for_branch('f1', 'b2')))
|
|
"""A formatter which formats phone numbers as they are entered.
An AsYouTypeFormatter can be created by invoking
AsYouTypeFormatter(region_code). After that digits can be added by invoking
input_digit() on the formatter instance, and the partially formatted phone
number will be returned each time a digit is added. clear() should be invoked
before a new number needs to be formatted.
See the unit tests for more details on how the formatter is to be used.
"""
# Based on original Java code:
# java/src/com/google/i18n/phonenumbers/AsYouTypeFormatter.java
# Copyright (C) 2009-2011 The Libphonenumber Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from .util import u, unicod, U_EMPTY_STRING, U_SPACE
from .unicode_util import digit as unicode_digit
from .re_util import fullmatch
from .phonemetadata import PhoneMetadata
from .phonenumberutil import _VALID_PUNCTUATION, REGION_CODE_FOR_NON_GEO_ENTITY
from .phonenumberutil import _PLUS_SIGN, _PLUS_CHARS_PATTERN
from .phonenumberutil import _extract_country_code, region_code_for_country_code
from .phonenumberutil import country_code_for_region
from .phonenumberutil import _formatting_rule_has_first_group_only
# Character used when appropriate to separate a prefix, such as a long NDD or
# a country calling code, from the national number.
_SEPARATOR_BEFORE_NATIONAL_NUMBER = U_SPACE
_EMPTY_METADATA = PhoneMetadata(id=unicod(""),
international_prefix=unicod("NA"),
register=False)
# A pattern that is used to match character classes in regular expressions. An
# example of a character class is [1-4].
_CHARACTER_CLASS_PATTERN = re.compile(unicod("\\[([^\\[\\]])*\\]"))
# Any digit in a regular expression that actually denotes a digit. For
# example, in the regular expression 80[0-2]\d{6,10}, the first 2 digits (8
# and 0) are standalone digits, but the rest are not.
# Two look-aheads are needed because the number following \\d could be a
# two-digit number, since the phone number can be as long as 15 digits.
_STANDALONE_DIGIT_PATTERN = re.compile(unicod("\\d(?=[^,}][^,}])"))
# A set of characters that, if found in a national prefix formatting rule, are an indicator to
# us that we should separate the national prefix from the number when formatting.
_NATIONAL_PREFIX_SEPARATORS_PATTERN = re.compile("[- ]")
# A pattern that is used to determine if a number_format under
# available_formats is eligible to be used by the AYTF. It is eligible when
# the format element under number_format contains groups of the dollar sign
# followed by a single digit, separated by valid phone number
# punctuation. This prevents invalid punctuation (such as the star sign in
# Israeli star numbers) getting into the output of the AYTF.
_ELIGIBLE_FORMAT_PATTERN = re.compile(unicod("[") + _VALID_PUNCTUATION + unicod("]*") +
unicod("(\\\\\\d") + unicod("[") + _VALID_PUNCTUATION + unicod("]*)+"))
# This is the minimum length of national number accrued that is required to
# trigger the formatter. The first element of the leading_digits_pattern of each
# number_format contains a regular expression that matches up to this number of
# digits.
_MIN_LEADING_DIGITS_LENGTH = 3
# The digits that have not been entered yet will be represented by a \u2008,
# the punctuation space.
_DIGIT_PLACEHOLDER = u("\u2008")
_DIGIT_PATTERN = re.compile(_DIGIT_PLACEHOLDER)
def _get_metadata_for_region(region_code):
"""The metadata needed by this class is the same for all regions
    sharing the same country calling code. Therefore, we return the
    metadata for the "main" region for this country calling code."""
country_calling_code = country_code_for_region(region_code)
main_country = region_code_for_country_code(country_calling_code)
# Set to a default instance of the metadata. This allows us to
# function with an incorrect region code, even if formatting only
# works for numbers specified with "+".
return PhoneMetadata.metadata_for_region(main_country, _EMPTY_METADATA)
class AsYouTypeFormatter(object):
def __init__(self, region_code):
"""Gets an AsYouTypeFormatter for the specific region.
Arguments:
region_code -- The region where the phone number is being entered
        Return an AsYouTypeFormatter object, which could be used to format
phone numbers in the specific region "as you type"
"""
self._clear()
self._default_country = region_code.upper()
self._current_metadata = _get_metadata_for_region(self._default_country)
self._default_metadata = self._current_metadata
def _maybe_create_new_template(self):
"""Returns True if a new template is created as opposed to reusing the existing template.
When there are multiple available formats, the formatter uses the
first format where a formatting template could be created.
"""
ii = 0
while ii < len(self._possible_formats):
number_format = self._possible_formats[ii]
pattern = number_format.pattern
if self._current_formatting_pattern == pattern:
return False
if self._create_formatting_template(number_format):
self._current_formatting_pattern = pattern
if number_format.national_prefix_formatting_rule is None:
self._should_add_space_after_national_prefix = False
else:
self._should_add_space_after_national_prefix = bool(_NATIONAL_PREFIX_SEPARATORS_PATTERN.search(number_format.national_prefix_formatting_rule))
# With a new formatting template, the matched position using
# the old template needs to be reset.
self._last_match_position = 0
return True
else:
# Remove the current number format from _possible_formats
del self._possible_formats[ii]
ii -= 1
ii += 1
self._able_to_format = False
return False
def _get_available_formats(self, leading_digits):
if (self._is_complete_number and
len(self._current_metadata.intl_number_format) > 0):
number_formats = self._current_metadata.intl_number_format
else:
number_formats = self._current_metadata.number_format
national_prefix_is_used_by_country = (self._current_metadata.national_prefix is not None)
for this_format in number_formats:
if (not national_prefix_is_used_by_country or self._is_complete_number or
this_format.national_prefix_optional_when_formatting or
_formatting_rule_has_first_group_only(this_format.national_prefix_formatting_rule)):
if self._is_format_eligible(this_format.format):
self._possible_formats.append(this_format)
self._narrow_down_possible_formats(leading_digits)
def _is_format_eligible(self, format):
return fullmatch(_ELIGIBLE_FORMAT_PATTERN, format)
def _narrow_down_possible_formats(self, leading_digits):
index_of_leading_digits_pattern = len(leading_digits) - _MIN_LEADING_DIGITS_LENGTH
ii = 0
while ii < len(self._possible_formats):
num_format = self._possible_formats[ii]
ii += 1
if len(num_format.leading_digits_pattern) == 0:
# Keep everything that isn't restricted by leading digits.
continue
last_leading_digits_pattern = min(index_of_leading_digits_pattern,
len(num_format.leading_digits_pattern) - 1)
leading_digits_pattern = re.compile(num_format.leading_digits_pattern[last_leading_digits_pattern])
m = leading_digits_pattern.match(leading_digits)
if not m:
# remove the element we've just examined, now at (ii-1)
ii -= 1
self._possible_formats.pop(ii)
def _create_formatting_template(self, num_format):
number_pattern = num_format.pattern
# The formatter doesn't format numbers when number_pattern contains
# "|", e.g. (20|3)\d{4}. In those cases we quickly return.
if number_pattern.find('|') != -1:
return False
# Replace anything in the form of [..] with \d
number_pattern = re.sub(_CHARACTER_CLASS_PATTERN, unicod("\\\\d"), number_pattern)
# Replace any standalone digit (not the one in d{}) with \d
number_pattern = re.sub(_STANDALONE_DIGIT_PATTERN, unicod("\\\\d"), number_pattern)
        self._formatting_template = U_EMPTY_STRING
temp_template = self._get_formatting_template(number_pattern, num_format.format)
if len(temp_template) > 0:
self._formatting_template = temp_template
return True
return False
def _get_formatting_template(self, number_pattern, number_format):
"""Gets a formatting template which can be used to efficiently
format a partial number where digits are added one by one."""
# Create a phone number consisting only of the digit 9 that matches the
# number_pattern by applying the pattern to the longest_phone_number string.
longest_phone_number = unicod("999999999999999")
number_re = re.compile(number_pattern)
m = number_re.search(longest_phone_number) # this will always succeed
a_phone_number = m.group(0)
# No formatting template can be created if the number of digits
# entered so far is longer than the maximum the current formatting
# rule can accommodate.
if len(a_phone_number) < len(self._national_number):
return U_EMPTY_STRING
# Formats the number according to number_format
template = re.sub(number_pattern, number_format, a_phone_number)
# Replaces each digit with character _DIGIT_PLACEHOLDER
template = re.sub("9", _DIGIT_PLACEHOLDER, template)
return template
def _clear(self):
"""Clears the internal state of the formatter, so it can be reused."""
self._current_output = U_EMPTY_STRING
self._accrued_input = U_EMPTY_STRING
self._accrued_input_without_formatting = U_EMPTY_STRING
self._formatting_template = U_EMPTY_STRING
self._last_match_position = 0
# The pattern from number_format that is currently used to create
# formatting_template.
self._current_formatting_pattern = U_EMPTY_STRING
# This contains anything that has been entered so far preceding the
# national significant number, and it is formatted (e.g. with space
# inserted). For example, this can contain IDD, country code, and/or
# NDD, etc.
self._prefix_before_national_number = U_EMPTY_STRING
self._should_add_space_after_national_prefix = False
# This contains the national prefix that has been extracted. It
# contains only digits without formatting.
self._extracted_national_prefix = U_EMPTY_STRING
self._national_number = U_EMPTY_STRING
# This indicates whether AsYouTypeFormatter is currently doing the
# formatting.
self._able_to_format = True
# Set to true when users enter their own
# formatting. AsYouTypeFormatter will do no formatting at all when
# this is set to True.
self._input_has_formatting = False
# The position of a digit upon which input_digit(remember_position=True) is
# most recently invoked, as found in accrued_input_without_formatting.
self._position_to_remember = 0
# The position of a digit upon which input_digit(remember_position=True) is
# most recently invoked, as found in the original sequence of
# characters the user entered.
self._original_position = 0
# This is set to true when we know the user is entering a full
# national significant number, since we have either detected a
# national prefix or an international dialing prefix. When this is
# true, we will no longer use local number formatting patterns.
self._is_complete_number = False
self._is_expecting_country_calling_code = False
self._possible_formats = []
def clear(self):
"""Clears the internal state of the formatter, so it can be reused."""
self._clear()
if self._current_metadata != self._default_metadata:
self._current_metadata = _get_metadata_for_region(self._default_country)
def input_digit(self, next_char, remember_position=False):
"""Formats a phone number on-the-fly as each digit is entered.
If remember_position is set, remembers the position where next_char is
inserted, so that it can be retrieved later by using
get_remembered_position. The remembered position will be automatically
adjusted if additional formatting characters are later
inserted/removed in front of next_char.
Arguments:
next_char -- The most recently entered digit of a phone
number. Formatting characters are allowed, but as soon as they
are encountered this method formats the number as entered and
not "as you type" anymore. Full width digits and Arabic-indic
digits are allowed, and will be shown as they are.
remember_position -- Whether to track the position where next_char is
inserted.
Returns the partially formatted phone number.
"""
self._accrued_input += next_char
if remember_position:
self._original_position = len(self._accrued_input)
# We do formatting on-the-fly only when each character entered is
# either a digit, or a plus sign (accepted at the start of the number
# only).
if not self._is_digit_or_leading_plus_sign(next_char):
self._able_to_format = False
self._input_has_formatting = True
else:
next_char = self._normalize_and_accrue_digits_and_plus_sign(next_char, remember_position)
if not self._able_to_format:
# When we are unable to format because of reasons other than that
# formatting chars have been entered, it can be due to really long
# IDDs or NDDs. If that is the case, we might be able to do
# formatting again after extracting them.
if self._input_has_formatting:
self._current_output = self._accrued_input
return self._current_output
elif self._attempt_to_extract_idd():
if self._attempt_to_extract_ccc():
self._current_output = self._attempt_to_choose_pattern_with_prefix_extracted()
return self._current_output
elif self._able_to_extract_longer_ndd():
# Add an additional space to separate long NDD and national
# significant number for readability. We don't set
# should_add_space_after_national_prefix to True, since we don't
# want this to change later when we choose formatting
# templates.
self._prefix_before_national_number += _SEPARATOR_BEFORE_NATIONAL_NUMBER
self._current_output = self._attempt_to_choose_pattern_with_prefix_extracted()
return self._current_output
self._current_output = self._accrued_input
return self._current_output
# We start to attempt to format only when at least
# MIN_LEADING_DIGITS_LENGTH digits (the plus sign is counted as a
# digit as well for this purpose) have been entered.
len_input = len(self._accrued_input_without_formatting)
if len_input >= 0 and len_input <= 2:
self._current_output = self._accrued_input
return self._current_output
elif len_input == 3:
if self._attempt_to_extract_idd():
self._is_expecting_country_calling_code = True
else:
# No IDD or plus sign is found, might be entering in national format.
self._extracted_national_prefix = self._remove_national_prefix_from_national_number()
self._current_output = self._attempt_to_choose_formatting_pattern()
return self._current_output
if self._is_expecting_country_calling_code:
if self._attempt_to_extract_ccc():
self._is_expecting_country_calling_code = False
self._current_output = self._prefix_before_national_number + self._national_number
return self._current_output
if len(self._possible_formats) > 0: # The formatting patterns are already chosen.
temp_national_number = self._input_digit_helper(next_char)
# See if the accrued digits can be formatted properly already. If
# not, use the results from input_digit_helper, which does
# formatting based on the formatting pattern chosen.
formatted_number = self._attempt_to_format_accrued_digits()
if len(formatted_number) > 0:
self._current_output = formatted_number
return self._current_output
self._narrow_down_possible_formats(self._national_number)
if self._maybe_create_new_template():
self._current_output = self._input_accrued_national_number()
return self._current_output
if self._able_to_format:
self._current_output = self._append_national_number(temp_national_number)
return self._current_output
else:
self._current_output = self._accrued_input
return self._current_output
else:
self._current_output = self._attempt_to_choose_formatting_pattern()
return self._current_output
def _attempt_to_choose_pattern_with_prefix_extracted(self):
self._able_to_format = True
self._is_expecting_country_calling_code = False
self._possible_formats = []
self._last_match_position = 0
self._formatting_template = U_EMPTY_STRING
self._current_formatting_pattern = U_EMPTY_STRING
return self._attempt_to_choose_formatting_pattern()
# Some national prefixes are a substring of others. If extracting the
# shorter NDD doesn't result in a number we can format, we try to see if
# we can extract a longer version here.
def _able_to_extract_longer_ndd(self):
if len(self._extracted_national_prefix) > 0:
# Put the extracted NDD back to the national number before
# attempting to extract a new NDD.
self._national_number = self._extracted_national_prefix + self._national_number
            # Remove the previously extracted NDD from
            # _prefix_before_national_number. We cannot simply set it to an
            # empty string because people sometimes incorrectly enter the
            # national prefix after the country code, e.g. +44 (0)20-1234-5678.
index_of_previous_ndd = self._prefix_before_national_number.rfind(self._extracted_national_prefix)
self._prefix_before_national_number = self._prefix_before_national_number[:index_of_previous_ndd]
return self._extracted_national_prefix != self._remove_national_prefix_from_national_number()
def _is_digit_or_leading_plus_sign(self, next_char):
return (next_char.isdigit() or
(len(self._accrued_input) == 1 and
fullmatch(_PLUS_CHARS_PATTERN, next_char)))
def _attempt_to_format_accrued_digits(self):
"""Check to see if there is an exact pattern match for these digits. If so, we should use this
instead of any other formatting template whose leadingDigitsPattern also matches the input.
"""
for number_format in self._possible_formats:
num_re = re.compile(number_format.pattern)
if fullmatch(num_re, self._national_number):
if number_format.national_prefix_formatting_rule is None:
self._should_add_space_after_national_prefix = False
else:
self._should_add_space_after_national_prefix = bool(_NATIONAL_PREFIX_SEPARATORS_PATTERN.search(number_format.national_prefix_formatting_rule))
formatted_number = re.sub(num_re, number_format.format, self._national_number)
return self._append_national_number(formatted_number)
return U_EMPTY_STRING
def get_remembered_position(self):
"""Returns the current position in the partially formatted phone
number of the character which was previously passed in as the
parameter of input_digit(remember_position=True)."""
if not self._able_to_format:
return self._original_position
accrued_input_index = 0
current_output_index = 0
while (accrued_input_index < self._position_to_remember and
current_output_index < len(self._current_output)):
if (self._accrued_input_without_formatting[accrued_input_index] ==
self._current_output[current_output_index]):
accrued_input_index += 1
current_output_index += 1
return current_output_index
def _append_national_number(self, national_number):
"""Combines the national number with any prefix (IDD/+ and country
code or national prefix) that was collected. A space will be inserted
between them if the current formatting template indicates this to be
suitable.
"""
prefix_before_nn_len = len(self._prefix_before_national_number)
if (self._should_add_space_after_national_prefix and prefix_before_nn_len > 0 and
self._prefix_before_national_number[-1] != _SEPARATOR_BEFORE_NATIONAL_NUMBER):
# We want to add a space after the national prefix if the national
# prefix formatting rule indicates that this would normally be
# done, with the exception of the case where we already appended a
# space because the NDD was surprisingly long.
return self._prefix_before_national_number + _SEPARATOR_BEFORE_NATIONAL_NUMBER + national_number
else:
return self._prefix_before_national_number + national_number
def _attempt_to_choose_formatting_pattern(self):
"""Attempts to set the formatting template and returns a string which
contains the formatted version of the digits entered so far."""
# We start to attempt to format only when at least MIN_LEADING_DIGITS_LENGTH digits of national
# number (excluding national prefix) have been entered.
if len(self._national_number) >= _MIN_LEADING_DIGITS_LENGTH:
self._get_available_formats(self._national_number)
# See if the accrued digits can be formatted properly already.
formatted_number = self._attempt_to_format_accrued_digits()
if len(formatted_number) > 0:
return formatted_number
if self._maybe_create_new_template():
return self._input_accrued_national_number()
else:
return self._accrued_input
else:
return self._append_national_number(self._national_number)
def _input_accrued_national_number(self):
"""Invokes input_digit_helper on each digit of the national number
accrued, and returns a formatted string in the end."""
length_of_national_number = len(self._national_number)
if length_of_national_number > 0:
temp_national_number = U_EMPTY_STRING
for ii in range(length_of_national_number):
temp_national_number = self._input_digit_helper(self._national_number[ii])
if self._able_to_format:
return self._append_national_number(temp_national_number)
else:
return self._accrued_input
else:
return self._prefix_before_national_number
def _is_nanpa_number_with_national_prefix(self):
"""Returns true if the current country is a NANPA country and the
national number begins with the national prefix.
"""
# For NANPA numbers beginning with 1[2-9], treat the 1 as the national
# prefix. The reason is that national significant numbers in NANPA
# always start with [2-9] after the national prefix. Numbers
# beginning with 1[01] can only be short/emergency numbers, which
# don't need the national prefix.
return (self._current_metadata.country_code == 1 and self._national_number[0] == '1' and
self._national_number[1] != '0' and self._national_number[1] != '1')
def _remove_national_prefix_from_national_number(self):
start_of_national_number = 0
if self._is_nanpa_number_with_national_prefix():
start_of_national_number = 1
self._prefix_before_national_number += unicod("1") + _SEPARATOR_BEFORE_NATIONAL_NUMBER
self._is_complete_number = True
elif self._current_metadata.national_prefix_for_parsing is not None:
npp_re = re.compile(self._current_metadata.national_prefix_for_parsing)
m = npp_re.match(self._national_number)
# Since some national prefix patterns are entirely optional, check
# that a national prefix could actually be extracted.
if m and m.end() > 0:
# When the national prefix is detected, we use international
# formatting rules instead of national ones, because national
# formatting rules could contain local formatting rules for
# numbers entered without area code.
self._is_complete_number = True
start_of_national_number = m.end()
self._prefix_before_national_number += self._national_number[:start_of_national_number]
national_prefix = self._national_number[:start_of_national_number]
self._national_number = self._national_number[start_of_national_number:]
return national_prefix
def _attempt_to_extract_idd(self):
"""Extracts IDD and plus sign to self._prefix_before_national_number
when they are available, and places the remaining input into
_national_number.
        Returns True when accrued_input_without_formatting begins with the
        plus sign or a valid IDD for default_country.
"""
international_prefix = re.compile(unicod("\\") + _PLUS_SIGN + unicod("|") +
(self._current_metadata.international_prefix or U_EMPTY_STRING))
idd_match = international_prefix.match(self._accrued_input_without_formatting)
if idd_match:
self._is_complete_number = True
start_of_country_calling_code = idd_match.end()
self._national_number = self._accrued_input_without_formatting[start_of_country_calling_code:]
self._prefix_before_national_number = self._accrued_input_without_formatting[:start_of_country_calling_code]
if self._accrued_input_without_formatting[0] != _PLUS_SIGN:
self._prefix_before_national_number += _SEPARATOR_BEFORE_NATIONAL_NUMBER
return True
return False
def _attempt_to_extract_ccc(self):
"""Extracts the country calling code from the beginning of
_national_number to _prefix_before_national_number when they are
available, and places the remaining input into _national_number.
Returns True when a valid country calling code can be found.
"""
if len(self._national_number) == 0:
return False
country_code, number_without_ccc = _extract_country_code(self._national_number)
if country_code == 0:
return False
self._national_number = number_without_ccc
new_region_code = region_code_for_country_code(country_code)
if new_region_code == REGION_CODE_FOR_NON_GEO_ENTITY:
self._current_metadata = PhoneMetadata.metadata_for_nongeo_region(country_code)
elif new_region_code != self._default_country:
self._current_metadata = _get_metadata_for_region(new_region_code)
self._prefix_before_national_number += str(country_code)
self._prefix_before_national_number += _SEPARATOR_BEFORE_NATIONAL_NUMBER
# When we have successfully extracted the IDD, the previously
# extracted NDD should be cleared because it is no longer valid.
self._extracted_national_prefix = U_EMPTY_STRING
return True
def _normalize_and_accrue_digits_and_plus_sign(self, next_char, remember_position):
"""Accrues digits and the plus sign to
_accrued_input_without_formatting for later use. If next_char contains
a digit in non-ASCII format (e.g. the full-width version of digits),
it is first normalized to the ASCII version. The return value is
next_char itself, or its normalized version, if next_char is a digit
in non-ASCII format. This method assumes its input is either a digit
or the plus sign."""
if next_char == _PLUS_SIGN:
normalized_char = next_char
self._accrued_input_without_formatting += next_char
else:
next_digit = unicode_digit(next_char, -1)
if next_digit != -1:
normalized_char = unicod(next_digit)
else: # pragma no cover
normalized_char = next_char
self._accrued_input_without_formatting += normalized_char
self._national_number += normalized_char
if remember_position:
self._position_to_remember = len(self._accrued_input_without_formatting)
return normalized_char
def _input_digit_helper(self, next_char):
        # Note that _formatting_template is not guaranteed to have a value; it
# could be empty, e.g. when the next digit is entered after extracting
# an IDD or NDD.
digit_match = _DIGIT_PATTERN.search(self._formatting_template, self._last_match_position)
if digit_match:
# Reset to search for _DIGIT_PLACEHOLDER from start of string
digit_match = _DIGIT_PATTERN.search(self._formatting_template)
temp_template = re.sub(_DIGIT_PATTERN,
next_char,
self._formatting_template,
count=1)
self._formatting_template = temp_template + self._formatting_template[len(temp_template):]
self._last_match_position = digit_match.start()
return self._formatting_template[:self._last_match_position + 1]
else:
if len(self._possible_formats) == 1:
# More digits are entered than we could handle, and there are
# no other valid patterns to try.
self._able_to_format = False
# else, we just reset the formatting pattern.
self._current_formatting_pattern = U_EMPTY_STRING
return self._accrued_input
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Cloud Datastore helper functions.
For internal use only; no backwards-compatibility guarantees.
"""
# pytype: skip-file
from __future__ import absolute_import
import errno
import logging
import sys
import time
from builtins import object
from socket import error as SocketError
from future.builtins import next
from past.builtins import unicode
# pylint: disable=ungrouped-imports
from apache_beam.internal.gcp import auth
from apache_beam.utils import retry
# Protect against environments where datastore library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from google.cloud.proto.datastore.v1 import datastore_pb2
from google.cloud.proto.datastore.v1 import entity_pb2
from google.cloud.proto.datastore.v1 import query_pb2
from google.rpc import code_pb2
from googledatastore import PropertyFilter, CompositeFilter
from googledatastore import helper as datastore_helper
from googledatastore.connection import Datastore
from googledatastore.connection import RPCError
except ImportError:
pass
# pylint: enable=wrong-import-order, wrong-import-position
# pylint: enable=ungrouped-imports
_LOGGER = logging.getLogger(__name__)
def key_comparator(k1, k2):
"""A comparator for Datastore keys.
Comparison is only valid for keys in the same partition. The comparison here
is between the list of paths for each key.
"""
if k1.partition_id != k2.partition_id:
raise ValueError('Cannot compare keys with different partition ids.')
k2_iter = iter(k2.path)
for k1_path in k1.path:
k2_path = next(k2_iter, None)
if not k2_path:
return 1
result = compare_path(k1_path, k2_path)
if result != 0:
return result
k2_path = next(k2_iter, None)
if k2_path:
return -1
return 0
def compare_path(p1, p2):
"""A comparator for key path.
A path has either an `id` or a `name` field defined. The
comparison works with the following rules:
1. If one path has `id` defined while the other doesn't, then the
one with `id` defined is considered smaller.
2. If both paths have `id` defined, then their ids are compared.
3. If no `id` is defined for both paths, then their `names` are compared.
"""
result = (p1.kind > p2.kind) - (p1.kind < p2.kind)
if result != 0:
return result
if p1.HasField('id'):
if not p2.HasField('id'):
return -1
return (p1.id > p2.id) - (p1.id < p2.id)
if p2.HasField('id'):
return 1
return (p1.name > p2.name) - (p1.name < p2.name)
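# Illustrative example of the rules above (the path elements are assumed
# values): two elements of the same kind with id=5 and id=9 compare as
# negative (id 5 sorts first), and an element that defines an `id` always
# sorts before one that only defines a `name`.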
def get_datastore(project):
"""Returns a Cloud Datastore client."""
credentials = auth.get_service_credentials()
return Datastore(project, credentials, host='batch-datastore.googleapis.com')
def make_request(project, namespace, query):
"""Make a Cloud Datastore request for the given query."""
req = datastore_pb2.RunQueryRequest()
req.partition_id.CopyFrom(make_partition(project, namespace))
req.query.CopyFrom(query)
return req
def make_partition(project, namespace):
"""Make a PartitionId for the given project and namespace."""
partition = entity_pb2.PartitionId()
partition.project_id = project
if namespace is not None:
partition.namespace_id = namespace
return partition
def retry_on_rpc_error(exception):
"""A retry filter for Cloud Datastore RPCErrors."""
if isinstance(exception, RPCError):
err_code = exception.code
# TODO(BEAM-2156): put these codes in a global list and use that instead.
# https://cloud.google.com/datastore/docs/concepts/errors#error_codes
return err_code in [
code_pb2.ABORTED,
code_pb2.DEADLINE_EXCEEDED,
code_pb2.INTERNAL,
code_pb2.UNAVAILABLE,
code_pb2.UNKNOWN,
]
if isinstance(exception, SocketError):
return (
exception.errno == errno.ECONNRESET or
exception.errno == errno.ETIMEDOUT or exception.errno == errno.EPIPE)
return False
def fetch_entities(project, namespace, query, datastore):
"""A helper method to fetch entities from Cloud Datastore.
Args:
project: Project ID
namespace: Cloud Datastore namespace
query: Query to be read from
datastore: Cloud Datastore Client
Returns:
An iterator of entities.
"""
return QueryIterator(project, namespace, query, datastore)
def is_key_valid(key):
"""Returns True if a Cloud Datastore key is complete.
A key is complete if its last element has either an id or a name.
"""
if not key.path:
return False
return key.path[-1].HasField('id') or key.path[-1].HasField('name')
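# Illustrative example (assumed key): for a key whose path is
# [{kind: 'Parent', id: 42}, {kind: 'Child'}], the last element defines
# neither an id nor a name, so is_key_valid returns False.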
def write_mutations(
datastore,
project,
mutations,
throttler,
rpc_stats_callback=None,
throttle_delay=1):
"""A helper function to write a batch of mutations to Cloud Datastore.
  If a commit fails, it will be retried up to 5 times. All mutations in the
batch will be committed again, even if the commit was partially successful.
If the retry limit is exceeded, the last exception from Cloud Datastore will
be raised.
Args:
datastore: googledatastore.connection.Datastore
project: str, project id
mutations: list of google.cloud.proto.datastore.v1.datastore_pb2.Mutation
    throttler: AdaptiveThrottler, to use to select requests to be throttled.
    rpc_stats_callback: a function to call with arguments `successes` and
      `failures` and `throttled_secs`; this is called to record successful
      and failed RPCs to Datastore and time spent waiting for throttling.
    throttle_delay: float, time in seconds to sleep when throttled.
Returns a tuple of:
CommitResponse, the response from Datastore;
int, the latency of the successful RPC in milliseconds.
"""
commit_request = datastore_pb2.CommitRequest()
commit_request.mode = datastore_pb2.CommitRequest.NON_TRANSACTIONAL
commit_request.project_id = project
for mutation in mutations:
commit_request.mutations.add().CopyFrom(mutation)
@retry.with_exponential_backoff(
num_retries=5, retry_filter=retry_on_rpc_error)
def commit(request):
# Client-side throttling.
while throttler.throttle_request(time.time() * 1000):
_LOGGER.info(
"Delaying request for %ds due to previous failures", throttle_delay)
time.sleep(throttle_delay)
rpc_stats_callback(throttled_secs=throttle_delay)
try:
start_time = time.time()
response = datastore.commit(request)
end_time = time.time()
rpc_stats_callback(successes=1)
throttler.successful_request(start_time * 1000)
commit_time_ms = int((end_time - start_time) * 1000)
return response, commit_time_ms
except (RPCError, SocketError):
if rpc_stats_callback:
rpc_stats_callback(errors=1)
raise
response, commit_time_ms = commit(commit_request)
return response, commit_time_ms
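# Minimal usage sketch (the project id, throttler instance and stats callback
# are hypothetical placeholders; see the docstring above for the contract):
#
#   datastore = get_datastore('my-project')
#   response, latency_ms = write_mutations(
#       datastore, 'my-project', mutations,
#       throttler,                            # an AdaptiveThrottler instance
#       rpc_stats_callback=my_stats_callback)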
def make_latest_timestamp_query(namespace):
"""Make a Query to fetch the latest timestamp statistics."""
query = query_pb2.Query()
if namespace is None:
query.kind.add().name = '__Stat_Total__'
else:
query.kind.add().name = '__Stat_Ns_Total__'
# Descending order of `timestamp`
datastore_helper.add_property_orders(query, "-timestamp")
# Only get the latest entity
query.limit.value = 1
return query
def make_kind_stats_query(namespace, kind, latest_timestamp):
"""Make a Query to fetch the latest kind statistics."""
kind_stat_query = query_pb2.Query()
if namespace is None:
kind_stat_query.kind.add().name = '__Stat_Kind__'
else:
kind_stat_query.kind.add().name = '__Stat_Ns_Kind__'
kind_filter = datastore_helper.set_property_filter(
query_pb2.Filter(), 'kind_name', PropertyFilter.EQUAL, unicode(kind))
timestamp_filter = datastore_helper.set_property_filter(
query_pb2.Filter(), 'timestamp', PropertyFilter.EQUAL, latest_timestamp)
datastore_helper.set_composite_filter(
kind_stat_query.filter,
CompositeFilter.AND,
kind_filter,
timestamp_filter)
return kind_stat_query
class QueryIterator(object):
"""A iterator class for entities of a given query.
Entities are read in batches. Retries on failures.
"""
# Maximum number of results to request per query.
_BATCH_SIZE = 500
def __init__(self, project, namespace, query, datastore):
self._query = query
self._datastore = datastore
self._project = project
self._namespace = namespace
self._start_cursor = None
self._limit = self._query.limit.value or sys.maxsize
self._req = make_request(project, namespace, query)
@retry.with_exponential_backoff(
num_retries=5, retry_filter=retry_on_rpc_error)
def _next_batch(self):
"""Fetches the next batch of entities."""
if self._start_cursor is not None:
self._req.query.start_cursor = self._start_cursor
# set batch size
self._req.query.limit.value = min(self._BATCH_SIZE, self._limit)
resp = self._datastore.run_query(self._req)
return resp
def __iter__(self):
more_results = True
while more_results:
resp = self._next_batch()
for entity_result in resp.batch.entity_results:
yield entity_result.entity
self._start_cursor = resp.batch.end_cursor
num_results = len(resp.batch.entity_results)
self._limit -= num_results
# Check if we need to read more entities.
# True when query limit hasn't been satisfied and there are more entities
# to be read. The latter is true if the response has a status
# `NOT_FINISHED` or if the number of results read in the previous batch
      # is equal to `_BATCH_SIZE` (all indications that there is more data to be
# read).
more_results = ((self._limit > 0) and (
(num_results == self._BATCH_SIZE) or
(resp.batch.more_results == query_pb2.QueryResultBatch.NOT_FINISHED)))
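# Illustrative usage sketch (project and kind names are placeholders):
#
#   datastore = get_datastore('my-project')
#   query = query_pb2.Query()
#   query.kind.add().name = 'MyKind'
#   for entity in fetch_entities('my-project', None, query, datastore):
#       ...  # each item is an entity from the query results, read in batches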
|
|
# Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
import six
from neutron.tests import base
from networking_cisco.plugins.saf.common import config
from networking_cisco.plugins.saf.common import constants
from networking_cisco.plugins.saf.db import dfa_db_models as dbm
from networking_cisco.plugins.saf.server import cisco_dfa_rest as cdr
from networking_cisco.plugins.saf.server import dfa_events_handler as deh
from networking_cisco.plugins.saf.server import dfa_fail_recovery as dfr
from networking_cisco.plugins.saf.server import dfa_instance_api as dia
FAKE_NETWORK_NAME = 'test_dfa_network'
FAKE_NETWORK_ID = '949fdd05-a26a-4819-a829-9fc2285de6ff'
FAKE_CFG_PROF_ID = '8c30f360ffe948109c28ab56f69a82e1'
FAKE_SEG_ID = 12345
FAKE_PROJECT_NAME = 'test_dfa_project'
FAKE_PROJECT_ID = 'aee5da7e699444889c662cf7ec1c8de7'
FAKE_CFG_PROFILE_NAME = 'defaultNetworkL2Profile'
FAKE_INSTANCE_NAME = 'test_dfa_instance'
FAKE_SUBNET_ID = '1a3c5ee1-cb92-4fd8-bff1-8312ac295d64'
FAKE_PORT_ID = 'ea0d92cf-d0cb-4ed2-bbcf-ed7c6aaea4cb'
FAKE_DEVICE_ID = '20305657-78b7-48f4-a7cd-1edf3edbfcad'
FAKE_SECURITY_GRP_ID = '4b5b387d-cf21-4594-b926-f5a5c602295f'
FAKE_MAC_ADDR = 'fa:16:3e:70:15:c4'
FAKE_IP_ADDR = '23.24.25.4'
FAKE_GW_ADDR = '23.24.25.1'
FAKE_DHCP_IP_START = '23.24.25.2'
FAKE_DHCP_IP_END = '23.24.25.254'
FAKE_HOST_ID = 'test_dfa_host'
FAKE_FWD_MODE = 'proxy-gateway'
FAKE_DCNM_USERNAME = 'cisco'
FAKE_DCNM_PASSWD = 'password'
FAKE_DCNM_IP = '1.1.2.2'
class FakeClass(object):
"""Fake class"""
@classmethod
def imitate(cls, *others):
for other in others:
for name in other.__dict__:
try:
setattr(cls, name, mock.Mock())
except (TypeError, AttributeError):
pass
return cls
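# Note (descriptive, not part of the original tests): imitate() copies every
# attribute name defined directly on the given classes onto FakeClass as a
# mock.Mock(); e.g. FakeClass.imitate(dbm.DfaDBMixin) stubs out all of the
# mixin's methods. setUp() below uses this to replace DfaServer's bases.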
class FakeProject(object):
"""Fake Project class."""
def __init__(self, proj_id, name, dci_id, desc):
self.id = proj_id
self.name = name
self.dci_id = dci_id
self.description = desc
class TestDFAServer(base.BaseTestCase):
"""Test cases for DFA Server class."""
def setUp(self):
super(TestDFAServer, self).setUp()
# Mocking some modules
self.dcnmpatcher = mock.patch(cdr.__name__ + '.DFARESTClient')
self.mdcnm = self.dcnmpatcher.start()
self.addCleanup(self.dcnmpatcher.stop)
self.keys_patcher = mock.patch(deh.__name__ + '.EventsHandler')
self.mkeys = self.keys_patcher.start()
        self.addCleanup(self.keys_patcher.stop)
self.inst_api_patcher = mock.patch(dia.__name__ + '.DFAInstanceAPI')
self.m_inst_api = self.inst_api_patcher.start()
self.addCleanup(self.inst_api_patcher.stop)
self.module_patcher = mock.patch.dict('sys.modules',
{'pika': mock.Mock()})
self.module_patcher.start()
self.addCleanup(self.module_patcher.stop)
from networking_cisco.plugins.saf.server import dfa_listen_dcnm as dld
from networking_cisco.plugins.saf.server import dfa_server as ds
self.dld_patcher = mock.patch(dld.__name__ + '.DCNMListener')
self.dld = self.dld_patcher.start()
self.addCleanup(self.dld_patcher.stop)
ds.DfaServer.__bases__ = (FakeClass.imitate(
dfr.DfaFailureRecovery, dbm.DfaDBMixin),)
ds.DfaServer.get_all_projects.return_value = []
ds.DfaServer.get_all_networks.return_value = []
ds.DfaServer._setup_rpc = mock.Mock()
# Setting DCNM credentials.
config.default_dcnm_opts['dcnm']['dcnm_ip'] = FAKE_DCNM_IP
config.default_dcnm_opts['dcnm']['dcnm_user'] = FAKE_DCNM_USERNAME
config.default_dcnm_opts['dcnm']['dcnm_password'] = FAKE_DCNM_PASSWD
config.default_dcnm_opts['dcnm']['timeout_resp'] = 0.01
config.default_dcnm_opts['dcnm']['segmentation_id_min'] = 10000
config.default_dcnm_opts['dcnm']['segmentation_id_max'] = 20000
self.cfg = config.CiscoDFAConfig().cfg
self.segid = self.cfg.dcnm.segmentation_id_min + 10
self.dfa_server = ds.DfaServer(self.cfg)
self.dciid = str(123)
self.proj_desc = 'Unit Test Project'
projs = [
FakeProject(FAKE_PROJECT_ID, FAKE_PROJECT_NAME,
self.dciid, self.proj_desc)]
self.dfa_server.get_all_projects.return_value = projs
self.dfa_server._load_project_info_cache()
self.part_name = self.cfg.dcnm.default_partition_name
def _get_port_info(self):
port_info = {'port': {
'status': 'ACTIVE',
'binding:host_id': FAKE_HOST_ID,
'allowed_address_pairs': [],
'extra_dhcp_opts': [],
'device_owner': 'compute:nova',
'binding:profile': {},
'fixed_ips': [{'subnet_id': FAKE_SUBNET_ID,
'ip_address': FAKE_IP_ADDR}],
'id': FAKE_PORT_ID,
'security_groups': [FAKE_SECURITY_GRP_ID],
'device_id': FAKE_DEVICE_ID,
'name': '',
'admin_state_up': True,
'network_id': FAKE_NETWORK_ID,
'tenant_id': FAKE_PROJECT_ID,
'binding:vif_details': {'port_filter': True,
'ovs_hybrid_plug': True},
'binding:vnic_type': 'normal',
'binding:vif_type': 'ovs',
'mac_address': FAKE_MAC_ADDR}}
return port_info
def _load_network_info(self):
dnet = mock.Mock()
dnet.network_id = FAKE_NETWORK_ID
segid = self.cfg.dcnm.segmentation_id_min + 10
dnet.segmentation_id = segid
dnet.config_profile = FAKE_CFG_PROFILE_NAME
dnet.fwd_mod = FAKE_FWD_MODE
dnet.tenant_id = FAKE_PROJECT_ID
dnet.name = FAKE_NETWORK_NAME
self.dfa_server.get_all_networks.return_value = [dnet]
self.dfa_server._load_network_info()
def test_update_project_info_cache(self):
"""Test case for update project info."""
pid = FAKE_PROJECT_ID
name = FAKE_PROJECT_NAME
dciid = 1000
result = constants.RESULT_SUCCESS
self.dfa_server.update_project_info_cache(pid, dci_id=dciid,
name=name, opcode='add')
self.assertTrue(self.dfa_server.add_project_db.called)
self.assertFalse(self.dfa_server.update_project_entry.called)
self.assertFalse(self.dfa_server.del_project_db.called)
self.dfa_server.add_project_db.assert_called_with(pid, name,
dciid, result)
self.dfa_server.update_project_info_cache(pid, dci_id=dciid,
name=name,
opcode='update')
self.assertTrue(self.dfa_server.update_project_entry.called)
self.assertFalse(self.dfa_server.del_project_db.called)
self.dfa_server.update_project_entry.assert_called_with(pid, dciid,
result)
def test_project_create_event(self):
"""Test case for project create event."""
dciid = str(12345)
proj_desc = 'Unit Test Project'
proj_info = {'resource_info': FAKE_PROJECT_ID}
proj = mock.Mock()
proj.name = FAKE_PROJECT_NAME
proj.description = proj_desc
part_name = self.cfg.dcnm.default_partition_name
self.dfa_server.keystone_event._service.projects.get.return_value = (
proj)
self.dfa_server.project_create_event(proj_info)
# Try it with DCI id
proj.name = FAKE_PROJECT_NAME + ':dci_id:' + dciid
self.dfa_server.project_create_event(proj_info)
expected_calls = [
mock.call(FAKE_PROJECT_NAME, part_name, None, proj.description),
mock.call(FAKE_PROJECT_NAME, part_name, dciid, proj.description)]
self.assertEqual(
expected_calls,
self.dfa_server.dcnm_client.create_project.call_args_list)
def test_project_update_event(self):
"""Test case for project update event."""
proj_info = {'resource_info': FAKE_PROJECT_ID}
proj = mock.Mock()
proj.name = FAKE_PROJECT_NAME + ':dci_id:' + self.dciid
proj.description = self.proj_desc
self.dfa_server.keystone_event._service.projects.get.return_value = (
proj)
self.dfa_server.project_update_event(proj_info)
# Project update event is called with the same parameters. It is
# expected that there is no call to update_project.
self.assertFalse(
self.dfa_server.dcnm_client.update_project.called)
        # Try updating the project name.
proj.name = FAKE_PROJECT_NAME + 'new' + ':dci_id:' + self.dciid
self.dfa_server.project_update_event(proj_info)
self.assertFalse(
self.dfa_server.dcnm_client.update_project.called)
        # Try updating the dci_id of the project.
proj.name = FAKE_PROJECT_NAME + ':dci_id:' + str(124)
self.dfa_server.project_update_event(proj_info)
self.assertTrue(
self.dfa_server.dcnm_client.update_project.called)
expected_calls = [mock.call(FAKE_PROJECT_NAME,
self.cfg.dcnm.default_partition_name,
str(124))]
self.assertEqual(
expected_calls,
self.dfa_server.dcnm_client.update_project.call_args_list)
def test_project_delete_event(self):
"""Test case for project delete event."""
proj_name = FAKE_PROJECT_NAME
proj_info = {'resource_info': FAKE_PROJECT_ID}
part_name = self.cfg.dcnm.default_partition_name
self.dfa_server.project_delete_event(proj_info)
        # Check the information sent to DCNM and verify that deleting the
        # entry from the DB is called.
self.dfa_server.dcnm_client.delete_project.assert_called_with(
proj_name, part_name)
self.dfa_server.del_project_db.assert_called_with(FAKE_PROJECT_ID)
def test_network_create_event(self):
"""Test case for network create event."""
network_info = {'network':
{'name': FAKE_NETWORK_NAME,
'tenant_id': FAKE_PROJECT_ID,
'id': FAKE_NETWORK_ID}}
dcnmclnt = self.dfa_server.dcnm_client
dcnmclnt.get_config_profile_for_network.return_value = (
FAKE_CFG_PROFILE_NAME, FAKE_FWD_MODE)
self.dfa_server.network_create_event(network_info)
dfa_net = self.dfa_server.network[FAKE_NETWORK_ID]
expected_calls = [mock.call(FAKE_NETWORK_ID, dfa_net, 'openstack',
constants.RESULT_SUCCESS)]
self.assertEqual(expected_calls,
self.dfa_server.add_network_db.call_args_list)
def test_subnet_create_event(self):
"""Test case for subnet create event."""
network_info = {'network':
{'name': FAKE_NETWORK_NAME,
'tenant_id': FAKE_PROJECT_ID,
'id': FAKE_NETWORK_ID}}
subnet_info = {'subnet': {
'network_id': FAKE_NETWORK_ID,
'tenant_id': FAKE_PROJECT_ID,
'allocation_pools': [
{'start': FAKE_DHCP_IP_START, 'end': FAKE_DHCP_IP_END}],
'gateway_ip': FAKE_GW_ADDR,
'ip_version': 4,
'cidr': FAKE_IP_ADDR + '/24',
'id': FAKE_SUBNET_ID}}
dcnmclnt = self.dfa_server.dcnm_client
dcnmclnt.get_config_profile_for_network.return_value = (
FAKE_CFG_PROFILE_NAME, FAKE_FWD_MODE)
self.dfa_server.network_create_event(network_info)
fake_network = mock.Mock()
fake_network.source = 'dcnm'
fake_network.name = FAKE_NETWORK_NAME
self.dfa_server.get_network.return_value = fake_network
self.dfa_server.subnet_create_event(subnet_info)
self.assertFalse(self.dfa_server.dcnm_client.create_network.called)
fake_network.source = 'openstack'
self.dfa_server.subnet_create_event(subnet_info)
self.assertTrue(self.dfa_server.dcnm_client.create_network.called)
create_call = self.dfa_server.dcnm_client.create_network.call_args
arg1, arg2 = create_call
self.assertTrue(arg1[0] == FAKE_PROJECT_NAME)
self.assertTrue(
arg1[1].__dict__ == self.dfa_server.network[FAKE_NETWORK_ID])
self.assertTrue(
arg1[2].__dict__ == self.dfa_server.subnet[FAKE_SUBNET_ID])
def test_network_delete_event(self):
"""Test case for network delete event."""
self._load_network_info()
self.assertFalse(self.segid in self.dfa_server.segmentation_pool)
network_info = {'network_id': FAKE_NETWORK_ID}
self.dfa_server.network_delete_event(network_info)
self.assertTrue(self.dfa_server.dcnm_client.delete_network.called)
dcall = self.dfa_server.dcnm_client.delete_network.call_args
arg1, arg2 = dcall
self.assertTrue(arg1[0] == FAKE_PROJECT_NAME)
self.assertTrue(arg1[1].name == FAKE_NETWORK_NAME)
self.assertTrue(arg1[1].segmentation_id == self.segid)
self.assertTrue(self.segid in self.dfa_server.segmentation_pool)
self.assertTrue(self.dfa_server.delete_network_db.called)
def test_dcnm_network_create_event(self):
"""Test case for DCNM network create event."""
network_info = {'segmentation_id': FAKE_SEG_ID,
'project_name': FAKE_PROJECT_NAME,
'partition_name': self.part_name}
self.dfa_server.get_network_by_segid.return_value = None
self.dfa_server.get_project_id.return_value = FAKE_PROJECT_ID
dcnm_network = {'segmentId': FAKE_SEG_ID,
'profileName': FAKE_CFG_PROFILE_NAME,
'networkName': FAKE_NETWORK_NAME,
'organizationName': FAKE_PROJECT_NAME,
'dhcpScope': None,
'netmaskLength': 24,
'gateway': FAKE_GW_ADDR}
self.dfa_server.dcnm_client.get_network.return_value = dcnm_network
dcnmclnt = self.dfa_server.dcnm_client
dcnmclnt.config_profile_fwding_mode_get.return_value = FAKE_FWD_MODE
self.dfa_server.dcnm_network_create_event(network_info)
# Check the results.
self.dfa_server.dcnm_client.get_network.assert_called_with(
FAKE_PROJECT_NAME, FAKE_SEG_ID)
for netid, dcnmnet in six.iteritems(self.dfa_server.network):
self.dfa_server.add_network_db.assert_called_with(
netid, dcnmnet, 'DCNM', constants.RESULT_SUCCESS)
self.assertTrue(self.dfa_server.neutronclient.create_network.called)
net_ext_name = self.cfg.dcnm.dcnm_net_ext
call_args = self.dfa_server.neutronclient.create_network.call_args
cargs, ckwargs = call_args
net_name = ckwargs.get('body').get('network').get('name')
self.assertTrue(net_name == (
FAKE_NETWORK_NAME + net_ext_name + str(FAKE_SEG_ID)))
self.assertTrue(self.dfa_server.neutronclient.create_subnet.called)
def test_dcnm_network_delete_event(self):
"""Test case for DCNM network delete event."""
self._load_network_info()
network_info = {'segmentation_id': (
self.dfa_server.network[FAKE_NETWORK_ID]['segmentation_id'])}
dcnmnet = mock.Mock()
dcnmnet.network_id = FAKE_NETWORK_ID
self.dfa_server.get_network_by_segid.return_value = dcnmnet
self.dfa_server.dcnm_network_delete_event(network_info)
# Check the results.
self.assertTrue(self.dfa_server.network == {})
self.dfa_server.neutronclient.delete_network.assert_called_with(
FAKE_NETWORK_ID)
self.dfa_server.delete_network_db.assert_called_with(FAKE_NETWORK_ID)
def test_port_create_event(self):
"""Test case for port create event."""
port_info = self._get_port_info()
self._load_network_info()
self.dfa_server._inst_api.get_instance_for_uuid.return_value = (
FAKE_INSTANCE_NAME)
self.dfa_server.port_create_event(port_info)
# Check the output/calls
self.assertTrue(self.dfa_server.neutron_event.send_vm_info.called)
call_args = self.dfa_server.neutron_event.send_vm_info.call_args
cargs, ckwargs = call_args
self.assertTrue(cargs[0] == FAKE_HOST_ID)
self.assertTrue(str(self.dfa_server.port[FAKE_PORT_ID]) == cargs[1])
self.assertTrue(self.dfa_server.add_vms_db.called)
call_args = self.dfa_server.add_vms_db.call_args
cargs, ckwargs = call_args
self.assertTrue(self.dfa_server.port[FAKE_PORT_ID] == cargs[0])
self.assertTrue(constants.RESULT_SUCCESS == cargs[1])
def test_port_update_event(self):
"""Test case for port update event."""
port_info = self._get_port_info()
mvm = mock.Mock()
mvm.host = None
mvm.port_id = FAKE_PORT_ID
self.dfa_server.get_vm.return_value = mvm
self.dfa_server._inst_api.get_instance_for_uuid.return_value = (
FAKE_INSTANCE_NAME)
self.dfa_server.port_update_event(port_info)
# Check the results.
self.dfa_server.neutron_event.send_vm_info.assert_called_with(
port_info['port']['binding:host_id'],
str(self.dfa_server.port[port_info['port']['id']]))
params = dict(columns=dict(
instance_id=FAKE_DEVICE_ID.replace('-', ''),
host=port_info['port']['binding:host_id'],
result=constants.RESULT_SUCCESS,
name=FAKE_INSTANCE_NAME))
self.dfa_server.update_vm_db.assert_called_with(
port_info['port']['id'], **params)
def test_port_delete_event(self):
"""Test case for port delete event."""
vm = mock.Mock()
vm.mac = FAKE_MAC_ADDR
vm.port_id = FAKE_PORT_ID
vm.segmentation_id = self.segid
        vm.network_id = FAKE_NETWORK_ID
vm.port_id = FAKE_PORT_ID
vm.ip = FAKE_IP_ADDR
vm.gw_mac = FAKE_GW_ADDR
vm.instance_id = FAKE_DEVICE_ID
vm.fwd_mod = FAKE_FWD_MODE
vm.host = FAKE_HOST_ID
vm.name = FAKE_INSTANCE_NAME
self.dfa_server.get_vm.return_value = vm
vm_info = dict(status='down', vm_mac=vm.mac,
segmentation_id=vm.segmentation_id,
host=vm.host, port_uuid=vm.port_id,
net_uuid=vm.network_id,
oui=dict(ip_addr=vm.ip, vm_name=vm.name,
vm_uuid=vm.instance_id, gw_mac=vm.gw_mac,
fwd_mod=vm.fwd_mod, oui_id='cisco'))
port_info = {'port_id': FAKE_PORT_ID}
# Check the output/calls
self.dfa_server.port_delete_event(port_info)
self.assertTrue(self.dfa_server.neutron_event.send_vm_info.called)
call_args = self.dfa_server.neutron_event.send_vm_info.call_args
cargs, ckwargs = call_args
self.assertTrue(cargs[0] == FAKE_HOST_ID)
self.assertTrue(str(vm_info) == cargs[1])
self.dfa_server.delete_vm_db.assert_called_with(vm.instance_id)
|
|
# Copyright 2013-2014 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import io
import sys
import time
import platform
from contextlib import contextmanager
"""This is (mostly) a standalone module used to write logging
information about Meson runs. Some output goes to the screen,
some to the log file in the logging directory, and some goes to both."""
def _windows_ansi():
from ctypes import windll, byref
from ctypes.wintypes import DWORD
kernel = windll.kernel32
stdout = kernel.GetStdHandle(-11)
mode = DWORD()
if not kernel.GetConsoleMode(stdout, byref(mode)):
return False
# ENABLE_VIRTUAL_TERMINAL_PROCESSING == 0x4
    # If the call to enable VT processing fails (returns 0), we fall back to
    # the original behavior
return kernel.SetConsoleMode(stdout, mode.value | 0x4) or os.environ.get('ANSICON')
if platform.system().lower() == 'windows':
colorize_console = os.isatty(sys.stdout.fileno()) and _windows_ansi()
else:
colorize_console = os.isatty(sys.stdout.fileno()) and os.environ.get('TERM') != 'dumb'
log_dir = None
log_file = None
log_fname = 'meson-log.txt'
log_depth = 0
log_timestamp_start = None
log_fatal_warnings = False
def initialize(logdir, fatal_warnings=False):
global log_dir, log_file, log_fatal_warnings
log_dir = logdir
log_file = open(os.path.join(logdir, log_fname), 'w', encoding='utf8')
log_fatal_warnings = fatal_warnings
def set_timestamp_start(start):
global log_timestamp_start
log_timestamp_start = start
def shutdown():
global log_file
if log_file is not None:
path = log_file.name
exception_around_goer = log_file
log_file = None
exception_around_goer.close()
return path
return None
class AnsiDecorator:
plain_code = "\033[0m"
def __init__(self, text, code, quoted=False):
self.text = text
self.code = code
self.quoted = quoted
def get_text(self, with_codes):
text = self.text
if with_codes:
text = self.code + self.text + AnsiDecorator.plain_code
if self.quoted:
text = '"{}"'.format(text)
return text
def bold(text, quoted=False):
return AnsiDecorator(text, "\033[1m", quoted=quoted)
def red(text):
return AnsiDecorator(text, "\033[1;31m")
def green(text):
return AnsiDecorator(text, "\033[1;32m")
def yellow(text):
return AnsiDecorator(text, "\033[1;33m")
def cyan(text):
return AnsiDecorator(text, "\033[1;36m")
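# Illustrative note (not part of the original module): log(bold('Compiler:'),
# green('gcc')) writes plain text to the log file and ANSI-coloured text to a
# colour-capable console, because log() calls process_markup() with keep=False
# for the file and keep=True for the console.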
def process_markup(args, keep):
arr = []
if log_timestamp_start is not None:
arr = ['[{:.3f}]'.format(time.monotonic() - log_timestamp_start)]
for arg in args:
if arg is None:
continue
if isinstance(arg, str):
arr.append(arg)
elif isinstance(arg, AnsiDecorator):
arr.append(arg.get_text(keep))
else:
arr.append(str(arg))
return arr
def force_print(*args, **kwargs):
iostr = io.StringIO()
kwargs['file'] = iostr
print(*args, **kwargs)
raw = iostr.getvalue()
if log_depth > 0:
prepend = '|' * log_depth
raw = prepend + raw.replace('\n', '\n' + prepend, raw.count('\n') - 1)
# _Something_ is going to get printed.
try:
print(raw, end='')
except UnicodeEncodeError:
cleaned = raw.encode('ascii', 'replace').decode('ascii')
print(cleaned, end='')
def debug(*args, **kwargs):
arr = process_markup(args, False)
if log_file is not None:
print(*arr, file=log_file, **kwargs) # Log file never gets ANSI codes.
log_file.flush()
def log(*args, **kwargs):
arr = process_markup(args, False)
if log_file is not None:
print(*arr, file=log_file, **kwargs) # Log file never gets ANSI codes.
log_file.flush()
if colorize_console:
arr = process_markup(args, True)
force_print(*arr, **kwargs)
def _log_error(severity, *args, **kwargs):
from .mesonlib import get_error_location_string
from .environment import build_filename
from .mesonlib import MesonException
if severity == 'warning':
args = (yellow('WARNING:'),) + args
elif severity == 'error':
args = (red('ERROR:'),) + args
elif severity == 'deprecation':
args = (red('DEPRECATION:'),) + args
else:
assert False, 'Invalid severity ' + severity
location = kwargs.pop('location', None)
if location is not None:
location_file = os.path.join(location.subdir, build_filename)
location_str = get_error_location_string(location_file, location.lineno)
args = (location_str,) + args
log(*args, **kwargs)
global log_fatal_warnings
if log_fatal_warnings:
raise MesonException("Fatal warnings enabled, aborting")
def error(*args, **kwargs):
return _log_error('error', *args, **kwargs)
def warning(*args, **kwargs):
return _log_error('warning', *args, **kwargs)
def deprecation(*args, **kwargs):
return _log_error('deprecation', *args, **kwargs)
def exception(e):
log()
if hasattr(e, 'file') and hasattr(e, 'lineno') and hasattr(e, 'colno'):
log('%s:%d:%d:' % (e.file, e.lineno, e.colno), red('ERROR: '), e)
else:
log(red('ERROR:'), e)
# Format a list for logging purposes as a string. It separates
# all but the last item with commas, and the last with 'and'.
def format_list(items):
    length = len(items)
    if length > 2:
        return ' and '.join([', '.join(items[:-1]), items[-1]])
    elif length == 2:
        return ' and '.join(items)
    elif length == 1:
        return items[0]
    else:
        return ''
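# Illustrative examples of the formatting rules above:
#   format_list(['a'])           -> 'a'
#   format_list(['a', 'b'])      -> 'a and b'
#   format_list(['a', 'b', 'c']) -> 'a, b and c'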
@contextmanager
def nested():
global log_depth
log_depth += 1
try:
yield
finally:
log_depth -= 1
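# A minimal usage sketch (illustrative, not part of the original module): the
# expected calling sequence is initialize() once per run, any number of log()
# calls (optionally inside nested() blocks), then shutdown() to close the log
# file.  warning()/error() are omitted here because they import sibling Meson
# modules.  Runs only when this file is executed directly.
if __name__ == '__main__':
    import tempfile
    initialize(tempfile.mkdtemp())
    set_timestamp_start(time.monotonic())
    log('Project name:', bold('demo'))
    with nested():
        log(green('OK'), 'nested output is prefixed with', bold('|'))
    print('Log written to', shutdown())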
|
|
"""NDG Security Basic OpenID Authentication Interface.
A demonstration implementation of an authentication interface for
OpenIDProviderMiddleware WSGI. Username/password and OpenId user identifier
details are read from a config file and passed as keywords. This class is not
intended for production use.
NERC DataGrid Project
"""
__author__ = "P J Kershaw"
__date__ = "01/08/08"
__copyright__ = "(C) 2009 Science and Technology Facilities Council"
__contact__ = "[email protected]"
__revision__ = "$Id$"
import logging
log = logging.getLogger(__name__)
from ndg.security.server.wsgi.openid.provider.authninterface import \
AbstractAuthNInterface, AuthNInterfaceInvalidCredentials, \
AuthNInterfaceRetrieveError, AuthNInterfaceConfigError, \
AuthNInterfaceUsername2IdentifierMismatch
class BasicAuthNInterface(AbstractAuthNInterface):
    '''Basic authentication interface class for OpenIDProviderMiddleware.
    It uses username/password details retrieved from a config file or keyword
    entry.  This class is for testing only.  NOT for production use.'''
IDENTITY_URI_TMPL_KEYNAME = 'identityUriTemplate'
USERCREDS_PROPERTY_KEYNAME = 'userCreds'
USERCREDS_KEYNAMES = ('password', 'identifiers')
    propertyKeyNames = (
        USERCREDS_PROPERTY_KEYNAME,
    )
getUserIdentifier = staticmethod(lambda identityURI:
identityURI.rsplit('/')[-1])
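    # Illustrative example (hypothetical URI): an identity URI of
    # 'https://openid.example.ac.uk/id/jbloggs' yields the identifier
    # 'jbloggs', i.e. the path segment after the final '/'.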
def __init__(self, **prop):
"""Make any initial settings
Settings are held in a dictionary which can be set from **prop,
a call to setProperties() or by passing settings in an XML file
given by propFilePath
@type **prop: dict
@param **prop: set properties via keywords
@raise AuthNInterfaceConfigError: error with configuration
"""
# Test/Admin username/password set from ini/kw args
self._identityUriTemplate = prop.get(
BasicAuthNInterface.IDENTITY_URI_TMPL_KEYNAME)
userCredsField = prop.get(
BasicAuthNInterface.USERCREDS_PROPERTY_KEYNAME)
if not userCredsField:
raise AuthNInterfaceConfigError('No "%s" config option found' %
BasicAuthNInterface.USERCREDS_PROPERTY_KEYNAME)
self._userCreds = {}
for userEntry in userCredsField.split():
# Split username, password and OpenID name list
userCreds = userEntry.strip().split(':')
# Split OpenID name list
userCreds[-1] = tuple(userCreds[-1].split(','))
# Convert into a dictionary indexed by username
userCredsKeys = BasicAuthNInterface.USERCREDS_KEYNAMES
self._userCreds[userCreds[0]] = dict(zip(userCredsKeys,
userCreds[1:]))
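        # Illustrative example (hypothetical values): a userCreds entry of
        #   'jbloggs:secret:jbloggs,jb susan:pass2:susan'
        # produces
        #   {'jbloggs': {'password': 'secret', 'identifiers': ('jbloggs', 'jb')},
        #    'susan': {'password': 'pass2', 'identifiers': ('susan',)}}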
def logon(self, environ, identityURI, username, password):
"""Interface login method
@type environ: dict
@param environ: standard WSGI environ parameter
@type identityURI: basestring
@param identityURI: user's identity URL e.g.
'https://joebloggs.somewhere.ac.uk/'
@type username: basestring
@param username: username
@type password: basestring
        @param password: corresponding password for the given username
@raise AuthNInterfaceInvalidCredentials: invalid username/password
@raise AuthNInterfaceUsername2IdentifierMismatch: no OpenID matching
the given username
"""
if self._userCreds.get(username, {}).get('password') != password:
raise AuthNInterfaceInvalidCredentials()
# Assume identifier is at the end of the URI
if identityURI is not None:
userIdentifier = BasicAuthNInterface.getUserIdentifier(identityURI)
if userIdentifier not in self._userCreds[username]['identifiers']:
raise AuthNInterfaceUsername2IdentifierMismatch()
def logout(self):
pass
def username2UserIdentifiers(self, environ, username):
"""Map the login username to an identifier which will become the
unique path suffix to the user's OpenID identifier. The
OpenIDProviderMiddleware takes self.urls['id_url'] and adds it to this
identifier:
identifier = self._authN.username2UserIdentifiers(environ,username)
identityURL = self.urls['url_id'] + '/' + identifier
@type environ: dict
@param environ: standard WSGI environ parameter
@type username: basestring
@param username: user identifier
@rtype: tuple
@return: identifiers to be used to make OpenID user identity URLs.
@raise AuthNInterfaceRetrieveError: error with retrieval of information
to identifier e.g. error with database look-up.
"""
try:
return self._userCreds[username]['identifiers']
except KeyError:
raise AuthNInterfaceRetrieveError('No entries for "%s" user' %
username)
from ndg.security.server.wsgi.utils.sessionmanagerclient import \
WSGISessionManagerClient, AuthNServiceInvalidCredentials
class BasicSessionManagerOpenIDAuthNInterface(BasicAuthNInterface):
'''Authentication interface class for OpenIDProviderMiddleware to enable
authentication to a Session Manager instance running in the same WSGI
stack or via a SOAP call to a remote service. This is a basic test
interface. See sessionmanager module for a full implementation linking to
a database via SQLAlchemy
'''
def __init__(self, **prop):
"""Extends BasicAuthNInterface initialising Session Manager Client
@type **prop: dict
@param **prop: set properties via keywords
@raise AuthNInterfaceConfigError: error with configuration
"""
        user2Identifier = prop.pop('username2UserIdentifiers', None)
if user2Identifier:
self._username2Identifier = {}
for i in user2Identifier.split():
username, identifierStr = i.strip().split(':')
identifiers = tuple(identifierStr.split(','))
self._username2Identifier[username] = identifiers
else:
            raise AuthNInterfaceConfigError('No "username2UserIdentifiers" '
                                            'config option found')
self._client = WSGISessionManagerClient(**prop)
# This is set at login
self.sessionId = None
def logon(self, environ, userIdentifier, username, password):
"""Interface login method
@type environ: dict
@param environ: standard WSGI environ parameter
@type username: basestring
@param username: user identifier
@type password: basestring
        @param password: corresponding password for the given username
@raise AuthNInterfaceUsername2IdentifierMismatch: no OpenID
identifiers match the given username
@raise AuthNInterfaceInvalidCredentials: invalid username/password
"""
if userIdentifier is not None and \
           userIdentifier not in self._username2Identifier.get(username, ()):
raise AuthNInterfaceUsername2IdentifierMismatch()
try:
self._client.environ = environ
connectResp = self._client.connect(username, passphrase=password)
self.sessionId = connectResp[-1]
log.debug("Connected to Session Manager with session ID: %s",
self.sessionId)
except AuthNServiceInvalidCredentials, e:
log.exception(e)
raise AuthNInterfaceInvalidCredentials()
def logout(self):
"""logout from the Session Manager
"""
try:
self._client.disconnect(sessID=self.sessionId)
except Exception, e:
log.exception(e)
raise AuthNInterfaceInvalidCredentials()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Categorical distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.platform import test
def make_categorical(batch_shape, num_classes, dtype=dtypes.int32):
logits = random_ops.random_uniform(
list(batch_shape) + [num_classes], -10, 10, dtype=dtypes.float32) - 50.
return categorical.Categorical(logits, dtype=dtype)
class CategoricalTest(test.TestCase):
def testP(self):
p = [0.2, 0.8]
dist = categorical.Categorical(probs=p)
with self.test_session():
self.assertAllClose(p, dist.probs.eval())
self.assertAllEqual([2], dist.logits.get_shape())
def testLogits(self):
p = np.array([0.2, 0.8], dtype=np.float32)
logits = np.log(p) - 50.
dist = categorical.Categorical(logits=logits)
with self.test_session():
self.assertAllEqual([2], dist.probs.get_shape())
self.assertAllEqual([2], dist.logits.get_shape())
self.assertAllClose(dist.probs.eval(), p)
self.assertAllClose(dist.logits.eval(), logits)
def testShapes(self):
with self.test_session():
for batch_shape in ([], [1], [2, 3, 4]):
dist = make_categorical(batch_shape, 10)
self.assertAllEqual(batch_shape, dist.batch_shape)
self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())
self.assertAllEqual([], dist.event_shape)
self.assertAllEqual([], dist.event_shape_tensor().eval())
self.assertEqual(10, dist.event_size.eval())
# event_size is available as a constant because the shape is
# known at graph build time.
self.assertEqual(10, tensor_util.constant_value(dist.event_size))
for batch_shape in ([], [1], [2, 3, 4]):
dist = make_categorical(
batch_shape, constant_op.constant(
10, dtype=dtypes.int32))
self.assertAllEqual(len(batch_shape), dist.batch_shape.ndims)
self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())
self.assertAllEqual([], dist.event_shape)
self.assertAllEqual([], dist.event_shape_tensor().eval())
self.assertEqual(10, dist.event_size.eval())
def testDtype(self):
dist = make_categorical([], 5, dtype=dtypes.int32)
self.assertEqual(dist.dtype, dtypes.int32)
self.assertEqual(dist.dtype, dist.sample(5).dtype)
self.assertEqual(dist.dtype, dist.mode().dtype)
dist = make_categorical([], 5, dtype=dtypes.int64)
self.assertEqual(dist.dtype, dtypes.int64)
self.assertEqual(dist.dtype, dist.sample(5).dtype)
self.assertEqual(dist.dtype, dist.mode().dtype)
self.assertEqual(dist.probs.dtype, dtypes.float32)
self.assertEqual(dist.logits.dtype, dtypes.float32)
self.assertEqual(dist.logits.dtype, dist.entropy().dtype)
self.assertEqual(
dist.logits.dtype, dist.prob(np.array(
0, dtype=np.int64)).dtype)
self.assertEqual(
dist.logits.dtype, dist.log_prob(np.array(
0, dtype=np.int64)).dtype)
def testUnknownShape(self):
with self.test_session():
logits = array_ops.placeholder(dtype=dtypes.float32)
dist = categorical.Categorical(logits)
sample = dist.sample()
# Will sample class 1.
sample_value = sample.eval(feed_dict={logits: [-1000.0, 1000.0]})
self.assertEqual(1, sample_value)
# Batch entry 0 will sample class 1, batch entry 1 will sample class 0.
sample_value_batch = sample.eval(
feed_dict={logits: [[-1000.0, 1000.0], [1000.0, -1000.0]]})
self.assertAllEqual([1, 0], sample_value_batch)
def testPMFWithBatch(self):
histograms = [[0.2, 0.8], [0.6, 0.4]]
dist = categorical.Categorical(math_ops.log(histograms) - 50.)
with self.test_session():
self.assertAllClose(dist.prob([0, 1]).eval(), [0.2, 0.4])
def testPMFNoBatch(self):
histograms = [0.2, 0.8]
dist = categorical.Categorical(math_ops.log(histograms) - 50.)
with self.test_session():
self.assertAllClose(dist.prob(0).eval(), 0.2)
def testCDFWithDynamicEventShape(self):
"""Test that dynamically-sized events with unkown shape work."""
batch_size = 2
histograms = array_ops.placeholder(dtype=dtypes.float32,
shape=(batch_size, None))
event = array_ops.placeholder(dtype=dtypes.float32, shape=(batch_size,))
dist = categorical.Categorical(probs=histograms)
cdf_op = dist.cdf(event)
# Feed values into the placeholder with different shapes
# three classes.
event_feed_one = [0, 1]
histograms_feed_one = [[0.5, 0.3, 0.2], [1.0, 0.0, 0.0]]
expected_cdf_one = [0.0, 1.0]
feed_dict_one = {
histograms: histograms_feed_one,
event: event_feed_one
}
# six classes.
event_feed_two = [2, 5]
histograms_feed_two = [[0.9, 0.0, 0.0, 0.0, 0.0, 0.1],
[0.15, 0.2, 0.05, 0.35, 0.13, 0.12]]
expected_cdf_two = [0.9, 0.88]
feed_dict_two = {
histograms: histograms_feed_two,
event: event_feed_two
}
with self.test_session() as sess:
actual_cdf_one = sess.run(cdf_op, feed_dict=feed_dict_one)
actual_cdf_two = sess.run(cdf_op, feed_dict=feed_dict_two)
self.assertAllClose(actual_cdf_one, expected_cdf_one)
self.assertAllClose(actual_cdf_two, expected_cdf_two)
def testCDFWithBatch(self):
histograms = [[0.1, 0.2, 0.3, 0.25, 0.15],
[0.0, 0.75, 0.2, 0.05, 0.0]]
event = [0, 3]
expected_cdf = [0.0, 0.95]
dist = categorical.Categorical(probs=histograms)
cdf_op = dist.cdf(event)
with self.test_session():
self.assertAllClose(cdf_op.eval(), expected_cdf)
def testCDFNoBatch(self):
histogram = [0.1, 0.2, 0.3, 0.4]
event = 2
expected_cdf = 0.3
dist = categorical.Categorical(probs=histogram)
cdf_op = dist.cdf(event)
with self.test_session():
self.assertAlmostEqual(cdf_op.eval(), expected_cdf)
def testLogPMF(self):
logits = np.log([[0.2, 0.8], [0.6, 0.4]]) - 50.
dist = categorical.Categorical(logits)
with self.test_session():
self.assertAllClose(dist.log_prob([0, 1]).eval(), np.log([0.2, 0.4]))
def testEntropyNoBatch(self):
logits = np.log([0.2, 0.8]) - 50.
dist = categorical.Categorical(logits)
with self.test_session():
self.assertAllClose(dist.entropy().eval(),
-(0.2 * np.log(0.2) + 0.8 * np.log(0.8)))
def testEntropyWithBatch(self):
logits = np.log([[0.2, 0.8], [0.6, 0.4]]) - 50.
dist = categorical.Categorical(logits)
with self.test_session():
self.assertAllClose(dist.entropy().eval(), [
-(0.2 * np.log(0.2) + 0.8 * np.log(0.8)),
-(0.6 * np.log(0.6) + 0.4 * np.log(0.4))
])
def testEntropyGradient(self):
with self.test_session() as sess:
logits = constant_op.constant([[1., 2., 3.], [2., 5., 1.]])
probabilities = nn_ops.softmax(logits)
log_probabilities = nn_ops.log_softmax(logits)
true_entropy = - math_ops.reduce_sum(
probabilities * log_probabilities, axis=-1)
categorical_distribution = categorical.Categorical(probs=probabilities)
categorical_entropy = categorical_distribution.entropy()
      # Verify the entropy gradient matches the explicitly computed reference.
true_entropy_g = gradients_impl.gradients(true_entropy, [logits])
categorical_entropy_g = gradients_impl.gradients(
categorical_entropy, [logits])
res = sess.run({"true_entropy": true_entropy,
"categorical_entropy": categorical_entropy,
"true_entropy_g": true_entropy_g,
"categorical_entropy_g": categorical_entropy_g})
self.assertAllClose(res["true_entropy"],
res["categorical_entropy"])
self.assertAllClose(res["true_entropy_g"],
res["categorical_entropy_g"])
def testSample(self):
with self.test_session():
histograms = [[[0.2, 0.8], [0.4, 0.6]]]
dist = categorical.Categorical(math_ops.log(histograms) - 50.)
n = 10000
samples = dist.sample(n, seed=123)
samples.set_shape([n, 1, 2])
self.assertEqual(samples.dtype, dtypes.int32)
sample_values = samples.eval()
self.assertFalse(np.any(sample_values < 0))
self.assertFalse(np.any(sample_values > 1))
self.assertAllClose(
[[0.2, 0.4]], np.mean(
sample_values == 0, axis=0), atol=1e-2)
self.assertAllClose(
[[0.8, 0.6]], np.mean(
sample_values == 1, axis=0), atol=1e-2)
def testSampleWithSampleShape(self):
with self.test_session():
histograms = [[[0.2, 0.8], [0.4, 0.6]]]
dist = categorical.Categorical(math_ops.log(histograms) - 50.)
samples = dist.sample((100, 100), seed=123)
prob = dist.prob(samples)
prob_val = prob.eval()
self.assertAllClose(
[0.2**2 + 0.8**2], [prob_val[:, :, :, 0].mean()], atol=1e-2)
self.assertAllClose(
[0.4**2 + 0.6**2], [prob_val[:, :, :, 1].mean()], atol=1e-2)
def testLogPMFBroadcasting(self):
with self.test_session():
histograms = [[[0.2, 0.8], [0.4, 0.6]]]
dist = categorical.Categorical(math_ops.log(histograms) - 50.)
prob = dist.prob(1)
self.assertAllClose([[0.8, 0.6]], prob.eval())
prob = dist.prob([1])
self.assertAllClose([[0.8, 0.6]], prob.eval())
prob = dist.prob([0, 1])
self.assertAllClose([[0.2, 0.6]], prob.eval())
prob = dist.prob([[0, 1]])
self.assertAllClose([[0.2, 0.6]], prob.eval())
prob = dist.prob([[[0, 1]]])
self.assertAllClose([[[0.2, 0.6]]], prob.eval())
prob = dist.prob([[1, 0], [0, 1]])
self.assertAllClose([[0.8, 0.4], [0.2, 0.6]], prob.eval())
prob = dist.prob([[[1, 1], [1, 0]], [[1, 0], [0, 1]]])
self.assertAllClose([[[0.8, 0.6], [0.8, 0.4]], [[0.8, 0.4], [0.2, 0.6]]],
prob.eval())
def testLogPMFShape(self):
with self.test_session():
# shape [1, 2, 2]
histograms = [[[0.2, 0.8], [0.4, 0.6]]]
dist = categorical.Categorical(math_ops.log(histograms))
log_prob = dist.log_prob([0, 1])
self.assertEqual(2, log_prob.get_shape().ndims)
self.assertAllEqual([1, 2], log_prob.get_shape())
log_prob = dist.log_prob([[[1, 1], [1, 0]], [[1, 0], [0, 1]]])
self.assertEqual(3, log_prob.get_shape().ndims)
self.assertAllEqual([2, 2, 2], log_prob.get_shape())
def testLogPMFShapeNoBatch(self):
histograms = [0.2, 0.8]
dist = categorical.Categorical(math_ops.log(histograms))
log_prob = dist.log_prob(0)
self.assertEqual(0, log_prob.get_shape().ndims)
self.assertAllEqual([], log_prob.get_shape())
log_prob = dist.log_prob([[[1, 1], [1, 0]], [[1, 0], [0, 1]]])
self.assertEqual(3, log_prob.get_shape().ndims)
self.assertAllEqual([2, 2, 2], log_prob.get_shape())
def testMode(self):
with self.test_session():
histograms = [[[0.2, 0.8], [0.6, 0.4]]]
dist = categorical.Categorical(math_ops.log(histograms) - 50.)
self.assertAllEqual(dist.mode().eval(), [[1, 0]])
def testCategoricalCategoricalKL(self):
def np_softmax(logits):
exp_logits = np.exp(logits)
return exp_logits / exp_logits.sum(axis=-1, keepdims=True)
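    # The reference value checked below is
    #   KL(a || b) = sum_i p_a(i) * (log p_a(i) - log p_b(i)),
    # computed directly with numpy from the softmax of the logits.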
with self.test_session() as sess:
for categories in [2, 4]:
for batch_size in [1, 10]:
a_logits = np.random.randn(batch_size, categories)
b_logits = np.random.randn(batch_size, categories)
a = categorical.Categorical(logits=a_logits)
b = categorical.Categorical(logits=b_logits)
kl = kullback_leibler.kl_divergence(a, b)
kl_val = sess.run(kl)
# Make sure KL(a||a) is 0
kl_same = sess.run(kullback_leibler.kl_divergence(a, a))
prob_a = np_softmax(a_logits)
prob_b = np_softmax(b_logits)
kl_expected = np.sum(prob_a * (np.log(prob_a) - np.log(prob_b)),
axis=-1)
self.assertEqual(kl.get_shape(), (batch_size,))
self.assertAllClose(kl_val, kl_expected)
self.assertAllClose(kl_same, np.zeros_like(kl_expected))
if __name__ == "__main__":
test.main()
|
|
import re
import json
from tornado.web import HTTPError
from biothings.www.helper import BaseHandler
from biothings.utils.common import split_ids
class BiothingHandler(BaseHandler):
    def _ga_event_object(self, action, data=None):
        ''' Returns the google analytics object for requests on this endpoint (annotation handler). '''
        if data is None:
            data = {}
        return self._settings.ga_event_object(endpoint=self._settings._annotation_endpoint, action=action, data=data)
    def _examine_kwargs(self, action, kwargs):
        ''' A hook for sub-classing.  This is run after get_query_params but
            before the actual Elasticsearch query. '''
if action == 'GET':
pass
elif action == 'POST':
pass
pass
def get(self, bid=None):
'''
'''
if bid:
kwargs = self.get_query_params()
self._examine_kwargs('GET', kwargs)
biothing_object = self.esq.get_biothing(bid, **kwargs)
if biothing_object:
self.return_json(biothing_object)
self.ga_track(settings=self._settings, event=self._ga_event_object('GET'))
else:
raise HTTPError(404)
else:
raise HTTPError(404)
def post(self, ids=None):
'''
This is essentially the same as post request in QueryHandler, with different defaults.
parameters:
ids
fields
email
'''
kwargs = self.get_query_params()
self._examine_kwargs('POST', kwargs)
ids = kwargs.pop('ids', None)
if ids:
            ids = re.split(r'[\s\r\n+|,]+', ids)
res = self.esq.mget_biothings(ids, **kwargs)
else:
res = {'success': False, 'error': "Missing required parameters."}
encode = not isinstance(res, str) # when res is a string, e.g. when rawquery is true, do not encode it as json
self.return_json(res, encode=encode)
self.ga_track(settings=self._settings, event=self._ga_event_object('POST', {'qsize': len(ids) if ids else 0}))
class QueryHandler(BaseHandler):
    def _ga_event_object(self, action, data=None):
        ''' Returns the google analytics object for requests on this endpoint (query handler). '''
        if data is None:
            data = {}
        return self._settings.ga_event_object(endpoint=self._settings._query_endpoint, action=action, data=data)
    def _examine_kwargs(self, action, kwargs):
        ''' A hook for sub-classing.  This is run after get_query_params but
            before the actual Elasticsearch query. '''
if action == 'GET':
pass
elif action == 'POST':
pass
pass
def get(self):
'''
parameters:
q
fields
from
size
sort
facets
callback
email
fetch_all
explain
raw
'''
kwargs = self.get_query_params()
self._examine_kwargs('GET', kwargs)
q = kwargs.pop('q', None)
scroll_id = kwargs.pop('scroll_id', None)
_has_error = False
if scroll_id:
res = self.esq.scroll(scroll_id, **kwargs)
elif q:
for arg in ['from', 'size']:
value = kwargs.get(arg, None)
if value:
try:
kwargs[arg] = int(value)
except ValueError:
res = {'success': False, 'error': 'Parameter "{}" must be an integer.'.format(arg)}
_has_error = True
if not _has_error:
res = self.esq.query(q, **kwargs)
if kwargs.get('fetch_all', False):
self.ga_track(settings=self._settings, event=self._ga_event_object('fetch_all', {'total': res.get('total', None)}))
else:
res = {'success': False, 'error': "Missing required parameters."}
self.return_json(res)
self.ga_track(settings=self._settings, event=self._ga_event_object('GET', {'qsize': len(q) if q else 0}))
def post(self):
'''
parameters:
q
scopes
fields
email
jsoninput if true, input "q" is a json string, must be decoded as a list.
'''
kwargs = self.get_query_params()
self._examine_kwargs('POST', kwargs)
q = kwargs.pop('q', None)
jsoninput = kwargs.pop('jsoninput', None) in ('1', 'true')
if q:
# ids = re.split('[\s\r\n+|,]+', q)
try:
ids = json.loads(q) if jsoninput else split_ids(q)
if not isinstance(ids, list):
raise ValueError
except ValueError:
ids = None
res = {'success': False, 'error': 'Invalid input for "q" parameter.'}
if ids:
scopes = kwargs.pop('scopes', None)
fields = kwargs.pop('fields', None)
res = self.esq.mget_biothings(ids, fields=fields, scopes=scopes, **kwargs)
else:
res = {'success': False, 'error': "Missing required parameters."}
encode = not isinstance(res, str) # when res is a string, e.g. when rawquery is true, do not encode it as json
self.return_json(res, encode=encode)
self.ga_track(settings=self._settings, event=self._ga_event_object('POST', {'qsize': len(q) if q else 0}))
class MetaDataHandler(BaseHandler):
def get(self):
self.return_json({})
class FieldsHandler(BaseHandler):
def get(self):
if self._settings.field_notes_path:
notes = json.load(open(self._settings.field_notes_path, 'r'))
else:
notes = {}
es_mapping = self.esq.query_fields()
kwargs = self.get_query_params()
def get_indexed_properties_in_dict(d, prefix):
r = {}
for (k, v) in d.items():
r[prefix + '.' + k] = {}
r[prefix + '.' + k]['indexed'] = False
if 'type' in v:
r[prefix + '.' + k]['type'] = v['type']
                    if v.get('index') != 'no':
# indexed field
r[prefix + '.' + k]['indexed'] = True
else:
r[prefix + '.' + k]['type'] = 'object'
r.update(get_indexed_properties_in_dict(v['properties'], prefix + '.' + k))
if ('include_in_all' in v) and v['include_in_all']:
r[prefix + '.' + k]['include_in_all'] = True
else:
r[prefix + '.' + k]['include_in_all'] = False
return r
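        # Illustrative example (hypothetical mapping): an input of
        #   {'gene': {'properties': {'symbol': {'type': 'string'}}}}
        # flattens to entries for both '.gene' (type 'object') and
        # '.gene.symbol' (type 'string', indexed); the leading '.' is stripped
        # from each key below.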
r = {}
search = kwargs.pop('search', None)
prefix = kwargs.pop('prefix', None)
for (k, v) in get_indexed_properties_in_dict(es_mapping, '').items():
k1 = k.lstrip('.')
if (search and search in k1) or (prefix and k1.startswith(prefix)) or (not search and not prefix):
r[k1] = v
if k1 in notes:
r[k1]['notes'] = notes[k1]
self.return_json(r)
class StatusHandler(BaseHandler):
''' Handles requests to check the status of the server. '''
def head(self):
        self.esq.status_check(self._settings.status_check_id)
def get(self):
self.head()
self.write('OK')
|
|
'''dossier.label.relation_label
.. This software is released under an MIT/X11 open source license.
Copyright 2012-2015 Diffeo, Inc.
'''
from __future__ import absolute_import
import cbor
from collections import Container, Hashable
import logging
import time
import enum
from itertools import imap, ifilter
from total_ordering import total_ordering
from dossier.label.label import time_complement, LabelStore
logger = logging.getLogger(__name__)
class RelationStrength(enum.Enum):
'''A human-assigned value for the relation type.
NONE - these entities are not related in any meaningful way.
UNKNOWN - the relationship between these entities are unknown.
AKA - The two entities are the same (coreference).
WEAK - The entities are related but not in a particularly interesting way.
STRONG - The entities are strongly related.
'''
NONE = -1
UNKNOWN = 0
WEAK = 1
STRONG = 2
AKA = 3
@property
def is_positive(self):
return (self == RelationStrength.WEAK or
self == RelationStrength.STRONG or
self == RelationStrength.AKA)
@property
def is_negative(self):
return not self.is_positive
def __lt__(self, other):
if self.__class__ is other.__class__:
return self.value < other.value
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self.value <= other.value
return NotImplemented
    def __gt__(self, other):
        if self.__class__ is other.__class__:
            return self.value > other.value
        return NotImplemented
def __ge__(self, other):
if self.__class__ is other.__class__:
return self.value >= other.value
return NotImplemented
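# Illustrative examples: RelationStrength.STRONG > RelationStrength.WEAK is
# True, RelationStrength.NONE.is_positive is False, and RelationStrength(3) is
# RelationStrength.AKA; this is why RelationLabel below accepts a plain int
# and promotes it to the enum.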
@total_ordering
class RelationLabel(Container, Hashable):
'''An immutable unit of ground truth data for relationships.
A ``RelationLabel`` is a statement saying that the item at
:attr:`content_id1` is related to the item at
:attr:`content_id2`. The statement was recorded by
:attr:`annotator_id`, a string identifying a user, at
:attr:`epoch_ticks`. The strength of the relationship is specified
by :attr:`rel_strength`, which is of type :class:`RelationStrength`.
On creation, the tuple is normalized such that `content_id1` is
less than `content_id2`.
RelationLabels are comparable, sortable, and hashable. The sort
order compares the two content ids, the annotator id, the epoch
ticks, and then rel_strength.
.. attribute:: content_id1
The first content ID
.. attribute:: content_id2
The second content ID
.. attribute:: annotator_id
An identifier of the user making this label.
.. attribute:: rel_strength
A :class:`RelationStrength` describing the strength of the relationship.
.. attribute:: epoch_ticks
The time at which :attr:`annotator_id` made this assertion, in
seconds since the Unix epoch.
.. attribute:: meta
Any additional meta data about this relation label, as a
dictionary.
'''
def __init__(self, content_id1, content_id2, annotator_id, rel_strength,
epoch_ticks=None, meta=None):
super(RelationLabel, self).__init__()
if isinstance(rel_strength, int):
rel_strength = RelationStrength(rel_strength)
if epoch_ticks is None:
epoch_ticks = long(time.time())
if content_id2 < content_id1:
self.content_id1 = content_id2
self.content_id2 = content_id1
else:
self.content_id1 = content_id1
self.content_id2 = content_id2
self.annotator_id = annotator_id
self.rel_strength = rel_strength
self.epoch_ticks = epoch_ticks
self.meta = meta
if self.meta is None:
self.meta = {}
def as_dict(self):
'''Returns this RelationLabel as a Python dictionary.
'''
return {
'content_id1': self.content_id1,
'content_id2': self.content_id2,
'rel_strength': self.rel_strength,
'annotator_id': self.annotator_id,
'epoch_ticks': self.epoch_ticks,
'meta': self.meta,
}
def __contains__(self, cid):
'''Checks if a cid is one of the identifiers in this RelationLabel.
'''
return cid == self.content_id1 or cid == self.content_id2
def other(self, cid):
if cid == self.content_id1:
return self.content_id2
elif cid == self.content_id2:
return self.content_id1
else:
raise KeyError(cid)
def __lt__(self, other):
if self.content_id1 != other.content_id1:
return self.content_id1 < other.content_id1
if self.content_id2 != other.content_id2:
return self.content_id2 < other.content_id2
if self.annotator_id != other.annotator_id:
return self.annotator_id < other.annotator_id
if self.epoch_ticks != other.epoch_ticks:
return self.epoch_ticks < other.epoch_ticks
if self.rel_strength is not other.rel_strength:
return self.rel_strength < other.rel_strength
return False
def __eq__(self, other):
if self.content_id1 != other.content_id1:
return False
if self.content_id2 != other.content_id2:
return False
if self.annotator_id != other.annotator_id:
return False
if self.epoch_ticks != other.epoch_ticks:
return False
if self.rel_strength != other.rel_strength:
return False
return True
def __hash__(self):
return (hash(self.content_id1) ^
hash(self.content_id2) ^
hash(self.annotator_id) ^
hash(self.epoch_ticks) ^
hash(self.rel_strength))
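# Illustrative example (hypothetical ids): RelationLabel('b', 'a', 'tester',
# RelationStrength.WEAK) normalises to content_id1='a', content_id2='b', so
# comparison and hashing are independent of the order in which the two
# content ids were supplied.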
class RelationLabelStore(object):
'''A relation label database.
.. automethod:: __init__
.. automethod:: put
.. automethod:: get
.. automethod:: get_related
.. automethod:: everything
.. automethod:: delete_all
'''
TABLE = 'rel_label'
config_name = 'relation_label_store'
_kvlayer_namespace = {
# (cid1, cid2, annotator_id, time) -> (rel_strength, meta)
TABLE: (str, str, str, long),
}
def __init__(self, kvlclient):
'''Create a new relation label store.
'''
self.kvl = kvlclient
self.kvl.setup_namespace(self._kvlayer_namespace)
def _keys_from_label(self, label):
'''Convert a label into a kvl key.
'''
k1 = (label.content_id1, label.content_id2,
label.annotator_id, time_complement(label.epoch_ticks))
k2 = (label.content_id2, label.content_id1,
label.annotator_id, time_complement(label.epoch_ticks))
return k1, k2
def _value_from_label(self, label):
'''Convert a label into a kvl value.
'''
unser_val = (label.rel_strength.value, label.meta)
return cbor.dumps(unser_val)
def _label_from_kvlayer(self, key, val):
'''Make a label from a kvlayer row.
'''
(content_id1, content_id2, annotator_id,
inverted_epoch_ticks) = key
epoch_ticks = time_complement(inverted_epoch_ticks)
rel_strength, meta = cbor.loads(val)
return RelationLabel(content_id1, content_id2, annotator_id,
RelationStrength(rel_strength),
epoch_ticks=epoch_ticks, meta=meta)
def put(self, *labels):
'''Add a new relation label to the store.
'''
puts = []
for label in labels:
k1, k2 = self._keys_from_label(label)
v = self._value_from_label(label)
puts.append((k1, v))
puts.append((k2, v))
self.kvl.put(self.TABLE, *puts)
def get(self, cid1, cid2, annotator_id):
'''Retrieve a relation label from the store.
'''
t = (cid1, cid2, annotator_id)
for k, v in self.kvl.scan(self.TABLE, (t, t)):
return self._label_from_kvlayer(k, v)
def get_related(self, content_id, min_strength=None):
'''Get positive relation labels for ``cid``.
If ``min_strength`` is set, will restrict results to labels
with a ``rel_strength`` greater or equal to the provided
``RelationStrength`` value. Note: ``min_strength`` should be of
type ``RelationStrength``.
'''
def is_related(label):
if min_strength is not None:
return label.rel_strength >= min_strength
else:
return label.rel_strength.is_positive
labels = self.everything(content_id=content_id)
return ifilter(is_related, labels)
def get_related_ids(self, content_id, min_strength=None):
'''Get identifiers for related identifiers.
'''
related_labels = self.get_related(content_id,
min_strength=min_strength)
related_idents = set()
for label in related_labels:
related_idents.add(label.other(content_id))
return list(related_idents)
def get_relationships_for_idents(self, cid, idents):
'''Get relationships between ``idents`` and a ``cid``.
Returns a dictionary mapping the identifiers in ``idents``
to either None, if no relationship label is found between
the identifier and ``cid``, or a RelationshipType classifying
the strength of the relationship between the identifier and
``cid``.
'''
keys = [(cid, ident,) for ident in idents]
key_ranges = zip(keys, keys)
        # Start every requested identifier at None, as documented above; found
        # labels then overwrite the corresponding entries.
        mapping = dict((ident, None) for ident in idents)
        for k, v in self.kvl.scan(self.TABLE, *key_ranges):
            label = self._label_from_kvlayer(k, v)
            mapping[label.other(cid)] = label.rel_strength
        return mapping
def everything(self, content_id=None):
'''Returns a generator of all labels in the store.
If ``content_id`` is set, will restrict results to labels
containing the provided ``content_id``.
'''
if content_id is not None:
ranges = [((content_id,), (content_id,))]
else:
ranges = []
labels = self.kvl.scan(self.TABLE, *ranges)
labels = imap(lambda p: self._label_from_kvlayer(*p), labels)
return labels
def delete_all(self):
'''Delete all labels in the store.
'''
self.kvl.clear_table(self.TABLE)
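# A minimal usage sketch (illustrative only; assumes a configured kvlayer
# client):
#
#     store = RelationLabelStore(kvl_client)
#     store.put(RelationLabel('cid-a', 'cid-b', 'tester',
#                             RelationStrength.STRONG))
#     store.get('cid-a', 'cid-b', 'tester')   # the label just written
#     store.get_related_ids('cid-a')          # ['cid-b']
#     store.delete_all()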
|
|
#!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = ""
PKG_SRC_DIR = ""
def doCMD(cmd):
# Do not need handle timeout in this short script, let tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
        if output_line == '' and cmd_return_code is not None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def updateCMD(cmd=None):
if "pkgcmd" in cmd:
cmd = "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
return cmd
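# Illustrative example: with the default user "app", updateCMD('pkgcmd -l')
# becomes "su - app -c '<XW_ENV>;pkgcmd -l'", while commands that do not
# contain "pkgcmd" are returned unchanged.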
def getUSERID():
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell id -u %s" % (
PARAMETERS.device, PARAMETERS.user)
else:
cmd = "ssh %s \"id -u %s\"" % (
PARAMETERS.device, PARAMETERS.user )
return doCMD(cmd)
def getPKGID(pkg_name=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
else:
cmd = "ssh %s \"%s\"" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
(return_code, output) = doCMD(cmd)
if return_code != 0:
return None
test_pkg_id = None
for line in output:
if line.find("[" + pkg_name + "]") != -1:
pkgidIndex = line.split().index("pkgid")
test_pkg_id = line.split()[pkgidIndex+1].strip("[]")
break
return test_pkg_id
def doRemoteCMD(cmd=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
else:
cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
return doCMD(cmd)
def doRemoteCopy(src=None, dest=None):
if PARAMETERS.mode == "SDB":
cmd_prefix = "sdb -s %s push" % PARAMETERS.device
cmd = "%s %s %s" % (cmd_prefix, src, dest)
else:
cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
(return_code, output) = doCMD(cmd)
doRemoteCMD("sync")
if return_code != 0:
return True
else:
return False
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".wgt"):
pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
if not pkg_id:
action_status = False
continue
(return_code, output) = doRemoteCMD(
"pkgcmd -u -t wgt -q -n %s" % pkg_id)
for line in output:
if "Failure" in line:
action_status = False
break
(return_code, output) = doRemoteCMD(
"rm -rf %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
return action_status
def instPKGs():
action_status = True
(return_code, output) = doRemoteCMD(
"mkdir -p %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".wgt"):
if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
action_status = False
(return_code, output) = doRemoteCMD(
"pkgcmd -i -t wgt -q -p %s/%s" % (SRC_DIR, file))
doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
for line in output:
if "Failure" in line:
action_status = False
break
for item in glob.glob("%s/*" % SCRIPT_DIR):
if item.endswith(".wgt"):
continue
elif item.endswith("inst.py"):
continue
else:
item_name = os.path.basename(item)
if not doRemoteCopy(item, "%s/%s" % (PKG_SRC_DIR, item_name)):
#if not doRemoteCopy(item, PKG_SRC_DIR):
action_status = False
for item in glob.glob("%s/sse/support/cgi/*" % SCRIPT_DIR):
item_name = os.path.basename(item)
if not doRemoteCopy(item, "%s/sse/support/%s" % (PKG_SRC_DIR, item_name)):
action_status = False
for item in glob.glob("%s/sse/w3c/resources/cgi/*" % SCRIPT_DIR):
item_name = os.path.basename(item)
if not doRemoteCopy(item, "%s/sse/w3c/resources/%s" % (PKG_SRC_DIR, item_name)):
action_status = False
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-m", dest="mode", action="store", help="Specify mode")
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
opts_parser.add_option(
"-a", dest="user", action="store", help="User name")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception, e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.user:
PARAMETERS.user = "app"
global SRC_DIR, PKG_SRC_DIR
SRC_DIR = "/home/%s/content" % PARAMETERS.user
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
if not PARAMETERS.mode:
PARAMETERS.mode = "SDB"
if PARAMETERS.mode == "SDB":
if not PARAMETERS.device:
(return_code, output) = doCMD("sdb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
else:
PARAMETERS.mode = "SSH"
if not PARAMETERS.device:
print "No device provided"
sys.exit(1)
user_info = getUSERID()
re_code = user_info[0]
if re_code == 0 :
global XW_ENV
userid = user_info[1][0]
XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket"%str(userid)
else:
print "[Error] cmd commands error : %s"%str(user_info[1])
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
|
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mox
from neutronclient.common import exceptions as n_exc
from neutronclient.v2_0 import client
from nova.api.openstack.compute.contrib import security_groups
from nova import context
from nova import exception
from nova.network import neutronv2
from nova.network.security_group import neutron_driver
from nova import test
class TestNeutronDriver(test.NoDBTestCase):
def setUp(self):
super(TestNeutronDriver, self).setUp()
self.mox.StubOutWithMock(neutronv2, 'get_client')
self.moxed_client = self.mox.CreateMock(client.Client)
neutronv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
self.moxed_client)
self.context = context.RequestContext('userid', 'my_tenantid')
setattr(self.context,
'auth_token',
'bff4a5a6b9eb4ea2a6efec6eefb77936')
def test_list_with_project(self):
project_id = '0af70a4d22cf4652824ddc1f2435dd85'
security_groups_list = {'security_groups': []}
self.moxed_client.list_security_groups(tenant_id=project_id).AndReturn(
security_groups_list)
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
sg_api.list(self.context, project=project_id)
def test_create_security_group_exceed_quota(self):
name = 'test-security-group'
description = 'test-security-group'
body = {'security_group': {'name': name,
'description': description}}
message = "Quota exceeded for resources: ['security_group']"
self.moxed_client.create_security_group(
body).AndRaise(n_exc.NeutronClientException(status_code=409,
message=message))
self.mox.ReplayAll()
sg_api = security_groups.NativeNeutronSecurityGroupAPI()
self.assertRaises(exception.SecurityGroupLimitExceeded,
sg_api.create_security_group, self.context, name,
description)
def test_create_security_group_rules_exceed_quota(self):
vals = {'protocol': 'tcp', 'cidr': '0.0.0.0/0',
'parent_group_id': '7ae75663-277e-4a0e-8f87-56ea4e70cb47',
'group_id': None, 'from_port': 1025, 'to_port': 1025}
body = {'security_group_rules': [{'remote_group_id': None,
'direction': 'ingress', 'protocol': 'tcp', 'ethertype': 'IPv4',
'port_range_max': 1025, 'port_range_min': 1025,
'security_group_id': '7ae75663-277e-4a0e-8f87-56ea4e70cb47',
'remote_ip_prefix': '0.0.0.0/0'}]}
name = 'test-security-group'
message = "Quota exceeded for resources: ['security_group_rule']"
self.moxed_client.create_security_group_rule(
body).AndRaise(n_exc.NeutronClientException(status_code=409,
message=message))
self.mox.ReplayAll()
sg_api = security_groups.NativeNeutronSecurityGroupAPI()
self.assertRaises(exception.SecurityGroupLimitExceeded,
sg_api.add_rules, self.context, None, name, [vals])
def test_list_security_group_with_no_port_range_and_not_tcp_udp_icmp(self):
sg1 = {'description': 'default',
'id': '07f1362f-34f6-4136-819a-2dcde112269e',
'name': 'default',
'tenant_id': 'c166d9316f814891bcb66b96c4c891d6',
'security_group_rules':
[{'direction': 'ingress',
'ethertype': 'IPv4',
'id': '0a4647f1-e1aa-488d-90e1-97a7d0293beb',
'port_range_max': None,
'port_range_min': None,
'protocol': '51',
'remote_group_id': None,
'remote_ip_prefix': None,
'security_group_id':
'07f1362f-34f6-4136-819a-2dcde112269e',
'tenant_id': 'c166d9316f814891bcb66b96c4c891d6'}]}
self.moxed_client.list_security_groups().AndReturn(
{'security_groups': [sg1]})
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
result = sg_api.list(self.context)
expected = [{'rules':
[{'from_port': -1, 'protocol': '51', 'to_port': -1,
'parent_group_id': '07f1362f-34f6-4136-819a-2dcde112269e',
'cidr': '0.0.0.0/0', 'group_id': None,
'id': '0a4647f1-e1aa-488d-90e1-97a7d0293beb'}],
'project_id': 'c166d9316f814891bcb66b96c4c891d6',
'id': '07f1362f-34f6-4136-819a-2dcde112269e',
'name': 'default', 'description': 'default'}]
self.assertEqual(expected, result)
def test_instances_security_group_bindings(self):
server_id = 'c5a20e8d-c4b0-47cf-9dca-ebe4f758acb1'
port1_id = '4c505aec-09aa-47bc-bcc0-940477e84dc0'
port2_id = 'b3b31a53-6e29-479f-ae5c-00b7b71a6d44'
sg1_id = '2f7ce969-1a73-4ef9-bbd6-c9a91780ecd4'
sg2_id = '20c89ce5-9388-4046-896e-64ffbd3eb584'
servers = [{'id': server_id}]
ports = [{'id': port1_id, 'device_id': server_id,
'security_groups': [sg1_id]},
{'id': port2_id, 'device_id': server_id,
'security_groups': [sg2_id]}]
port_list = {'ports': ports}
sg1 = {'id': sg1_id, 'name': 'wol'}
sg2 = {'id': sg2_id, 'name': 'eor'}
security_groups_list = {'security_groups': [sg1, sg2]}
sg_bindings = {server_id: [{'name': 'wol'}, {'name': 'eor'}]}
self.moxed_client.list_ports(device_id=[server_id]).AndReturn(
port_list)
self.moxed_client.list_security_groups(id=[sg2_id, sg1_id]).AndReturn(
security_groups_list)
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
result = sg_api.get_instances_security_groups_bindings(
self.context, servers)
self.assertEqual(result, sg_bindings)
def _test_instances_security_group_bindings_scale(self, num_servers):
max_query = 150
sg1_id = '2f7ce969-1a73-4ef9-bbd6-c9a91780ecd4'
sg2_id = '20c89ce5-9388-4046-896e-64ffbd3eb584'
sg1 = {'id': sg1_id, 'name': 'wol'}
sg2 = {'id': sg2_id, 'name': 'eor'}
security_groups_list = {'security_groups': [sg1, sg2]}
servers = []
device_ids = []
ports = []
sg_bindings = {}
for i in xrange(0, num_servers):
server_id = "server-%d" % i
port_id = "port-%d" % i
servers.append({'id': server_id})
device_ids.append(server_id)
ports.append({'id': port_id,
'device_id': server_id,
'security_groups': [sg1_id, sg2_id]})
sg_bindings[server_id] = [{'name': 'wol'}, {'name': 'eor'}]
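        # Expect one list_ports() call per batch of up to max_query device
        # ids, mirroring how the driver chunks its port queries.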
for x in xrange(0, num_servers, max_query):
self.moxed_client.list_ports(
device_id=device_ids[x:x + max_query]).\
AndReturn({'ports': ports[x:x + max_query]})
self.moxed_client.list_security_groups(id=[sg2_id, sg1_id]).AndReturn(
security_groups_list)
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
result = sg_api.get_instances_security_groups_bindings(
self.context, servers)
self.assertEqual(result, sg_bindings)
def test_instances_security_group_bindings_less_than_max(self):
self._test_instances_security_group_bindings_scale(100)
def test_instances_security_group_bindings_max(self):
self._test_instances_security_group_bindings_scale(150)
def test_instances_security_group_bindings_more_then_max(self):
self._test_instances_security_group_bindings_scale(300)
def test_instances_security_group_bindings_with_hidden_sg(self):
servers = [{'id': 'server_1'}]
ports = [{'id': '1', 'device_id': 'dev_1', 'security_groups': ['1']},
{'id': '2', 'device_id': 'dev_1', 'security_groups': ['2']}]
port_list = {'ports': ports}
sg1 = {'id': '1', 'name': 'wol'}
sg2 = {'id': '2', 'name': 'eor'}
# User doesn't have access to sg2
security_groups_list = {'security_groups': [sg1]}
sg_bindings = {'dev_1': [{'name': 'wol'}]}
self.moxed_client.list_ports(device_id=['server_1']).AndReturn(
port_list)
self.moxed_client.list_security_groups(id=['1', '2']).AndReturn(
security_groups_list)
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
result = sg_api.get_instances_security_groups_bindings(
self.context, servers)
self.assertEqual(result, sg_bindings)
|
|
# Copyright 2011 NTT
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import calendar
import contextlib
import datetime
import os
import time
import mock
from mox3 import mox
from oslo.config import cfg
from oslo.serialization import jsonutils
from oslo.utils import timeutils
from oslo_concurrency import processutils
from nova import context
from nova import db
from nova import exception
from nova.network import driver
from nova.network import linux_net
from nova import objects
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
from nova import test
from nova import utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('share_dhcp_address', 'nova.objects.network')
CONF.import_opt('network_device_mtu', 'nova.objects.network')
HOST = "testhost"
instances = {'00000000-0000-0000-0000-0000000000000000':
{'id': 0,
'uuid': '00000000-0000-0000-0000-0000000000000000',
'host': 'fake_instance00',
'created_at': datetime.datetime(1955, 11, 5, 0, 0, 0),
'updated_at': datetime.datetime(1985, 10, 26, 1, 35, 0),
'hostname': 'fake_instance00'},
'00000000-0000-0000-0000-0000000000000001':
{'id': 1,
'uuid': '00000000-0000-0000-0000-0000000000000001',
'host': 'fake_instance01',
'created_at': datetime.datetime(1955, 11, 5, 0, 0, 0),
'updated_at': datetime.datetime(1985, 10, 26, 1, 35, 0),
'hostname': 'fake_instance01'},
'00000000-0000-0000-0000-0000000000000002':
{'id': 2,
'uuid': '00000000-0000-0000-0000-0000000000000002',
'host': 'fake_instance02',
'created_at': datetime.datetime(1955, 11, 5, 0, 0, 0),
'updated_at': datetime.datetime(1985, 10, 26, 1, 35, 0),
'hostname': 'really_long_fake_instance02_to_test_hostname_'
'truncation_when_too_long'}}
addresses = [{"address": "10.0.0.1"},
{"address": "10.0.0.2"},
{"address": "10.0.0.3"},
{"address": "10.0.0.4"},
{"address": "10.0.0.5"},
{"address": "10.0.0.6"}]
networks = [{'id': 0,
'uuid': "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
'label': 'test0',
'injected': False,
'multi_host': False,
'cidr': '192.168.0.0/24',
'cidr_v6': '2001:db8::/64',
'gateway_v6': '2001:db8::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa0',
'bridge_interface': 'fake_fa0',
'gateway': '192.168.0.1',
'broadcast': '192.168.0.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'dhcp_start': '192.168.100.1',
'vlan': None,
'host': None,
'project_id': 'fake_project',
'vpn_public_address': '192.168.0.2',
'mtu': None,
'dhcp_server': '192.168.0.1',
'enable_dhcp': True,
'share_address': False},
{'id': 1,
'uuid': "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
'label': 'test1',
'injected': False,
'multi_host': True,
'cidr': '192.168.1.0/24',
'cidr_v6': '2001:db9::/64',
'gateway_v6': '2001:db9::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa1',
'bridge_interface': 'fake_fa1',
'gateway': '192.168.1.1',
'broadcast': '192.168.1.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'dhcp_start': '192.168.100.1',
'vlan': None,
'host': None,
'project_id': 'fake_project',
'vpn_public_address': '192.168.1.2',
'mtu': None,
'dhcp_server': '192.168.1.1',
'enable_dhcp': True,
'share_address': False},
{'id': 2,
'uuid': "cccccccc-cccc-cccc-cccc-cccccccccccc",
'label': 'test2',
'injected': False,
'multi_host': True,
'cidr': '192.168.2.0/24',
'cidr_v6': '2001:db10::/64',
'gateway_v6': '2001:db10::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa2',
'bridge_interface': 'fake_fa2',
'gateway': '192.168.2.1',
'broadcast': '192.168.2.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'dhcp_start': '192.168.100.1',
'vlan': None,
'host': None,
'project_id': 'fake_project',
'vpn_public_address': '192.168.2.2',
'mtu': None,
'dhcp_server': '192.168.2.1',
'enable_dhcp': True,
'share_address': False}]
fixed_ips = [{'id': 0,
'network_id': 0,
'address': '192.168.0.100',
'instance_id': 0,
'allocated': True,
'leased': True,
'virtual_interface_id': 0,
'default_route': True,
'instance_uuid': '00000000-0000-0000-0000-0000000000000000',
'floating_ips': []},
{'id': 1,
'network_id': 1,
'address': '192.168.1.100',
'instance_id': 0,
'allocated': True,
'leased': True,
'virtual_interface_id': 1,
'default_route': False,
'instance_uuid': '00000000-0000-0000-0000-0000000000000000',
'floating_ips': []},
{'id': 2,
'network_id': 1,
'address': '192.168.0.101',
'instance_id': 1,
'allocated': True,
'leased': True,
'virtual_interface_id': 2,
'default_route': True,
'instance_uuid': '00000000-0000-0000-0000-0000000000000001',
'floating_ips': []},
{'id': 3,
'network_id': 0,
'address': '192.168.1.101',
'instance_id': 1,
'allocated': True,
'leased': True,
'virtual_interface_id': 3,
'default_route': False,
'instance_uuid': '00000000-0000-0000-0000-0000000000000001',
'floating_ips': []},
{'id': 4,
'network_id': 0,
'address': '192.168.0.102',
'instance_id': 0,
'allocated': True,
'leased': False,
'virtual_interface_id': 4,
'default_route': False,
'instance_uuid': '00000000-0000-0000-0000-0000000000000000',
'floating_ips': []},
{'id': 5,
'network_id': 1,
'address': '192.168.1.102',
'instance_id': 1,
'allocated': True,
'leased': False,
'virtual_interface_id': 5,
'default_route': False,
'instance_uuid': '00000000-0000-0000-0000-0000000000000001',
'floating_ips': []},
{'id': 6,
'network_id': 1,
'address': '192.168.1.103',
'instance_id': 1,
'allocated': False,
'leased': True,
'virtual_interface_id': 6,
'default_route': False,
'instance_uuid': '00000000-0000-0000-0000-0000000000000001',
'floating_ips': []},
{'id': 7,
'network_id': 2,
'address': '192.168.2.100',
'instance_id': 2,
'allocated': True,
'leased': False,
'virtual_interface_id': 7,
'default_route': False,
'instance_uuid': '00000000-0000-0000-0000-0000000000000002',
'floating_ips': []}]
vifs = [{'id': 0,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'address': 'DE:AD:BE:EF:00:00',
'uuid': '00000000-0000-0000-0000-0000000000000000',
'network_id': 0,
'instance_uuid': '00000000-0000-0000-0000-0000000000000000'},
{'id': 1,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'address': 'DE:AD:BE:EF:00:01',
'uuid': '00000000-0000-0000-0000-0000000000000001',
'network_id': 1,
'instance_uuid': '00000000-0000-0000-0000-0000000000000000'},
{'id': 2,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'address': 'DE:AD:BE:EF:00:02',
'uuid': '00000000-0000-0000-0000-0000000000000002',
'network_id': 1,
'instance_uuid': '00000000-0000-0000-0000-0000000000000001'},
{'id': 3,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'address': 'DE:AD:BE:EF:00:03',
'uuid': '00000000-0000-0000-0000-0000000000000003',
'network_id': 0,
'instance_uuid': '00000000-0000-0000-0000-0000000000000001'},
{'id': 4,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'address': 'DE:AD:BE:EF:00:04',
'uuid': '00000000-0000-0000-0000-0000000000000004',
'network_id': 0,
'instance_uuid': '00000000-0000-0000-0000-0000000000000000'},
{'id': 5,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'address': 'DE:AD:BE:EF:00:05',
'uuid': '00000000-0000-0000-0000-0000000000000005',
'network_id': 1,
'instance_uuid': '00000000-0000-0000-0000-0000000000000001'},
{'id': 6,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'address': 'DE:AD:BE:EF:00:06',
'uuid': '00000000-0000-0000-0000-0000000000000006',
'network_id': 1,
'instance_uuid': '00000000-0000-0000-0000-0000000000000001'},
{'id': 7,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'address': 'DE:AD:BE:EF:00:07',
'uuid': '00000000-0000-0000-0000-0000000000000007',
'network_id': 2,
'instance_uuid': '00000000-0000-0000-0000-0000000000000002'}]
def get_associated(context, network_id, host=None, address=None):
result = []
for datum in fixed_ips:
if (datum['network_id'] == network_id
and datum['instance_uuid'] is not None
and datum['virtual_interface_id'] is not None):
instance = instances[datum['instance_uuid']]
if host and host != instance['host']:
continue
if address and address != datum['address']:
continue
cleaned = {}
cleaned['address'] = datum['address']
cleaned['instance_uuid'] = datum['instance_uuid']
cleaned['network_id'] = datum['network_id']
cleaned['vif_id'] = datum['virtual_interface_id']
vif = vifs[datum['virtual_interface_id']]
cleaned['vif_address'] = vif['address']
cleaned['instance_hostname'] = instance['hostname']
cleaned['instance_updated'] = instance['updated_at']
cleaned['instance_created'] = instance['created_at']
cleaned['allocated'] = datum['allocated']
cleaned['leased'] = datum['leased']
cleaned['default_route'] = datum['default_route']
result.append(cleaned)
return result
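# Illustrative note (derived from the fixtures above, not part of the original
# module): for network_id=0 the associated rows are 192.168.0.100 (vif
# DE:AD:BE:EF:00:00), 192.168.1.101 (vif DE:AD:BE:EF:00:03) and 192.168.0.102
# (vif DE:AD:BE:EF:00:04), which is what the get_dhcp_hosts/get_dns_hosts tests
# below expect, e.g.:
#
#   rows = get_associated(None, 0)
#   [r['address'] for r in rows]
#   # -> ['192.168.0.100', '192.168.1.101', '192.168.0.102']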
class LinuxNetworkTestCase(test.NoDBTestCase):
REQUIRES_LOCKING = True
def setUp(self):
super(LinuxNetworkTestCase, self).setUp()
self.driver = driver.load_network_driver()
self.driver.db = db
self.context = context.RequestContext('testuser', 'testproject',
is_admin=True)
def get_vifs(_context, instance_uuid, use_slave):
return [vif for vif in vifs if vif['instance_uuid'] ==
instance_uuid]
def get_instance(_context, instance_id):
return instances[instance_id]
self.stubs.Set(db, 'virtual_interface_get_by_instance', get_vifs)
self.stubs.Set(db, 'instance_get', get_instance)
self.stubs.Set(db, 'network_get_associated_fixed_ips', get_associated)
def _test_add_snat_rule(self, expected, is_external):
def verify_add_rule(chain, rule):
self.assertEqual(chain, 'snat')
self.assertEqual(rule, expected)
self.called = True
self.stubs.Set(linux_net.iptables_manager.ipv4['nat'],
'add_rule', verify_add_rule)
self.called = False
linux_net.add_snat_rule('10.0.0.0/24', is_external)
if expected:
self.assertTrue(self.called)
def test_add_snat_rule_no_ext(self):
self.flags(routing_source_ip='10.10.10.1')
expected = ('-s 10.0.0.0/24 -d 0.0.0.0/0 '
'-j SNAT --to-source 10.10.10.1 -o eth0')
self._test_add_snat_rule(expected, False)
def test_add_snat_rule_ext(self):
self.flags(routing_source_ip='10.10.10.1')
expected = ()
self._test_add_snat_rule(expected, True)
def test_add_snat_rule_snat_range_no_ext(self):
self.flags(routing_source_ip='10.10.10.1',
force_snat_range=['10.10.10.0/24'])
expected = ('-s 10.0.0.0/24 -d 0.0.0.0/0 '
'-j SNAT --to-source 10.10.10.1 -o eth0')
self._test_add_snat_rule(expected, False)
def test_add_snat_rule_snat_range_ext(self):
self.flags(routing_source_ip='10.10.10.1',
force_snat_range=['10.10.10.0/24'])
expected = ('-s 10.0.0.0/24 -d 10.10.10.0/24 '
'-j SNAT --to-source 10.10.10.1')
self._test_add_snat_rule(expected, True)
def test_update_dhcp_for_nw00(self):
self.flags(use_single_default_gateway=True)
self.mox.StubOutWithMock(self.driver, 'write_to_file')
self.mox.StubOutWithMock(fileutils, 'ensure_tree')
self.mox.StubOutWithMock(os, 'chmod')
self.driver.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
self.driver.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
fileutils.ensure_tree(mox.IgnoreArg())
fileutils.ensure_tree(mox.IgnoreArg())
fileutils.ensure_tree(mox.IgnoreArg())
fileutils.ensure_tree(mox.IgnoreArg())
fileutils.ensure_tree(mox.IgnoreArg())
fileutils.ensure_tree(mox.IgnoreArg())
fileutils.ensure_tree(mox.IgnoreArg())
os.chmod(mox.IgnoreArg(), mox.IgnoreArg())
os.chmod(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.driver.update_dhcp(self.context, "eth0", networks[0])
def test_update_dhcp_for_nw01(self):
self.flags(use_single_default_gateway=True)
self.mox.StubOutWithMock(self.driver, 'write_to_file')
self.mox.StubOutWithMock(fileutils, 'ensure_tree')
self.mox.StubOutWithMock(os, 'chmod')
self.driver.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
self.driver.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
fileutils.ensure_tree(mox.IgnoreArg())
fileutils.ensure_tree(mox.IgnoreArg())
fileutils.ensure_tree(mox.IgnoreArg())
fileutils.ensure_tree(mox.IgnoreArg())
fileutils.ensure_tree(mox.IgnoreArg())
fileutils.ensure_tree(mox.IgnoreArg())
fileutils.ensure_tree(mox.IgnoreArg())
os.chmod(mox.IgnoreArg(), mox.IgnoreArg())
os.chmod(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
        self.driver.update_dhcp(self.context, "eth0", networks[1])
def _get_fixedips(self, network, host=None):
return objects.FixedIPList.get_by_network(self.context,
network,
host=host)
def test_get_dhcp_hosts_for_nw00(self):
self.flags(use_single_default_gateway=True)
expected = (
"DE:AD:BE:EF:00:00,fake_instance00.novalocal,"
"192.168.0.100,net:NW-0\n"
"DE:AD:BE:EF:00:03,fake_instance01.novalocal,"
"192.168.1.101,net:NW-3\n"
"DE:AD:BE:EF:00:04,fake_instance00.novalocal,"
"192.168.0.102,net:NW-4"
)
fixedips = self._get_fixedips(networks[0])
actual_hosts = self.driver.get_dhcp_hosts(self.context, networks[0],
fixedips)
self.assertEqual(actual_hosts, expected)
def test_get_dhcp_hosts_for_nw01(self):
self.flags(use_single_default_gateway=True)
expected = (
"DE:AD:BE:EF:00:02,fake_instance01.novalocal,"
"192.168.0.101,net:NW-2\n"
"DE:AD:BE:EF:00:05,fake_instance01.novalocal,"
"192.168.1.102,net:NW-5"
)
fixedips = self._get_fixedips(networks[1], host='fake_instance01')
actual_hosts = self.driver.get_dhcp_hosts(self.context, networks[1],
fixedips)
self.assertEqual(actual_hosts, expected)
def test_get_dns_hosts_for_nw00(self):
expected = (
"192.168.0.100\tfake_instance00.novalocal\n"
"192.168.1.101\tfake_instance01.novalocal\n"
"192.168.0.102\tfake_instance00.novalocal"
)
actual_hosts = self.driver.get_dns_hosts(self.context, networks[0])
self.assertEqual(actual_hosts, expected)
def test_get_dns_hosts_for_nw01(self):
expected = (
"192.168.1.100\tfake_instance00.novalocal\n"
"192.168.0.101\tfake_instance01.novalocal\n"
"192.168.1.102\tfake_instance01.novalocal"
)
actual_hosts = self.driver.get_dns_hosts(self.context, networks[1])
self.assertEqual(actual_hosts, expected)
def test_get_dhcp_opts_for_nw00(self):
self.flags(use_single_default_gateway=True)
expected_opts = 'NW-0,3,192.168.0.1\nNW-3,3\nNW-4,3'
fixedips = self._get_fixedips(networks[0])
actual_opts = self.driver.get_dhcp_opts(self.context, networks[0],
fixedips)
self.assertEqual(actual_opts, expected_opts)
def test_get_dhcp_opts_for_nw00_no_single_default_gateway(self):
self.flags(use_single_default_gateway=False)
expected_opts = '3,192.168.0.1'
fixedips = self._get_fixedips(networks[0])
actual_opts = self.driver.get_dhcp_opts(self.context, networks[0],
fixedips)
self.assertEqual(actual_opts, expected_opts)
def test_get_dhcp_opts_for_nw01(self):
self.flags(use_single_default_gateway=True)
expected_opts = "NW-2,3,192.168.1.1\nNW-5,3"
fixedips = self._get_fixedips(networks[1], 'fake_instance01')
actual_opts = self.driver.get_dhcp_opts(self.context, networks[1],
fixedips)
self.assertEqual(actual_opts, expected_opts)
def test_get_dhcp_leases_for_nw00(self):
timestamp = timeutils.utcnow()
seconds_since_epoch = calendar.timegm(timestamp.utctimetuple())
leases = self.driver.get_dhcp_leases(self.context, networks[0])
leases = leases.split('\n')
for lease in leases:
lease = lease.split(' ')
data = get_associated(self.context, 0, address=lease[2])[0]
self.assertTrue(data['allocated'])
self.assertTrue(data['leased'])
            self.assertTrue(int(lease[0]) > seconds_since_epoch)
self.assertEqual(data['vif_address'], lease[1])
self.assertEqual(data['address'], lease[2])
self.assertEqual(data['instance_hostname'], lease[3])
self.assertEqual('*', lease[4])
def test_get_dhcp_leases_for_nw01(self):
self.flags(host='fake_instance01')
timestamp = timeutils.utcnow()
seconds_since_epoch = calendar.timegm(timestamp.utctimetuple())
leases = self.driver.get_dhcp_leases(self.context, networks[1])
leases = leases.split('\n')
for lease in leases:
lease = lease.split(' ')
data = get_associated(self.context, 1, address=lease[2])[0]
self.assertTrue(data['leased'])
            self.assertTrue(int(lease[0]) > seconds_since_epoch)
self.assertEqual(data['vif_address'], lease[1])
self.assertEqual(data['address'], lease[2])
self.assertEqual(data['instance_hostname'], lease[3])
self.assertEqual('*', lease[4])
def test_dhcp_opts_not_default_gateway_network(self):
expected = "NW-0,3"
fixedip = objects.FixedIPList.get_by_network(self.context,
{'id': 0})[0]
actual = self.driver._host_dhcp_opts(fixedip.virtual_interface_id)
self.assertEqual(actual, expected)
def test_host_dhcp_without_default_gateway_network(self):
expected = ','.join(['DE:AD:BE:EF:00:00',
'fake_instance00.novalocal',
'192.168.0.100'])
fixedip = objects.FixedIPList.get_by_network(self.context,
{'id': 0})[0]
actual = self.driver._host_dhcp(fixedip)
self.assertEqual(actual, expected)
def test_host_dhcp_truncated_hostname(self):
expected = ','.join(['DE:AD:BE:EF:00:07',
're-ng_fake_instance02_to_test_hostname_'
'truncation_when_too_long.novalocal',
'192.168.2.100'])
fixedip = objects.FixedIPList.get_by_network(self.context,
{'id': 2})[0]
actual = self.driver._host_dhcp(fixedip)
self.assertEqual(expected, actual)
def test_host_dns_without_default_gateway_network(self):
expected = "192.168.0.100\tfake_instance00.novalocal"
fixedip = objects.FixedIPList.get_by_network(self.context,
{'id': 0})[0]
actual = self.driver._host_dns(fixedip)
self.assertEqual(actual, expected)
def test_linux_bridge_driver_plug(self):
"""Makes sure plug doesn't drop FORWARD by default.
Ensures bug 890195 doesn't reappear.
"""
def fake_execute(*args, **kwargs):
return "", ""
self.stubs.Set(utils, 'execute', fake_execute)
def verify_add_rule(chain, rule):
self.assertEqual(chain, 'FORWARD')
self.assertIn('ACCEPT', rule)
self.stubs.Set(linux_net.iptables_manager.ipv4['filter'],
'add_rule', verify_add_rule)
driver = linux_net.LinuxBridgeInterfaceDriver()
driver.plug({"bridge": "br100", "bridge_interface": "eth0",
"share_address": False}, "fakemac")
def test_linux_ovs_driver_plug_exception(self):
self.flags(fake_network=False)
def fake_execute(*args, **kwargs):
raise processutils.ProcessExecutionError('error')
def fake_device_exists(*args, **kwargs):
return False
self.stubs.Set(utils, 'execute', fake_execute)
self.stubs.Set(linux_net, 'device_exists', fake_device_exists)
driver = linux_net.LinuxOVSInterfaceDriver()
self.assertRaises(exception.AgentError,
driver.plug, {'uuid': 'fake_network_uuid'},
'fake_mac')
def test_vlan_override(self):
"""Makes sure vlan_interface flag overrides network bridge_interface.
Allows heterogeneous networks a la bug 833426
"""
driver = linux_net.LinuxBridgeInterfaceDriver()
info = {}
@staticmethod
def test_ensure(vlan, bridge, interface, network, mac_address, mtu):
info['passed_interface'] = interface
self.stubs.Set(linux_net.LinuxBridgeInterfaceDriver,
'ensure_vlan_bridge', test_ensure)
network = {
"bridge": "br100",
"bridge_interface": "base_interface",
"share_address": False,
"vlan": "fake"
}
self.flags(vlan_interface="")
driver.plug(network, "fakemac")
self.assertEqual(info['passed_interface'], "base_interface")
self.flags(vlan_interface="override_interface")
driver.plug(network, "fakemac")
self.assertEqual(info['passed_interface'], "override_interface")
driver.plug(network, "fakemac")
def test_flat_override(self):
"""Makes sure flat_interface flag overrides network bridge_interface.
Allows heterogeneous networks a la bug 833426
"""
driver = linux_net.LinuxBridgeInterfaceDriver()
info = {}
@staticmethod
def test_ensure(bridge, interface, network, gateway):
info['passed_interface'] = interface
self.stubs.Set(linux_net.LinuxBridgeInterfaceDriver,
'ensure_bridge', test_ensure)
network = {
"bridge": "br100",
"bridge_interface": "base_interface",
"share_address": False,
}
driver.plug(network, "fakemac")
self.assertEqual(info['passed_interface'], "base_interface")
self.flags(flat_interface="override_interface")
driver.plug(network, "fakemac")
self.assertEqual(info['passed_interface'], "override_interface")
def _test_dnsmasq_execute(self, extra_expected=None):
network_ref = {'id': 'fake',
'label': 'fake',
'gateway': '10.0.0.1',
'multi_host': False,
'cidr': '10.0.0.0/24',
'netmask': '255.255.255.0',
'dns1': '8.8.4.4',
'dhcp_start': '1.0.0.2',
'dhcp_server': '10.0.0.1',
'share_address': False}
def fake_execute(*args, **kwargs):
executes.append(args)
return "", ""
def fake_add_dhcp_mangle_rule(*args, **kwargs):
executes.append(args)
self.stubs.Set(linux_net, '_execute', fake_execute)
self.stubs.Set(linux_net, '_add_dhcp_mangle_rule',
fake_add_dhcp_mangle_rule)
self.stubs.Set(os, 'chmod', lambda *a, **kw: None)
self.stubs.Set(linux_net, 'write_to_file', lambda *a, **kw: None)
self.stubs.Set(linux_net, '_dnsmasq_pid_for', lambda *a, **kw: None)
dev = 'br100'
default_domain = CONF.dhcp_domain
for domain in ('', default_domain):
executes = []
self.flags(dhcp_domain=domain)
fixedips = self._get_fixedips(network_ref)
linux_net.restart_dhcp(self.context, dev, network_ref, fixedips)
expected = ['env',
'CONFIG_FILE=%s' % jsonutils.dumps(CONF.dhcpbridge_flagfile),
'NETWORK_ID=fake',
'dnsmasq',
'--strict-order',
'--bind-interfaces',
'--conf-file=%s' % CONF.dnsmasq_config_file,
'--pid-file=%s' % linux_net._dhcp_file(dev, 'pid'),
'--dhcp-optsfile=%s' % linux_net._dhcp_file(dev, 'opts'),
'--listen-address=%s' % network_ref['dhcp_server'],
'--except-interface=lo',
"--dhcp-range=set:%s,%s,static,%s,%ss" % (network_ref['label'],
network_ref['dhcp_start'],
network_ref['netmask'],
CONF.dhcp_lease_time),
'--dhcp-lease-max=256',
'--dhcp-hostsfile=%s' % linux_net._dhcp_file(dev, 'conf'),
'--dhcp-script=%s' % CONF.dhcpbridge,
'--no-hosts',
'--leasefile-ro']
if CONF.dhcp_domain:
expected.append('--domain=%s' % CONF.dhcp_domain)
if extra_expected:
expected += extra_expected
self.assertEqual([(dev,), tuple(expected)], executes)
def test_dnsmasq_execute(self):
self._test_dnsmasq_execute()
def test_dnsmasq_execute_dns_servers(self):
self.flags(dns_server=['1.1.1.1', '2.2.2.2'])
expected = [
'--no-resolv',
'--server=1.1.1.1',
'--server=2.2.2.2',
]
self._test_dnsmasq_execute(expected)
def test_dnsmasq_execute_use_network_dns_servers(self):
self.flags(use_network_dns_servers=True)
expected = [
'--no-resolv',
'--server=8.8.4.4',
]
self._test_dnsmasq_execute(expected)
def test_isolated_host(self):
self.flags(fake_network=False,
share_dhcp_address=True)
# NOTE(vish): use a fresh copy of the manager for each test
self.stubs.Set(linux_net, 'iptables_manager',
linux_net.IptablesManager())
self.stubs.Set(linux_net, 'binary_name', 'test')
executes = []
def fake_execute(*args, **kwargs):
executes.append(args)
return "", ""
self.stubs.Set(utils, 'execute', fake_execute)
driver = linux_net.LinuxBridgeInterfaceDriver()
@staticmethod
def fake_ensure(bridge, interface, network, gateway):
return bridge
self.stubs.Set(linux_net.LinuxBridgeInterfaceDriver,
'ensure_bridge', fake_ensure)
iface = 'eth0'
dhcp = '192.168.1.1'
network = {'dhcp_server': dhcp,
'share_address': False,
'bridge': 'br100',
'bridge_interface': iface}
driver.plug(network, 'fakemac')
expected = [
('ebtables', '-t', 'filter', '-D', 'INPUT', '-p', 'ARP', '-i',
iface, '--arp-ip-dst', dhcp, '-j', 'DROP'),
('ebtables', '-t', 'filter', '-I', 'INPUT', '-p', 'ARP', '-i',
iface, '--arp-ip-dst', dhcp, '-j', 'DROP'),
('ebtables', '-t', 'filter', '-D', 'OUTPUT', '-p', 'ARP', '-o',
iface, '--arp-ip-src', dhcp, '-j', 'DROP'),
('ebtables', '-t', 'filter', '-I', 'OUTPUT', '-p', 'ARP', '-o',
iface, '--arp-ip-src', dhcp, '-j', 'DROP'),
('ebtables', '-t', 'filter', '-D', 'FORWARD', '-p', 'IPv4', '-i',
iface, '--ip-protocol', 'udp', '--ip-destination-port', '67:68',
'-j', 'DROP'),
('ebtables', '-t', 'filter', '-I', 'FORWARD', '-p', 'IPv4', '-i',
iface, '--ip-protocol', 'udp', '--ip-destination-port', '67:68',
'-j', 'DROP'),
('ebtables', '-t', 'filter', '-D', 'FORWARD', '-p', 'IPv4', '-o',
iface, '--ip-protocol', 'udp', '--ip-destination-port', '67:68',
'-j', 'DROP'),
('ebtables', '-t', 'filter', '-I', 'FORWARD', '-p', 'IPv4', '-o',
iface, '--ip-protocol', 'udp', '--ip-destination-port', '67:68',
'-j', 'DROP'),
('iptables-save', '-c'),
('iptables-restore', '-c'),
('ip6tables-save', '-c'),
('ip6tables-restore', '-c'),
]
self.assertEqual(executes, expected)
executes = []
@staticmethod
def fake_remove(bridge, gateway):
return
self.stubs.Set(linux_net.LinuxBridgeInterfaceDriver,
'remove_bridge', fake_remove)
driver.unplug(network)
expected = [
('ebtables', '-t', 'filter', '-D', 'INPUT', '-p', 'ARP', '-i',
iface, '--arp-ip-dst', dhcp, '-j', 'DROP'),
('ebtables', '-t', 'filter', '-D', 'OUTPUT', '-p', 'ARP', '-o',
iface, '--arp-ip-src', dhcp, '-j', 'DROP'),
('ebtables', '-t', 'filter', '-D', 'FORWARD', '-p', 'IPv4', '-i',
iface, '--ip-protocol', 'udp', '--ip-destination-port', '67:68',
'-j', 'DROP'),
('ebtables', '-t', 'filter', '-D', 'FORWARD', '-p', 'IPv4', '-o',
iface, '--ip-protocol', 'udp', '--ip-destination-port', '67:68',
'-j', 'DROP'),
]
self.assertEqual(executes, expected)
def _test_initialize_gateway(self, existing, expected, routes=''):
self.flags(fake_network=False)
executes = []
def fake_execute(*args, **kwargs):
executes.append(args)
if args[0] == 'ip' and args[1] == 'addr' and args[2] == 'show':
return existing, ""
if args[0] == 'ip' and args[1] == 'route' and args[2] == 'show':
return routes, ""
if args[0] == 'sysctl':
return '1\n', ''
self.stubs.Set(utils, 'execute', fake_execute)
network = {'dhcp_server': '192.168.1.1',
'cidr': '192.168.1.0/24',
'broadcast': '192.168.1.255',
'cidr_v6': '2001:db8::/64'}
self.driver.initialize_gateway_device('eth0', network)
self.assertEqual(executes, expected)
def test_initialize_gateway_moves_wrong_ip(self):
existing = ("2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> "
" mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n"
" link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n"
" inet 192.168.0.1/24 brd 192.168.0.255 scope global eth0\n"
" inet6 dead::beef:dead:beef:dead/64 scope link\n"
" valid_lft forever preferred_lft forever\n")
expected = [
('sysctl', '-n', 'net.ipv4.ip_forward'),
('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'),
('ip', 'route', 'show', 'dev', 'eth0'),
('ip', 'addr', 'del', '192.168.0.1/24',
'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'),
('ip', 'addr', 'add', '192.168.1.1/24',
'brd', '192.168.1.255', 'dev', 'eth0'),
('ip', 'addr', 'add', '192.168.0.1/24',
'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'),
('ip', '-f', 'inet6', 'addr', 'change',
'2001:db8::/64', 'dev', 'eth0'),
]
self._test_initialize_gateway(existing, expected)
def test_initialize_gateway_resets_route(self):
routes = ("default via 192.168.0.1 dev eth0\n"
"192.168.100.0/24 via 192.168.0.254 dev eth0 proto static\n")
existing = ("2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> "
" mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n"
" link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n"
" inet 192.168.0.1/24 brd 192.168.0.255 scope global eth0\n"
" inet6 dead::beef:dead:beef:dead/64 scope link\n"
" valid_lft forever preferred_lft forever\n")
expected = [
('sysctl', '-n', 'net.ipv4.ip_forward'),
('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'),
('ip', 'route', 'show', 'dev', 'eth0'),
('ip', 'route', 'del', 'default', 'dev', 'eth0'),
('ip', 'route', 'del', '192.168.100.0/24', 'dev', 'eth0'),
('ip', 'addr', 'del', '192.168.0.1/24',
'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'),
('ip', 'addr', 'add', '192.168.1.1/24',
'brd', '192.168.1.255', 'dev', 'eth0'),
('ip', 'addr', 'add', '192.168.0.1/24',
'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'),
('ip', 'route', 'add', 'default', 'via', '192.168.0.1',
'dev', 'eth0'),
('ip', 'route', 'add', '192.168.100.0/24', 'via', '192.168.0.254',
'dev', 'eth0', 'proto', 'static'),
('ip', '-f', 'inet6', 'addr', 'change',
'2001:db8::/64', 'dev', 'eth0'),
]
self._test_initialize_gateway(existing, expected, routes)
def test_initialize_gateway_no_move_right_ip(self):
existing = ("2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> "
" mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n"
" link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n"
" inet 192.168.1.1/24 brd 192.168.1.255 scope global eth0\n"
" inet 192.168.0.1/24 brd 192.168.0.255 scope global eth0\n"
" inet6 dead::beef:dead:beef:dead/64 scope link\n"
" valid_lft forever preferred_lft forever\n")
expected = [
('sysctl', '-n', 'net.ipv4.ip_forward'),
('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'),
('ip', '-f', 'inet6', 'addr', 'change',
'2001:db8::/64', 'dev', 'eth0'),
]
self._test_initialize_gateway(existing, expected)
def test_initialize_gateway_add_if_blank(self):
existing = ("2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> "
" mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n"
" link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n"
" inet6 dead::beef:dead:beef:dead/64 scope link\n"
" valid_lft forever preferred_lft forever\n")
expected = [
('sysctl', '-n', 'net.ipv4.ip_forward'),
('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'),
('ip', 'route', 'show', 'dev', 'eth0'),
('ip', 'addr', 'add', '192.168.1.1/24',
'brd', '192.168.1.255', 'dev', 'eth0'),
('ip', '-f', 'inet6', 'addr', 'change',
'2001:db8::/64', 'dev', 'eth0'),
]
self._test_initialize_gateway(existing, expected)
def test_ensure_floating_no_duplicate_forwards(self):
ln = linux_net
self.stubs.Set(ln.iptables_manager, 'apply', lambda: None)
self.stubs.Set(ln, 'ensure_ebtables_rules', lambda *a, **kw: None)
net = {'bridge': 'br100', 'cidr': '10.0.0.0/24'}
ln.ensure_floating_forward('10.10.10.10', '10.0.0.1', 'eth0', net)
ln.ensure_floating_forward('10.10.10.11', '10.0.0.10', 'eth0', net)
two_forward_rules = len(linux_net.iptables_manager.ipv4['nat'].rules)
ln.ensure_floating_forward('10.10.10.10', '10.0.0.3', 'eth0', net)
dup_forward_rules = len(linux_net.iptables_manager.ipv4['nat'].rules)
self.assertEqual(two_forward_rules, dup_forward_rules)
def test_apply_ran(self):
manager = linux_net.IptablesManager()
manager.iptables_apply_deferred = False
self.mox.StubOutWithMock(manager, '_apply')
manager._apply()
self.mox.ReplayAll()
empty_ret = manager.apply()
self.assertIsNone(empty_ret)
def test_apply_not_run(self):
manager = linux_net.IptablesManager()
manager.iptables_apply_deferred = True
self.mox.StubOutWithMock(manager, '_apply')
self.mox.ReplayAll()
manager.apply()
def test_deferred_unset_apply_ran(self):
manager = linux_net.IptablesManager()
manager.iptables_apply_deferred = True
self.mox.StubOutWithMock(manager, '_apply')
manager._apply()
self.mox.ReplayAll()
manager.defer_apply_off()
self.assertFalse(manager.iptables_apply_deferred)
def _test_add_metadata_accept_rule(self, expected):
def verify_add_rule(chain, rule):
self.assertEqual(chain, 'INPUT')
self.assertEqual(expected, rule)
self.stubs.Set(linux_net.iptables_manager.ipv4['filter'],
'add_rule', verify_add_rule)
linux_net.metadata_accept()
def test_metadata_accept(self):
self.flags(metadata_port='8775')
self.flags(metadata_host='10.10.10.1')
expected = ('-s 0.0.0.0/0 -p tcp -m tcp --dport 8775 '
'-d 10.10.10.1 -j ACCEPT')
self._test_add_metadata_accept_rule(expected)
def test_metadata_accept_localhost(self):
self.flags(metadata_port='8775')
self.flags(metadata_host='127.0.0.1')
expected = ('-s 0.0.0.0/0 -p tcp -m tcp --dport 8775 '
'-m addrtype --dst-type LOCAL -j ACCEPT')
self._test_add_metadata_accept_rule(expected)
def _test_add_metadata_forward_rule(self, expected):
def verify_add_rule(chain, rule):
self.assertEqual(chain, 'PREROUTING')
self.assertEqual(expected, rule)
self.stubs.Set(linux_net.iptables_manager.ipv4['nat'],
'add_rule', verify_add_rule)
linux_net.metadata_forward()
def test_metadata_forward(self):
self.flags(metadata_port='8775')
self.flags(metadata_host='10.10.10.1')
expected = ('-s 0.0.0.0/0 -d 169.254.169.254/32 -p tcp -m tcp '
'--dport 80 -j DNAT --to-destination 10.10.10.1:8775')
self._test_add_metadata_forward_rule(expected)
def test_metadata_forward_localhost(self):
self.flags(metadata_port='8775')
self.flags(metadata_host='127.0.0.1')
expected = ('-s 0.0.0.0/0 -d 169.254.169.254/32 -p tcp -m tcp '
'--dport 80 -j REDIRECT --to-ports 8775')
self._test_add_metadata_forward_rule(expected)
def test_ensure_bridge_brings_up_interface(self):
calls = {
'device_exists': [mock.call('bridge')],
'_execute': [
mock.call('brctl', 'addif', 'bridge', 'eth0',
run_as_root=True, check_exit_code=False),
mock.call('ip', 'link', 'set', 'eth0', 'up',
run_as_root=True, check_exit_code=False),
mock.call('ip', 'route', 'show', 'dev', 'eth0'),
mock.call('ip', 'addr', 'show', 'dev', 'eth0', 'scope',
'global'),
]
}
with contextlib.nested(
mock.patch.object(linux_net, 'device_exists', return_value=True),
mock.patch.object(linux_net, '_execute', return_value=('', ''))
) as (device_exists, _execute):
driver = linux_net.LinuxBridgeInterfaceDriver()
driver.ensure_bridge('bridge', 'eth0')
device_exists.assert_has_calls(calls['device_exists'])
_execute.assert_has_calls(calls['_execute'])
def test_ensure_bridge_brclt_addif_exception(self):
def fake_execute(*cmd, **kwargs):
if ('brctl', 'addif', 'bridge', 'eth0') == cmd:
return ('', 'some error happens')
else:
return ('', '')
with contextlib.nested(
mock.patch.object(linux_net, 'device_exists', return_value=True),
mock.patch.object(linux_net, '_execute', fake_execute)
) as (device_exists, _):
driver = linux_net.LinuxBridgeInterfaceDriver()
self.assertRaises(exception.NovaException,
driver.ensure_bridge, 'bridge', 'eth0')
device_exists.assert_called_once_with('bridge')
def test_set_device_mtu_configured(self):
self.flags(network_device_mtu=10000)
calls = [
mock.call('ip', 'link', 'set', 'fake-dev', 'mtu',
10000, run_as_root=True,
check_exit_code=[0, 2, 254])
]
with mock.patch.object(utils, 'execute', return_value=('', '')) as ex:
linux_net._set_device_mtu('fake-dev')
ex.assert_has_calls(calls)
def test_set_device_mtu_default(self):
calls = []
with mock.patch.object(utils, 'execute', return_value=('', '')) as ex:
linux_net._set_device_mtu('fake-dev')
ex.assert_has_calls(calls)
def _ovs_vif_port(self, calls):
with mock.patch.object(utils, 'execute', return_value=('', '')) as ex:
linux_net.create_ovs_vif_port('fake-bridge', 'fake-dev',
'fake-iface-id', 'fake-mac',
'fake-instance-uuid')
ex.assert_has_calls(calls)
def test_ovs_vif_port(self):
calls = [
mock.call('ovs-vsctl', '--timeout=120', '--', '--if-exists',
'del-port', 'fake-dev', '--', 'add-port',
'fake-bridge', 'fake-dev',
'--', 'set', 'Interface', 'fake-dev',
'external-ids:iface-id=fake-iface-id',
'external-ids:iface-status=active',
'external-ids:attached-mac=fake-mac',
'external-ids:vm-uuid=fake-instance-uuid',
run_as_root=True)
]
self._ovs_vif_port(calls)
def test_ovs_vif_port_with_mtu(self):
self.flags(network_device_mtu=10000)
calls = [
mock.call('ovs-vsctl', '--timeout=120', '--', '--if-exists',
'del-port', 'fake-dev', '--', 'add-port',
'fake-bridge', 'fake-dev',
'--', 'set', 'Interface', 'fake-dev',
'external-ids:iface-id=fake-iface-id',
'external-ids:iface-status=active',
'external-ids:attached-mac=fake-mac',
'external-ids:vm-uuid=fake-instance-uuid',
run_as_root=True),
mock.call('ip', 'link', 'set', 'fake-dev', 'mtu',
10000, run_as_root=True,
check_exit_code=[0, 2, 254])
]
self._ovs_vif_port(calls)
def _create_veth_pair(self, calls):
with mock.patch.object(utils, 'execute', return_value=('', '')) as ex:
linux_net._create_veth_pair('fake-dev1', 'fake-dev2')
ex.assert_has_calls(calls)
def test_create_veth_pair(self):
calls = [
mock.call('ip', 'link', 'add', 'fake-dev1', 'type', 'veth',
'peer', 'name', 'fake-dev2', run_as_root=True),
mock.call('ip', 'link', 'set', 'fake-dev1', 'up',
run_as_root=True),
mock.call('ip', 'link', 'set', 'fake-dev1', 'promisc', 'on',
run_as_root=True),
mock.call('ip', 'link', 'set', 'fake-dev2', 'up',
run_as_root=True),
mock.call('ip', 'link', 'set', 'fake-dev2', 'promisc', 'on',
run_as_root=True)
]
self._create_veth_pair(calls)
def test_create_veth_pair_with_mtu(self):
self.flags(network_device_mtu=10000)
calls = [
mock.call('ip', 'link', 'add', 'fake-dev1', 'type', 'veth',
'peer', 'name', 'fake-dev2', run_as_root=True),
mock.call('ip', 'link', 'set', 'fake-dev1', 'up',
run_as_root=True),
mock.call('ip', 'link', 'set', 'fake-dev1', 'promisc', 'on',
run_as_root=True),
mock.call('ip', 'link', 'set', 'fake-dev1', 'mtu',
10000, run_as_root=True,
check_exit_code=[0, 2, 254]),
mock.call('ip', 'link', 'set', 'fake-dev2', 'up',
run_as_root=True),
mock.call('ip', 'link', 'set', 'fake-dev2', 'promisc', 'on',
run_as_root=True),
mock.call('ip', 'link', 'set', 'fake-dev2', 'mtu',
10000, run_as_root=True,
check_exit_code=[0, 2, 254])
]
self._create_veth_pair(calls)
def test_exec_ebtables_success(self):
executes = []
def fake_execute(*args, **kwargs):
executes.append(args)
return "", ""
self.stubs.Set(self.driver, '_execute', fake_execute)
self.driver._exec_ebtables('fake')
self.assertEqual(1, len(executes))
self.mox.UnsetStubs()
def test_exec_ebtables_fail_all(self):
executes = []
def fake_sleep(interval):
pass
def fake_execute(*args, **kwargs):
executes.append(args)
raise processutils.ProcessExecutionError('error')
self.stubs.Set(time, 'sleep', fake_sleep)
self.stubs.Set(self.driver, '_execute', fake_execute)
self.assertRaises(processutils.ProcessExecutionError,
self.driver._exec_ebtables, 'fake')
max_calls = CONF.ebtables_exec_attempts
self.assertEqual(max_calls, len(executes))
self.mox.UnsetStubs()
def test_exec_ebtables_fail_once(self):
executes = []
def fake_sleep(interval):
pass
def fake_execute(*args, **kwargs):
executes.append(args)
if len(executes) == 1:
raise processutils.ProcessExecutionError('error')
else:
return "", ""
self.stubs.Set(time, 'sleep', fake_sleep)
self.stubs.Set(self.driver, '_execute', fake_execute)
self.driver._exec_ebtables('fake')
self.assertEqual(2, len(executes))
self.mox.UnsetStubs()
|
|
# encoding: utf-8
"""
Image part objects, including Image
"""
import hashlib
import os
import posixpath
try:
from PIL import Image as PIL_Image
except ImportError:
import Image as PIL_Image
from StringIO import StringIO
from pptx.opc.package import Part
from pptx.opc.packuri import PackURI
from pptx.opc.spec import image_content_types
from pptx.parts.part import PartCollection
from pptx.util import Px
class Image(Part):
"""
Return new Image part instance. *file* may be |None|, a path to a file (a
string), or a file-like object. If *file* is |None|, no image is loaded
and :meth:`_load` must be called before using the instance. Otherwise, the
file referenced or contained in *file* is loaded. Corresponds to package
files ppt/media/image[1-9][0-9]*.*.
"""
def __init__(self, partname, content_type, blob, ext, filepath=None):
super(Image, self).__init__(partname, content_type, blob)
self._ext = ext
self._filepath = filepath
@classmethod
def new(cls, partname, img_file):
"""
Return a new Image part instance from *img_file*, which may be a path
to a file (a string), or a file-like object. Corresponds to package
files ppt/media/image[1-9][0-9]*.*.
"""
filepath, ext, content_type, blob = cls._load_from_file(img_file)
image = cls(partname, content_type, blob, ext, filepath)
return image
@property
def ext(self):
"""
Return file extension for this image e.g. ``'png'``.
"""
return self._ext
@classmethod
def load(cls, partname, content_type, blob, package):
ext = posixpath.splitext(partname)[1]
return cls(partname, content_type, blob, ext)
@property
def _desc(self):
"""
Return filename associated with this image, either the filename of the
original image file the image was created with or a synthetic name of
the form ``image.ext`` where ``ext`` is appropriate to the image file
format, e.g. ``'jpg'``.
"""
if self._filepath is not None:
return os.path.split(self._filepath)[1]
# return generic filename if original filename is unknown
return 'image.%s' % self.ext
@staticmethod
def _ext_from_image_stream(stream):
"""
Return the filename extension appropriate to the image file contained
in *stream*.
"""
ext_map = {
'GIF': 'gif', 'JPEG': 'jpg', 'PNG': 'png', 'TIFF': 'tiff',
'WMF': 'wmf'
}
stream.seek(0)
format = PIL_Image.open(stream).format
if format not in ext_map:
tmpl = "unsupported image format, expected one of: %s, got '%s'"
raise ValueError(tmpl % (ext_map.keys(), format))
return ext_map[format]
@staticmethod
def _image_ext_content_type(ext):
"""
Return the content type corresponding to filename extension *ext*
"""
key = ext.lower()
if key not in image_content_types:
tmpl = "unsupported image file extension '%s'"
raise ValueError(tmpl % (ext))
content_type = image_content_types[key]
return content_type
@classmethod
def _load_from_file(cls, img_file):
"""
Load image from *img_file*, which is either a path to an image file
or a file-like object.
"""
if isinstance(img_file, basestring): # img_file is a path
filepath = img_file
ext = os.path.splitext(filepath)[1][1:]
content_type = cls._image_ext_content_type(ext)
with open(filepath, 'rb') as f:
blob = f.read()
else: # assume img_file is a file-like object
filepath = None
ext = cls._ext_from_image_stream(img_file)
content_type = cls._image_ext_content_type(ext)
img_file.seek(0)
blob = img_file.read()
return filepath, ext, content_type, blob
def _scale(self, width, height):
"""
Return scaled image dimensions based on supplied parameters. If
*width* and *height* are both |None|, the native image size is
returned. If neither *width* nor *height* is |None|, their values are
returned unchanged. If a value is provided for either *width* or
*height* and the other is |None|, the dimensions are scaled,
preserving the image's aspect ratio.
"""
native_width_px, native_height_px = self._size
native_width = Px(native_width_px)
native_height = Px(native_height_px)
if width is None and height is None:
width = native_width
height = native_height
elif width is None:
scaling_factor = float(height) / float(native_height)
width = int(round(native_width * scaling_factor))
elif height is None:
scaling_factor = float(width) / float(native_width)
height = int(round(native_height * scaling_factor))
return width, height
@property
def _sha1(self):
"""Return SHA1 hash digest for image"""
return hashlib.sha1(self._blob).hexdigest()
@property
def _size(self):
"""
Return *width*, *height* tuple representing native dimensions of
image in pixels.
"""
image_stream = StringIO(self._blob)
width_px, height_px = PIL_Image.open(image_stream).size
image_stream.close()
return width_px, height_px
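# A minimal usage sketch (the file name 'logo.png' and the example sizes are
# assumptions, not part of this module):
#
#   img = Image.new(PackURI('/ppt/media/image1.png'), 'logo.png')
#   img.ext                     # 'png'
#   img._scale(None, None)      # native size as Px (EMU) values
#   img._scale(Px(200), None)   # width fixed, height scaled to keep aspect ratio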
class ImageCollection(PartCollection):
"""
Immutable sequence of images, typically belonging to an instance of
|Package|. An image part containing a particular image blob appears only
once in an instance, regardless of how many times it is referenced by a
pic shape in a slide.
"""
def __init__(self):
super(ImageCollection, self).__init__()
def add_image(self, file):
"""
Return image part containing the image in *file*, which is either a
path to an image file or a file-like object containing an image. If an
image instance containing this same image already exists, that
instance is returned. If it does not yet exist, a new one is created.
"""
# use Image constructor to validate and characterize image file
partname = PackURI('/ppt/media/image1.jpeg') # dummy just for baseURI
image = Image.new(partname, file)
# return matching image if found
for existing_image in self._values:
if existing_image._sha1 == image._sha1:
return existing_image
# otherwise add it to collection and return new image
self._values.append(image)
self._rename_images()
return image
def load(self, parts):
"""
Load the image collection with all the image parts in iterable
*parts*.
"""
def is_image_part(part):
return (
isinstance(part, Image) and
part.partname.startswith('/ppt/media/')
)
for part in parts:
if is_image_part(part):
self.add_part(part)
def _rename_images(self):
"""
Assign partnames like ``/ppt/media/image9.png`` to all images in the
collection. The name portion is always ``image``. The number part
forms a continuous sequence starting at 1 (e.g. 1, 2, 3, ...). The
extension is preserved during renaming.
"""
for idx, image in enumerate(self._values):
partname_str = '/ppt/media/image%d.%s' % (idx+1, image.ext)
image.partname = PackURI(partname_str)
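# Illustrative sketch of the de-duplication described in add_image() (the file
# name is an assumption):
#
#   images = ImageCollection()
#   a = images.add_image('logo.png')
#   b = images.add_image('logo.png')   # same blob -> the existing part is returned
#   assert a is b                      # only one part is stored; _rename_images()
#                                      # keeps partnames sequential (image1.png, ...)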
|
|
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# Copyright (c) 2009-2014 Richard Jones, Claudio Canepa
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
from __future__ import division, print_function, unicode_literals
import math
from math import pi, atan
import cocos
from cocos.director import director
from cocos.sprite import Sprite
from cocos import euclid
import pyglet
from pyglet.gl import *
from pyglet.window import key
def circle(radius, color):
circumference = 2*math.pi*radius
step_size = 5
steps = max(4, int(circumference / step_size))
adelta = 2 * math.pi / steps
points = [0,0,radius,0]
for step in range(1,steps+1):
x = radius*math.cos(step*adelta)
y = radius*math.sin(step*adelta)
points += [x,y]
num_points = steps+2
vertex_list = pyglet.graphics.vertex_list(num_points,
('v2f', points),
('c4B', list(color)*num_points)
)
return vertex_list
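# circle() lays its vertices out for GL_TRIANGLE_FAN: the first vertex is the
# centre (0, 0), followed by points around the circumference, which is how
# BallWidget.draw() below renders it.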
def rectangle(x1, y1, x2, y2, color):
return pyglet.graphics.vertex_list(4,
('v2f', [x1, y1, x2, y1, x2, y2, x1, y2]),
('c4B', color*4)
)
def up_triange(x,y, h, w, color):
return pyglet.graphics.vertex_list(3,
('v2f', [x, y, x-w/2, y+h, x+w/2, y+h]),
('c4B', color*3)
)
def down_triange(x,y, h, w, color):
return pyglet.graphics.vertex_list(3,
('v2f', [x, y, x-w/2, y-h, x+w/2, y-h]),
('c4B', color*3)
)
class Widget(cocos.cocosnode.CocosNode):
def __init__(self):
super(Widget, self).__init__()
self.selected = False
self.hovered = False
def set_hover(self, value):
self.hovered = value
def set_selected(self, position):
pass
def on_dragged(self, dx, dy):
self.x += dx
self.y += dy
def is_mouse_over(self, position):
return False
class BallWidget(Widget):
def __init__(self, radius, color):
super(BallWidget, self).__init__()
self.radius = radius
self.color = color
self.body = circle(radius, color)
self.hover_envelope = circle(radius*1.2, (255,255,0,100))
self.selected_envelope = circle(radius*1.5, (255,255,255,200))
def draw(self):
glPushMatrix()
self.transform()
if self.selected:
self.selected_envelope.draw(GL_TRIANGLE_FAN)
elif self.hovered:
self.hover_envelope.draw(GL_TRIANGLE_FAN)
self.body.draw(GL_TRIANGLE_FAN)
glPopMatrix()
def is_mouse_over(self, position):
px, py = position
x, y = self.position
if (px-x)**2+(py-y)**2 < self.radius**2:
return True
return False
class UILayer(cocos.layer.Layer):
is_event_handler = True
def __init__(self):
super(UILayer, self).__init__()
self.hovering = None
self.hovering_all = []
self.mouse_down = False
self.dragging = False
def on_mouse_motion(self, x, y, dx, dy):
selected = None
self.hovering_all = []
for c in self.get_children():
if isinstance(c, Widget):
if c.is_mouse_over((x,y)):
selected = c
self.hovering_all.append( c )
c.set_hover(False)
if selected:
if self.hovering not in self.hovering_all:
selected.set_hover(True)
self.hovering = selected
else:
self.hovering.set_hover(True)
else:
self.hovering = None
def on_mouse_press(self, *args):
self.mouse_down = True
def on_mouse_release(self, *args):
self.mouse_down = False
self.dragging = False
def on_mouse_drag(self, x, y, dx, dy, button, modifiers):
self.dragging = True
if self.hovering:
self.hovering.on_dragged(dx,dy)
def on_mouse_scroll(self, x, y, scroll_x, scroll_y):
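        # when several widgets overlap under the cursor, scrolling cycles the
        # hover through them (the current top is pushed to the back of the list)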
if self.hovering_all and not self.mouse_down:
top = self.hovering_all.pop(0)
self.hovering_all.append(top)
self.hovering.set_hover(False)
self.hovering = self.hovering_all[0]
self.hovering.set_hover(True)
class TimelineModel(object):
def get_markers(self):
pass
def get_duration(self):
pass
def get_position(self):
pass
class TimeLine(Widget):
def __init__(self, model):
super(TimeLine, self).__init__()
self.model = model
x, y = director.get_window_size()
self.x_margin = xm = 20
self.y_margin = ym = 20
self.height = h = 10
self.width = x-2*xm
self.color = 125,0,0,125
self.bar = rectangle( xm, y-ym, x-xm, y-ym-h, self.color)
def draw(self):
# draw bar
self.bar.draw(GL_QUADS)
# draw ticks
d = self.model.get_duration()
if d != 0:
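            # tick spacing: a power of two roughly a quarter of the duration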
            step = 2 ** int(math.log(d, 2) - 2)
p = 0
while p <= d:
self.show_tick( p )
p += step
markers = self.model.get_markers()
markers_pxs = [ self.map_to_pixel(m) for m in markers ]
x, y = director.get_window_size()
ym = self.y_margin
h = self.height
for pixel in markers_pxs:
t = up_triange(pixel, y - ym - h / 2, 10, 10, (100,100,255,255))
t.draw(GL_TRIANGLES)
pixel = self.map_to_pixel( self.model.get_position() )
t = down_triange(pixel, y - ym - h / 2, 10, 10, (255,255,0,255))
t.draw(GL_TRIANGLES)
def map_to_pixel(self, when):
d = self.model.get_duration()
xm = self.x_margin
if d == 0:
return xm
w = self.width
p = (when / d) * w
return xm + p
def show_tick(self, when):
l = self.height + 5
x,y = director.get_window_size()
ym = self.y_margin
p = self.map_to_pixel( when )
# draw line
glColor4ub(128, 128, 128,100)
glLineWidth(1)
glBegin(GL_LINES)
glVertex2f( p, y-ym )
glVertex2f( p, y-ym-l )
glEnd()
# draw label
label = pyglet.text.Label(str(when),
font_name='Monotype',
#font_name='Times New Roman',
font_size=8,
x=p, y=y-ym-l-7,
anchor_x='center', anchor_y='center')
label.draw()
|
|
#!/usr/bin/env python
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import sys
import os
import argparse
import subprocess
import logging
import shutil
from Bio import SeqIO
from amptk import amptklib
class MyFormatter(argparse.ArgumentDefaultsHelpFormatter):
def __init__(self,prog):
super(MyFormatter,self).__init__(prog,max_help_position=50)
class colr(object):
GRN = '\033[92m'
END = '\033[0m'
WARN = '\033[93m'
def restricted_float(x):
x = float(x)
if x < 0.0 or x > 1.0:
raise argparse.ArgumentTypeError("%r not in range [0.0, 1.0]"%(x,))
return x
def checkfastqsize(input):
filesize = os.path.getsize(input)
return filesize
def main(args):
parser=argparse.ArgumentParser(prog='amptk-OTU_cluster_ref.py', usage="%(prog)s [options] -i file.demux.fq\n%(prog)s -h for help menu",
description='''Script runs UPARSE OTU clustering.
Requires USEARCH by Robert C. Edgar: http://drive5.com/usearch''',
epilog="""Written by Jon Palmer (2016) [email protected]""",
formatter_class=MyFormatter)
parser.add_argument('-i','--fastq', dest="FASTQ", required=True, help='FASTQ file (Required)')
parser.add_argument('-o','--out', help='Base output name')
parser.add_argument('-e','--maxee', default='1.0', help='Quality trim EE value')
parser.add_argument('-p','--pct_otu', default='97', help="OTU Clustering Percent")
parser.add_argument('--id', default='97', help="Threshold for alignment")
parser.add_argument('-m','--minsize', default='2', help='Min identical seqs to process')
parser.add_argument('-u','--usearch', dest="usearch", default='usearch9', help='USEARCH9 EXE')
parser.add_argument('--map_filtered', action='store_true', help='map quality filtered reads back to OTUs')
parser.add_argument('-d','--db', required=True, help='Reference Database [ITS,ITS1,ITS2,16S,LSU,COI,custom]')
parser.add_argument('--utax_db', help='UTAX Reference Database')
parser.add_argument('--utax_cutoff', default=0.8, type=restricted_float, help='UTAX confidence value threshold.')
parser.add_argument('--utax_level', default='k', choices=['k','p','c','o','f','g','s'], help='UTAX classification level to retain')
parser.add_argument('--mock', default='synmock', help='Spike-in mock community (fasta)')
    parser.add_argument('--debug', action='store_true', help='Keep intermediate files (tmp folder is not removed)')
parser.add_argument('--closed_ref_only', action='store_true', help='Only run closed reference clustering')
parser.add_argument('--cpus', type=int, help="Number of CPUs. Default: auto")
args=parser.parse_args(args)
parentdir = os.path.join(os.path.dirname(amptklib.__file__))
#get basename if not args.out passed
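    #e.g. 'run1.demux.fq' -> base 'run1', otherwise everything before the first '.f' (file name is illustrative)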
if args.out:
base = args.out
else:
if 'demux' in args.FASTQ:
base = os.path.basename(args.FASTQ).split('.demux')[0]
else:
base = os.path.basename(args.FASTQ).split('.f')[0]
taxonomyLookup = {'k': 'Kingdom', 'p': 'Phylum', 'c': 'Class', 'o': 'Order', 'f': 'Family', 'g': 'Genus', 's': 'Species'}
#remove logfile if exists
log_name = base + '.amptk-cluster_ref.log'
if os.path.isfile(log_name):
os.remove(log_name)
amptklib.setupLogging(log_name)
FNULL = open(os.devnull, 'w')
cmd_args = " ".join(sys.argv)+'\n'
amptklib.log.debug(cmd_args)
print("-------------------------------------------------------")
#initialize script, log system info and usearch version
amptklib.SystemInfo()
#Do a version check
usearch = args.usearch
amptklib.versionDependencyChecks(usearch)
#get number of cpus
if args.cpus:
cpus = args.cpus
else:
cpus = amptklib.getCPUS()
#make tmp folder
tmp = base + '_tmp'
if not os.path.exists(tmp):
os.makedirs(tmp)
#Setup DB locations and names, etc
DBdir = os.path.join(parentdir, 'DB')
DataBase = {'ITS1': (os.path.join(DBdir, 'ITS.udb'), os.path.join(DBdir, 'ITS1_UTAX.udb')),
'ITS2': (os.path.join(DBdir, 'ITS.udb'), os.path.join(DBdir, 'ITS2_UTAX.udb')),
'ITS': (os.path.join(DBdir, 'ITS.udb'), os.path.join(DBdir, 'ITS_UTAX.udb')),
'16S': (os.path.join(DBdir, '16S.udb'), os.path.join(DBdir, '16S.udb')),
'LSU': (os.path.join(DBdir, 'LSU.udb'), os.path.join(DBdir, 'LSU_UTAX.udb')),
'COI': (os.path.join(DBdir, 'COI.udb'), os.path.join(DBdir, 'COI_UTAX.udb'))}
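    #each entry maps a DB name to (UDB used to extract the reference FASTA, UTAX UDB used for classification)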
#setup refDB
amptklib.log.info("Checking Reference Database")
if args.db in DataBase:
#need to write to fasta from vsearch UDB
DB = os.path.join(tmp, args.db+'.extracted.fa')
cmd = ['vsearch', '--udb2fasta', DataBase.get(args.db)[0], '--output', DB]
amptklib.runSubprocess(cmd, amptklib.log)
else:
DB = os.path.abspath(args.db)
refDB = os.path.join(tmp, 'reference_DB.fa')
if args.mock:
if args.mock == 'synmock':
mock = os.path.join(parentdir, 'DB', 'amptk_synmock.fa')
else:
mock = os.path.abspath(args.mock)
seen = []
with open(refDB, 'w') as output:
if args.mock:
with open(mock) as input1:
for rec in SeqIO.parse(input1, 'fasta'):
                            if rec.id not in seen:
                                SeqIO.write(rec, output, 'fasta')
                                seen.append(rec.id)
else:
amptklib.log.error("Duplicate ID's in Ref DB: %s, exiting" % rec.id)
sys.exit(1)
with open(DB) as input2:
for rec in SeqIO.parse(input2, 'fasta'):
                        if rec.id not in seen:
                            SeqIO.write(rec, output, 'fasta')
                            seen.append(rec.id)
else:
amptklib.log.error("Duplicate ID's in Ref DB: %s, exiting" % rec.id)
sys.exit(1)
#get utax_database
if args.db in DataBase:
utaxDB = DataBase.get(args.db)[1]
else:
if not args.closed_ref_only:
if args.utax_db:
utaxDB = os.path.abspath(args.utax_db)
else:
amptklib.log.error("%s not pre-installed DB, must then also specify valid UTAX database via --utax_db" % args.db)
sys.exit(1)
#Count FASTQ records
amptklib.log.info("Loading FASTQ Records")
#convert to FASTA for mapping
orig_fasta = os.path.join(tmp, base+'.orig.fa')
cmd = ['vsearch', '--fastq_filter', args.FASTQ, '--fastaout', orig_fasta, '--fastq_qmax', '55', '--threads', str(cpus)]
amptklib.runSubprocess(cmd, amptklib.log)
orig_total = amptklib.countfasta(orig_fasta)
size = amptklib.checkfastqsize(args.FASTQ)
readablesize = amptklib.convertSize(size)
amptklib.log.info('{0:,}'.format(orig_total) + ' reads (' + readablesize + ')')
#Expected Errors filtering step
filter_out = os.path.join(tmp, base + '.EE' + args.maxee + '.filter.fq')
filter_fasta = os.path.join(tmp, base + '.EE' + args.maxee + '.filter.fa')
amptklib.log.info("Quality Filtering, expected errors < %s" % args.maxee)
cmd = ['vsearch', '--fastq_filter', args.FASTQ, '--fastq_maxee', str(args.maxee), '--fastqout', filter_out, '--fastaout', filter_fasta, '--fastq_qmax', '55', '--threads', str(cpus)]
amptklib.runSubprocess(cmd, amptklib.log)
qtrimtotal = amptklib.countfastq(filter_out)
amptklib.log.info('{0:,}'.format(qtrimtotal) + ' reads passed')
#now run full length dereplication
derep_out = os.path.join(tmp, base + '.EE' + args.maxee + '.derep.fa')
amptklib.log.info("De-replication (remove duplicate reads)")
    cmd = ['vsearch', '--derep_fulllength', filter_fasta, '--sizeout', '--output', derep_out, '--threads', str(cpus)]
amptklib.runSubprocess(cmd, amptklib.log)
total = amptklib.countfasta(derep_out)
amptklib.log.info('{0:,}'.format(total) + ' reads passed')
#now run sort by size
sort_out = os.path.join(tmp, base + '.EE' + args.maxee + '.sort.fa')
amptklib.log.info("Sorting reads by size: removing reads seen less than %s times" % args.minsize)
cmd = ['vsearch', '--sortbysize', derep_out, '--minsize', args.minsize, '--output', sort_out, '--threads', str(cpus)]
amptklib.runSubprocess(cmd, amptklib.log)
total = amptklib.countfasta(sort_out)
amptklib.log.info('{0:,}'.format(total) + ' reads passed')
#chimera detection
#first run through de novo chimera detection
amptklib.log.info("De novo chimera detection (VSEARCH)")
chimera_out = os.path.join(tmp, base + '.EE' + args.maxee + '.chimera_check.fa')
cmd = ['vsearch', '--uchime_denovo', sort_out, '--relabel', 'Seq', '--sizeout', '--nonchimeras', chimera_out, '--threads', str(cpus)]
amptklib.runSubprocess(cmd, amptklib.log)
total = amptklib.countfasta(chimera_out)
amptklib.log.info('{0:,}'.format(total) + ' reads passed')
#now run uchime_ref
uchime_out = os.path.join(tmp, base + '.EE' + args.maxee + '.uchime.otus.fa')
#now run chimera filtering if all checks out
amptklib.log.info("Chimera Filtering (VSEARCH)")
cmd = ['vsearch', '--mindiv', '1.0', '--uchime_ref', chimera_out, '--db', refDB, '--sizeout', '--nonchimeras', uchime_out, '--threads', str(cpus)]
amptklib.runSubprocess(cmd, amptklib.log)
total = amptklib.countfasta(uchime_out)
amptklib.log.info('{0:,}'.format(total) + ' OTUs passed')
#now run usearch_global versus reference database
align_out = os.path.join(tmp, base + '.align.uc')
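    #vsearch expects --id as a fraction between 0 and 1; args.id is given as a percent, hence * 0.01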
pident = int(args.id) * 0.01
amptklib.log.info("Reference Clustering using Global Alignment, %s%% identity" % args.id)
cmd = ['vsearch', '--usearch_global', uchime_out, '--db', refDB, '--id', str(pident), '--output_no_hits', '--top_hits_only', '--notrunclabels', '--uc', align_out, '--threads', str(cpus)]
amptklib.runSubprocess(cmd, amptklib.log)
#parse results
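    #.uc columns used below: col[3] = percent identity ('*' for no-hit records),
    #col[8] = query label (carries ';size=N' from --sizeout), col[9] = matched reference label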
ref_results = {}
nohits = []
with open(align_out, 'r') as alignment:
for line in alignment:
line = line.replace('\n', '')
col = line.split('\t')
counts = col[8].split(';')
counts = int(counts[1].replace('size=', ''))
if col[3] == '*':
nohits.append(col[8])
continue
if float(col[3]) >= float(args.id):
if not col[8] in ref_results:
ref_results[col[8]] = (col[9], col[3], counts)
else:
print("Error: %s duplicated ID" % col[8])
else:
nohits.append(col[8])
#summarize results from first ref clustering
num_refcluster = len(ref_results)
seqs_refcluster = 0
for k,v in list(ref_results.items()):
seqs_refcluster += v[2]
amptklib.log.info("%i OTUs classified " % num_refcluster + "({0:.0f}%".format(seqs_refcluster/float(qtrimtotal)* 100)+ " of reads)")
#get ref clustered hits to file with taxonomy
ref_clustered = os.path.join(tmp, base+'.ref_clustered.fa')
with open(ref_clustered, 'w') as refoutput:
with open(uchime_out, 'r') as input:
otu_counter = 1
for rec in SeqIO.parse(input, 'fasta'):
if rec.id in ref_results:
res = ref_results.get(rec.id)
pident = res[1]
tax = res[0]
newID = 'OTU'+str(otu_counter)+';pident='+pident+';'+tax
rec.id = newID
rec.name = ''
rec.description = ''
SeqIO.write(rec, refoutput, 'fasta')
otu_counter += 1
'''
if not args.closed_ref_only:
#get nohits file to run clustering
utax_ref = os.path.join(tmp, base + '.EE' + args.maxee + '.utax_ref.fa')
with open(utax_ref, 'w') as output:
with open(uchime_out, 'r') as input:
for rec in SeqIO.parse(input, 'fasta'):
if rec.id in nohits:
SeqIO.write(rec, output, 'fasta')
#input needs to be sorted, so
ref_sort = os.path.join(tmp, base+'.utax_ref.sorted.fa')
cmd = ['vsearch', '--sortbysize', utax_ref, '--minsize', args.minsize, '--output', ref_sort, '--threads', str(cpus)]
amptklib.runSubprocess(cmd, amptklib.log)
#now run clustering algorithm on those not found in reference database
radius = str(100 - int(args.pct_otu))
otu_out = os.path.join(tmp, base + '.EE' + args.maxee + '.otus.fa')
amptklib.log.info("De novo Clustering remaining sequences (UPARSE)")
cmd = [usearch, '-cluster_otus', ref_sort, '-relabel', 'OTU', '-otu_radius_pct', radius, '-otus', otu_out]
amptklib.runSubprocess(cmd, amptklib.log)
total = amptklib.countfasta(otu_out)
amptklib.log.info('{0:,}'.format(total) + ' de novo OTUs')
#try utax reference clustering
amptklib.log.info("Reference Clustering de novo OTUs using UTAX")
cmd = [usearch, '-cluster_otus_utax', otu_out, '-db', utaxDB, '-utax_cutoff', str(args.utax_cutoff), '-utax_level', 's', '-strand', 'plus', '-utaxout', os.path.join(tmp, base+'.utax.out')]
amptklib.runSubprocess(cmd, amptklib.log)
#setup tax filtering
tax_values = ['k','p','c','o','f','g','s']
filter_index = tax_values.index(args.utax_level)
filt_tax_values = [s + ':' for s in tax_values[filter_index:]]
#get results from utax
with open(ref_clustered, 'a') as output:
seqDict = SeqIO.index(otu_out, 'fasta')
utaxresults = []
with open(os.path.join(tmp, base+'.utax.out'), 'r') as utax:
for line in utax:
line = line.replace('\n', '')
col = line.split('\t')
ID = col[0]
tax = col[2]
if any(x in tax for x in filt_tax_values):
record = seqDict[ID]
record.id = 'OTU'+str(otu_counter)+';UTAX;tax='+tax
record.name = ''
record.description = ''
SeqIO.write(record, output, 'fasta')
otu_counter += 1
total = amptklib.countfasta(ref_clustered) - num_refcluster
amptklib.log.info('{0:,}'.format(total) + ' classified to %s' % taxonomyLookup.get(args.utax_level))
'''
#clean up padded N's
amptklib.log.info("Cleaning up padding from OTUs")
otu_clean = os.path.join(tmp, base + '.clean.otus.fa')
amptklib.fasta_strip_padding(ref_clustered, otu_clean)
total = amptklib.countfasta(otu_clean)
amptklib.log.info('{0:,}'.format(total) + ' total OTUs')
#now map reads back to OTUs
uc_out = os.path.join(tmp, base + '.EE' + args.maxee + '.mapping.uc')
otu_table = os.path.join(tmp, base + '.EE' + args.maxee + '.otu_table.txt')
#setup reads to map
if args.map_filtered:
reads = filter_fasta
else:
reads = orig_fasta
amptklib.log.info("Mapping Reads to OTUs and Building OTU table")
cmd = ['vsearch', '--usearch_global', reads, '--strand', 'plus', '--id', '0.97', '--db', otu_clean, '--uc', uc_out, '--otutabout', otu_table, '--threads', str(cpus)]
amptklib.runSubprocess(cmd, amptklib.log)
#count reads mapped
total = amptklib.line_count2(uc_out)
amptklib.log.info('{0:,}'.format(total) + ' reads mapped to OTUs '+ '({0:.0f}%)'.format(total/float(orig_total)* 100))
#Move files around, delete tmp if argument passed.
currentdir = os.getcwd()
final_otu = os.path.join(currentdir, base + '.cluster.otus.fa')
shutil.copyfile(otu_clean, final_otu)
final_otu_table = os.path.join(currentdir, base + '.otu_table.txt')
shutil.copyfile(otu_table, final_otu_table)
if not args.debug:
shutil.rmtree(tmp)
#Print location of files to STDOUT
print("-------------------------------------------------------")
print("OTU Clustering Script has Finished Successfully")
print("-------------------------------------------------------")
if args.debug:
print("Tmp Folder of files: %s" % tmp)
print("Clustered OTUs: %s" % os.path.basename(final_otu))
print("OTU Table: %s" % os.path.basename(final_otu_table))
print("-------------------------------------------------------")
otu_print = final_otu.split('/')[-1]
tab_print = final_otu_table.split('/')[-1]
if 'darwin' in sys.platform:
print(colr.WARN + "\nExample of next cmd:" + colr.END + " amptk filter -i %s -f %s -b <mock barcode>\n" % (tab_print, otu_print))
else:
print("\nExample of next cmd: amptk filter -i %s -f %s -b <mock barcode>\n" % (tab_print, otu_print))
if __name__ == "__main__":
main(args)
|
|
from collections import OrderedDict
import numpy as np
from gym.spaces import Box, Dict
from multiworld.envs.env_util import get_stat_in_paths, \
create_stats_ordered_dict, get_asset_full_path
from multiworld.core.multitask_env import MultitaskEnv
from multiworld.envs.mujoco.sawyer_xyz.base import SawyerXYZEnv
class SawyerReachXYZEnv(SawyerXYZEnv, MultitaskEnv):
def __init__(
self,
reward_type='hand_distance',
norm_order=1,
indicator_threshold=0.06,
fix_goal=False,
fixed_goal=(0.15, 0.6, 0.3),
hide_goal_markers=False,
**kwargs
):
self.quick_init(locals())
MultitaskEnv.__init__(self)
SawyerXYZEnv.__init__(self, model_name=self.model_name, **kwargs)
self.reward_type = reward_type
self.norm_order = norm_order
self.indicator_threshold = indicator_threshold
self.fix_goal = fix_goal
self.fixed_goal = np.array(fixed_goal)
self._state_goal = None
self.hide_goal_markers = hide_goal_markers
self.action_space = Box(np.array([-1, -1, -1]), np.array([1, 1, 1]), dtype=np.float32)
self.hand_space = Box(self.hand_low, self.hand_high, dtype=np.float32)
self.observation_space = Dict([
('observation', self.hand_space),
('desired_goal', self.hand_space),
('achieved_goal', self.hand_space),
('state_observation', self.hand_space),
('state_desired_goal', self.hand_space),
('state_achieved_goal', self.hand_space),
('proprio_observation', self.hand_space),
('proprio_desired_goal', self.hand_space),
('proprio_achieved_goal', self.hand_space),
])
self.reset()
def step(self, action):
self.set_xyz_action(action)
# keep gripper closed
self.do_simulation(np.array([1]))
# The marker seems to get reset every time you do a simulation
self._set_goal_marker(self._state_goal)
ob = self._get_obs()
reward = self.compute_reward(action, ob)
info = self._get_info()
done = False
return ob, reward, done, info
def _get_obs(self):
flat_obs = self.get_endeff_pos()
return dict(
observation=flat_obs,
desired_goal=self._state_goal,
achieved_goal=flat_obs,
state_observation=flat_obs,
state_desired_goal=self._state_goal,
state_achieved_goal=flat_obs,
proprio_observation=flat_obs,
proprio_desired_goal=self._state_goal,
proprio_achieved_goal=flat_obs,
)
def _get_info(self):
hand_diff = self._state_goal - self.get_endeff_pos()
hand_distance = np.linalg.norm(hand_diff, ord=self.norm_order)
hand_distance_l1 = np.linalg.norm(hand_diff, ord=1)
hand_distance_l2 = np.linalg.norm(hand_diff, ord=2)
return dict(
hand_distance=hand_distance,
hand_distance_l1=hand_distance_l1,
hand_distance_l2=hand_distance_l2,
hand_success=float(hand_distance < self.indicator_threshold),
)
def _set_goal_marker(self, goal):
"""
This should be used ONLY for visualization. Use self._state_goal for
logging, learning, etc.
"""
self.data.site_xpos[self.model.site_name2id('hand-goal-site')] = (
goal
)
if self.hide_goal_markers:
self.data.site_xpos[self.model.site_name2id('hand-goal-site'), 2] = (
-1000
)
@property
def model_name(self):
return get_asset_full_path('sawyer_xyz/sawyer_reach.xml')
def viewer_setup(self):
self.viewer.cam.trackbodyid = 0
self.viewer.cam.lookat[0] = 0
self.viewer.cam.lookat[1] = 1.0
self.viewer.cam.lookat[2] = 0.5
self.viewer.cam.distance = 0.3
self.viewer.cam.elevation = -45
self.viewer.cam.azimuth = 270
self.viewer.cam.trackbodyid = -1
def reset_model(self):
velocities = self.data.qvel.copy()
angles = self.data.qpos.copy()
angles[:7] = [1.7244448, -0.92036369, 0.10234232, 2.11178144, 2.97668632, -0.38664629, 0.54065733]
self.set_state(angles.flatten(), velocities.flatten())
self._reset_hand()
self.set_goal(self.sample_goal())
self.sim.forward()
return self._get_obs()
def _reset_hand(self):
for _ in range(10):
self.data.set_mocap_pos('mocap', np.array([0, 0.5, 0.02]))
self.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
self.do_simulation(None, self.frame_skip)
"""
Multitask functions
"""
def get_goal(self):
return {
'desired_goal': self._state_goal,
'state_desired_goal': self._state_goal,
}
def set_goal(self, goal):
self._state_goal = goal['state_desired_goal']
self._set_goal_marker(self._state_goal)
def set_to_goal(self, goal):
state_goal = goal['state_desired_goal']
for _ in range(30):
self.data.set_mocap_pos('mocap', state_goal)
self.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
# keep gripper closed
self.do_simulation(np.array([1]))
def sample_goals(self, batch_size):
if self.fix_goal:
goals = np.repeat(
self.fixed_goal.copy()[None],
batch_size,
0
)
else:
goals = np.random.uniform(
self.hand_space.low,
self.hand_space.high,
size=(batch_size, self.hand_space.low.size),
)
return {
'desired_goal': goals,
'state_desired_goal': goals,
}
def compute_rewards(self, actions, obs):
achieved_goals = obs['state_achieved_goal']
desired_goals = obs['state_desired_goal']
hand_pos = achieved_goals
goals = desired_goals
hand_diff = hand_pos - goals
if self.reward_type == 'hand_distance':
r = -np.linalg.norm(hand_diff, ord=self.norm_order, axis=1)
elif self.reward_type == 'vectorized_hand_distance':
r = -np.abs(hand_diff)
elif self.reward_type == 'hand_success':
r = -(np.linalg.norm(hand_diff, ord=self.norm_order, axis=1)
> self.indicator_threshold).astype(float)
else:
raise NotImplementedError("Invalid/no reward type.")
return r
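# Illustrative note (not from the original code): with batched observations of
# shape (B, 3), 'hand_distance' and 'hand_success' yield rewards of shape (B,),
# while 'vectorized_hand_distance' yields per-dimension rewards of shape (B, 3).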
def get_diagnostics(self, paths, prefix=''):
statistics = OrderedDict()
for stat_name in [
'hand_distance',
'hand_distance_l1',
'hand_distance_l2',
'hand_success',
]:
stat = get_stat_in_paths(paths, 'env_infos', stat_name)
statistics.update(create_stats_ordered_dict(
'%s%s' % (prefix, stat_name),
stat,
always_show_all_stats=True,
))
statistics.update(create_stats_ordered_dict(
'Final %s%s' % (prefix, stat_name),
[s[-1] for s in stat],
always_show_all_stats=True,
))
return statistics
def get_env_state(self):
base_state = super().get_env_state()
goal = self._state_goal.copy()
return base_state, goal
def set_env_state(self, state):
base_state, goal = state
super().set_env_state(base_state)
self._state_goal = goal
self._set_goal_marker(goal)
class SawyerReachXYEnv(SawyerReachXYZEnv):
def __init__(self, *args,
fixed_goal=(0.15, 0.6),
hand_z_position=0.055, **kwargs):
self.quick_init(locals())
SawyerReachXYZEnv.__init__(
self,
*args,
fixed_goal=(fixed_goal[0], fixed_goal[1], hand_z_position),
**kwargs
)
self.hand_z_position = hand_z_position
self.action_space = Box(np.array([-1, -1]), np.array([1, 1]), dtype=np.float32)
self.hand_space = Box(
np.hstack((self.hand_space.low[:2], self.hand_z_position)),
np.hstack((self.hand_space.high[:2], self.hand_z_position)),
dtype=np.float32
)
self.observation_space = Dict([
('observation', self.hand_space),
('desired_goal', self.hand_space),
('achieved_goal', self.hand_space),
('state_observation', self.hand_space),
('state_desired_goal', self.hand_space),
('state_achieved_goal', self.hand_space),
('proprio_observation', self.hand_space),
('proprio_desired_goal', self.hand_space),
('proprio_achieved_goal', self.hand_space),
])
def step(self, action):
delta_z = self.hand_z_position - self.data.mocap_pos[0, 2]
action = np.hstack((action, delta_z))
return super().step(action)
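# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration, not part of the original module).
# It assumes a working MuJoCo + multiworld installation; the kwargs and the
# number of steps are arbitrary example values.
if __name__ == "__main__":
    env = SawyerReachXYZEnv(reward_type='hand_distance')
    obs = env.reset()
    for _ in range(10):
        # sample a random xyz action in [-1, 1]^3 and step the environment
        action = env.action_space.sample()
        obs, reward, done, info = env.step(action)
        print(reward, info['hand_distance'])
    # compute_rewards also accepts batched dict observations
    batch = {
        'state_achieved_goal': np.repeat(obs['state_achieved_goal'][None], 4, axis=0),
        'state_desired_goal': np.repeat(obs['state_desired_goal'][None], 4, axis=0),
    }
    print(env.compute_rewards(None, batch))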
|
|
import unittest
import datetime
import json
from restosaur import API, responses
from restosaur.resource import Resource
from restosaur.dispatch import resource_dispatcher_factory
from django.test import SimpleTestCase
class ResourceTestCase(unittest.TestCase):
def setUp(self):
from django.test import RequestFactory
super(ResourceTestCase, self).setUp()
self.api = API('/')
self.rqfactory = RequestFactory()
def call(self, resource, method, *args, **kw):
rq = getattr(self.rqfactory, method)(resource.path, *args, **kw)
return resource_dispatcher_factory(self.api, resource)(rq)
class DefaultRepresentationTestCase(ResourceTestCase):
def setUp(self):
super(DefaultRepresentationTestCase, self).setUp()
self.entity = self.api.resource('entity')
@self.entity.get()
def entity_GET(ctx):
return ctx.Entity({'some':'test'})
def test_successful_getting_200_status_code(self):
resp = self.call(self.entity, 'get')
self.assertEqual(resp.status_code, 200)
def test_returning_valid_content_type(self):
resp = self.call(self.entity, 'get')
self.assertEqual(resp['Content-Type'], 'application/json')
def test_getting_valid_entity_content(self):
resp = self.call(self.entity, 'get')
resp_json = json.loads(resp.content)
self.assertTrue(resp_json['some'] == 'test')
def test_raising_not_acceptable_for_unsupported_representation(self):
resp = self.call(self.entity, 'get', HTTP_ACCEPT='application/vnd.not-defined+json')
self.assertEqual(resp.status_code, 406)
def test_raising_not_acceptable_for_unsupported_serializer(self):
resp = self.call(self.entity, 'get', HTTP_ACCEPT='application/eggsandmeat')
self.assertEqual(resp.status_code, 406)
def test_returning_fallback_application_json_content_type_for_unsupported_serializer(self):
resp = self.call(self.entity, 'get', HTTP_ACCEPT='application/eggsandmeat')
self.assertEqual(resp['Content-Type'], 'application/json')
class SeeOtherTestCase(ResourceTestCase):
def setUp(self):
super(SeeOtherTestCase, self).setUp()
self.seeother = self.api.resource('seeother')
@self.seeother.get()
def seeother_GET(ctx):
return ctx.SeeOther('https://google.com')
def test_that_seeother_accepts_any_content_type(self):
resp = self.call(self.seeother, 'get', HTTP_ACCEPT='application/vnd.not-defined+json')
self.assertEqual(resp.status_code, 303)
def test_that_seeother_sends_back_location_header(self):
resp = self.call(self.seeother, 'get')
self.assertEqual(resp['Location'], 'https://google.com')
def test_that_seeother_returns_no_content(self):
resp = self.call(self.seeother, 'get')
self.assertEqual(resp.content, '')
def test_that_seeother_returns_application_json_content_type(self):
resp = self.call(self.seeother, 'get')
self.assertEqual(resp['Content-Type'], 'application/json')
class NotFoundTestCase(ResourceTestCase):
def setUp(self):
super(NotFoundTestCase, self).setUp()
self.resource_exc = self.api.resource('notfound_exc')
self.resource = self.api.resource('notfound')
@self.resource.get()
def notfound_GET(ctx):
return ctx.NotFound()
@self.resource_exc.get()
def notfoundexc_GET(ctx):
from django.http import Http404
raise Http404
def test_returning_404_code_when_handling_django_Http404_exception(self):
resp = self.call(self.resource_exc, 'get')
self.assertEqual(resp.status_code, 404)
def test_valid_content_type_when_handling_django_Http404_exception(self):
resp = self.call(self.resource_exc, 'get')
self.assertEqual(resp['Content-Type'], 'application/json')
def test_returning_404_code_when_returning_NotFoundResponse(self):
resp = self.call(self.resource, 'get')
self.assertEqual(resp.status_code, 404)
def test_valid_content_type_when_returning_NotFoundResponse(self):
resp = self.call(self.resource, 'get')
self.assertEqual(resp['Content-Type'], 'application/json')
class BadRequestTestCase(ResourceTestCase):
def setUp(self):
super(BadRequestTestCase, self).setUp()
self.resource = self.api.resource('badrequest')
@self.resource.get()
def badrequest_GET(ctx):
return ctx.BadRequest()
def test_returning_400_code_when_returning_BadRequestResponse(self):
resp = self.call(self.resource, 'get')
self.assertEqual(resp.status_code, 400)
class MethodNotAllowedTestCase(ResourceTestCase):
def setUp(self):
super(MethodNotAllowedTestCase, self).setUp()
self.empty_resource = self.api.resource('empty')
def test_that_empty_resource_raises_method_not_allowed_for_GET(self):
resp = self.call(self.empty_resource, 'get')
self.assertEqual(resp.status_code, 405)
def test_that_empty_resource_raises_method_not_allowed_for_POST(self):
resp = self.call(self.empty_resource, 'post')
self.assertEqual(resp.status_code, 405)
def test_that_empty_resource_raises_method_not_allowed_for_PUT(self):
resp = self.call(self.empty_resource, 'put')
self.assertEqual(resp.status_code, 405)
def test_that_empty_resource_raises_method_not_allowed_for_PATCH(self):
resp = self.call(self.empty_resource, 'patch')
self.assertEqual(resp.status_code, 405)
def test_that_empty_resource_raises_method_not_allowed_for_DELETE(self):
resp = self.call(self.empty_resource, 'delete')
self.assertEqual(resp.status_code, 405)
def test_that_empty_resource_raises_method_not_allowed_for_OPTIONS(self):
resp = self.call(self.empty_resource, 'options')
self.assertEqual(resp.status_code, 405)
class MethodsHandlingTestCase(ResourceTestCase):
def setUp(self):
super(MethodsHandlingTestCase, self).setUp()
self.get = self.api.resource('get')
self.post = self.api.resource('post')
self.delete = self.api.resource('delete')
self.patch = self.api.resource('patch')
self.put = self.api.resource('put')
self.options = self.api.resource('options')
@self.post.post()
@self.get.get()
@self.patch.patch()
@self.put.put()
@self.options.options()
def response_200_OK(ctx):
return ctx.Response()
def test_successful_handling_registered_GET(self):
resp = self.call(self.get, 'get')
self.assertEqual(resp.status_code, 200)
def test_successful_handling_registered_POST(self):
resp = self.call(self.post, 'post')
self.assertEqual(resp.status_code, 200)
def test_successful_handling_registered_PUT(self):
resp = self.call(self.put, 'put')
self.assertEqual(resp.status_code, 200)
def test_successful_handling_registered_PATCH(self):
resp = self.call(self.patch, 'patch')
self.assertEqual(resp.status_code, 200)
def test_successful_handling_registered_OPTIONS(self):
resp = self.call(self.options, 'options')
self.assertEqual(resp.status_code, 200)
def test_not_handling_notregistered_POST(self):
for resource in (self.get, self.put, self.patch, self.options):
resp = self.call(resource, 'post')
self.assertEqual(resp.status_code, 405)
def test_not_handling_notregistered_PUT(self):
for resource in (self.get, self.post, self.patch, self.options):
resp = self.call(resource, 'put')
self.assertEqual(resp.status_code, 405)
def test_not_handling_notregistered_GET(self):
for resource in (self.put, self.post, self.patch, self.options):
resp = self.call(resource, 'get')
self.assertEqual(resp.status_code, 405)
def test_not_handling_notregistered_PATCH(self):
for resource in (self.put, self.post, self.get, self.options):
resp = self.call(resource, 'patch')
self.assertEqual(resp.status_code, 405)
def test_not_handling_notregistered_OPTIONS(self):
for resource in (self.put, self.post, self.get, self.patch):
resp = self.call(resource, 'options')
self.assertEqual(resp.status_code, 405)
class ExceptionsHandlingTestCase(ResourceTestCase, SimpleTestCase):
def setUp(self):
super(ExceptionsHandlingTestCase, self).setUp()
self.exc_resource = self.api.resource('exception')
self.notimpl_resource = self.api.resource('not-implemented')
@self.exc_resource.get()
def raise_some_exception(ctx):
raise Exception('Test exception')
@self.notimpl_resource.get()
def raise_not_impl_exception(ctx):
raise NotImplementedError('This code is not implemented')
def test_successful_returning_internal_server_error_status_500(self):
resp = self.call(self.exc_resource, 'get')
self.assertEqual(resp.status_code, 500)
def test_successful_returning_internal_server_error_message(self):
resp = self.call(self.exc_resource, 'get')
resp_json = json.loads(resp.content)
self.assertEqual(resp_json['error'], 'Test exception')
def test_not_returning_internal_server_error_traceback_when_debug_is_off(self):
with self.settings(DEBUG=False):
resp = self.call(self.exc_resource, 'get')
resp_json = json.loads(resp.content)
self.assertFalse('traceback' in resp_json)
def test_successful_returning_internal_server_error_traceback_when_debug_is_on(self):
with self.settings(DEBUG=True):
resp = self.call(self.exc_resource, 'get')
resp_json = json.loads(resp.content)
self.assertTrue('traceback' in resp_json)
def test_returning_internal_server_error_traceback_as_list(self):
with self.settings(DEBUG=True):
resp = self.call(self.exc_resource, 'get')
resp_json = json.loads(resp.content)
self.assertTrue(isinstance(resp_json['traceback'],list))
def test_returning_valid_internal_server_error_traceback_entity(self):
with self.settings(DEBUG=True):
resp = self.call(self.exc_resource, 'get')
resp_json = json.loads(resp.content)
entity = resp_json['traceback'][0]
self.assertTrue('source' in entity)
self.assertTrue('line' in entity)
self.assertTrue('fn' in entity)
self.assertTrue('file' in entity)
def test_successful_returning_not_implemented_error_message(self):
resp = self.call(self.notimpl_resource, 'get')
resp_json = json.loads(resp.content)
self.assertEqual(resp_json['error'], 'This code is not implemented')
def test_successful_returning_not_implemented_error_status_501(self):
resp = self.call(self.notimpl_resource, 'get')
self.assertEqual(resp.status_code, 501)
|
|
import HTMLParser
import json
from xml.etree import ElementTree
from django.conf import settings
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse, HttpResponseBadRequest, Http404
from django.shortcuts import get_object_or_404
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render
from django.template.loader import render_to_string
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _, ugettext_noop
from django.views.decorators.cache import cache_page
from django.views.generic import View
from couchdbkit import ResourceConflict
from casexml.apps.case.models import CASE_STATUS_OPEN
from casexml.apps.case.xml import V2
from casexml.apps.phone.fixtures import generator
from corehq.form_processor.utils import should_use_sql_backend
from corehq.form_processor.utils.general import use_sqlite_backend
from dimagi.utils.logging import notify_exception
from dimagi.utils.parsing import string_to_boolean
from dimagi.utils.web import json_response, get_url_base, json_handler
from touchforms.formplayer.api import DjangoAuth, get_raw_instance, sync_db
from touchforms.formplayer.models import EntrySession
from xml2json.lib import xml2json
from corehq import toggles, privileges
from corehq.apps.accounting.decorators import requires_privilege_for_commcare_user, requires_privilege_with_fallback
from corehq.apps.app_manager.dbaccessors import (
get_latest_build_doc,
get_brief_apps_in_domain,
get_latest_released_app_doc,
get_app_ids_in_domain,
get_current_app,
wrap_app,
)
from corehq.apps.app_manager.exceptions import FormNotFoundException, ModuleNotFoundException
from corehq.apps.app_manager.models import Application, ApplicationBase, RemoteApp
from corehq.apps.app_manager.suite_xml.sections.details import get_instances_for_module
from corehq.apps.app_manager.suite_xml.sections.entries import EntriesHelper
from corehq.apps.app_manager.util import get_cloudcare_session_data
from corehq.apps.cloudcare.api import (
api_closed_to_status,
CaseAPIResult,
get_app_json,
get_filtered_cases,
get_filters_from_request_params,
get_open_form_sessions,
look_up_app_json,
)
from corehq.apps.cloudcare.dbaccessors import get_cloudcare_apps
from corehq.apps.cloudcare.decorators import require_cloudcare_access
from corehq.apps.cloudcare.exceptions import RemoteAppError
from corehq.apps.cloudcare.models import ApplicationAccess
from corehq.apps.cloudcare.touchforms_api import BaseSessionDataHelper, CaseSessionDataHelper
from corehq.apps.domain.decorators import login_and_domain_required, login_or_digest_ex, domain_admin_required
from corehq.apps.groups.models import Group
from corehq.apps.reports.formdetails import readable
from corehq.apps.style.decorators import (
use_datatables,
use_jquery_ui,
)
from corehq.apps.users.models import CouchUser, CommCareUser
from corehq.apps.users.views import BaseUserSettingsView
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors, FormAccessors, LedgerAccessors
from corehq.form_processor.exceptions import XFormNotFound, CaseNotFound
from corehq.util.quickcache import skippable_quickcache
from corehq.util.xml_utils import indent_xml
from corehq.apps.analytics.tasks import track_clicked_preview_on_hubspot
from corehq.apps.analytics.utils import get_meta
@require_cloudcare_access
def default(request, domain):
return HttpResponseRedirect(reverse('cloudcare_main', args=[domain, '']))
def insufficient_privilege(request, domain, *args, **kwargs):
context = {
'domain': domain,
}
return render(request, "cloudcare/insufficient_privilege.html", context)
class CloudcareMain(View):
@use_datatables
@use_jquery_ui
@method_decorator(require_cloudcare_access)
@method_decorator(requires_privilege_for_commcare_user(privileges.CLOUDCARE))
def dispatch(self, request, *args, **kwargs):
return super(CloudcareMain, self).dispatch(request, *args, **kwargs)
def get(self, request, domain, urlPath):
try:
preview = string_to_boolean(request.GET.get("preview", "false"))
except ValueError:
# this is typically only set at all if it's intended to be true so this
# is a reasonable default for "something went wrong"
preview = True
app_access = ApplicationAccess.get_by_domain(domain)
accessor = CaseAccessors(domain)
if not preview:
apps = get_cloudcare_apps(domain)
if request.project.use_cloudcare_releases:
if (toggles.CLOUDCARE_LATEST_BUILD.enabled(domain) or
toggles.CLOUDCARE_LATEST_BUILD.enabled(request.couch_user.username)):
get_cloudcare_app = get_latest_build_doc
else:
get_cloudcare_app = get_latest_released_app_doc
apps = map(
lambda app: get_cloudcare_app(domain, app['_id']),
apps,
)
apps = filter(None, apps)
apps = map(wrap_app, apps)
# convert to json
apps = [get_app_json(app) for app in apps]
else:
# legacy functionality - use the latest build regardless of stars
apps = [get_latest_build_doc(domain, app['_id']) for app in apps]
apps = [get_app_json(ApplicationBase.wrap(app)) for app in apps if app]
else:
# big TODO: write a new apps view for Formplayer, can likely cut most out now
if toggles.USE_FORMPLAYER_FRONTEND.enabled(domain):
apps = get_cloudcare_apps(domain)
else:
apps = get_brief_apps_in_domain(domain)
apps = [get_app_json(app) for app in apps if app and (
isinstance(app, RemoteApp) or app.application_version == V2)]
meta = get_meta(request)
track_clicked_preview_on_hubspot(request.couch_user, request.COOKIES, meta)
# trim out empty apps
apps = filter(lambda app: app, apps)
apps = filter(lambda app: app_access.user_can_access_app(request.couch_user, app), apps)
def _default_lang():
if apps:
# unfortunately we have to go back to the DB to find this
return Application.get(apps[0]["_id"]).default_language
else:
return "en"
# default language to user's preference, followed by
# first app's default, followed by english
language = request.couch_user.language or _default_lang()
def _url_context():
# given a url path, returns potentially the app, parent, and case, if
# they're selected. the front end optimizes with these to avoid excess
# server calls
# there's an annoying dependency between this logic and backbone's
# url routing that seems hard to solve well. this needs to be synced
# with apps.js if anything changes
# for apps anything with "view/app/" works
# for cases it will be:
# "view/:app/:module/:form/case/:case/"
# if there are parent cases, it will be:
# "view/:app/:module/:form/parent/:parent/case/:case/
# could use regex here but this is actually simpler with the potential
# absence of a trailing slash
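# worked example (illustrative values, not from the original code):
#   urlPath = "view/app123/0/1/parent/parent456/case/case789/"
#   split == ['view', 'app123', '0', '1', 'parent', 'parent456', 'case', 'case789', '']
#   => app_id == 'app123', parent_id == 'parent456', case_id == 'case789'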
split = urlPath.split('/')
app_id = split[1] if len(split) >= 2 else None
if len(split) >= 5 and split[4] == "parent":
parent_id = split[5]
case_id = split[7] if len(split) >= 8 else None
else:
parent_id = None
case_id = split[5] if len(split) >= 6 else None
app = None
if app_id:
if app_id in [a['_id'] for a in apps]:
app = look_up_app_json(domain, app_id)
else:
messages.info(request, _("That app is no longer valid. Try using the "
"navigation links to select an app."))
if app is None and len(apps) == 1:
app = look_up_app_json(domain, apps[0]['_id'])
def _get_case(domain, case_id):
case = accessor.get_case(case_id)
assert case.domain == domain, "case %s not in %s" % (case_id, domain)
return case.to_api_json()
case = _get_case(domain, case_id) if case_id else None
if parent_id is None and case is not None:
parent_id = case.get('indices', {}).get('parent', {}).get('case_id', None)
parent = _get_case(domain, parent_id) if parent_id else None
return {
"app": app,
"case": case,
"parent": parent
}
context = {
"domain": domain,
"language": language,
"apps": apps,
"apps_raw": apps,
"preview": preview,
"maps_api_key": settings.GMAPS_API_KEY,
"sessions_enabled": request.couch_user.is_commcare_user(),
"use_cloudcare_releases": request.project.use_cloudcare_releases,
"username": request.user.username,
"formplayer_url": settings.FORMPLAYER_URL,
'use_sqlite_backend': use_sqlite_backend(domain),
}
context.update(_url_context())
if toggles.USE_FORMPLAYER_FRONTEND.enabled(domain):
return render(request, "cloudcare/formplayer_home.html", context)
else:
return render(request, "cloudcare/cloudcare_home.html", context)
class FormplayerMain(View):
preview = False
urlname = 'formplayer_main'
@use_datatables
@use_jquery_ui
@method_decorator(require_cloudcare_access)
@method_decorator(requires_privilege_for_commcare_user(privileges.CLOUDCARE))
def dispatch(self, request, *args, **kwargs):
return super(FormplayerMain, self).dispatch(request, *args, **kwargs)
def fetch_app(self, domain, app_id):
username = self.request.couch_user.username
if (toggles.CLOUDCARE_LATEST_BUILD.enabled(domain) or
toggles.CLOUDCARE_LATEST_BUILD.enabled(username)):
return get_latest_build_doc(domain, app_id)
else:
return get_latest_released_app_doc(domain, app_id)
def get(self, request, domain):
app_access = ApplicationAccess.get_by_domain(domain)
app_ids = get_app_ids_in_domain(domain)
apps = map(
lambda app_id: self.fetch_app(domain, app_id),
app_ids,
)
apps = filter(None, apps)
apps = filter(lambda app: app['cloudcare_enabled'] or self.preview, apps)
apps = filter(lambda app: app_access.user_can_access_app(request.couch_user, app), apps)
apps = sorted(apps, key=lambda app: app['name'])
def _default_lang():
try:
return apps[0]['langs'][0]
except Exception:
return 'en'
# default language to user's preference, followed by
# first app's default, followed by english
language = request.couch_user.language or _default_lang()
context = {
"domain": domain,
"language": language,
"apps": apps,
"maps_api_key": settings.GMAPS_API_KEY,
"username": request.user.username,
"formplayer_url": settings.FORMPLAYER_URL,
"single_app_mode": False,
"home_url": reverse(self.urlname, args=[domain]),
}
return render(request, "cloudcare/formplayer_home.html", context)
class FormplayerMainPreview(FormplayerMain):
preview = True
urlname = 'formplayer_main_preview'
def fetch_app(self, domain, app_id):
return get_current_app(domain, app_id)
class FormplayerPreviewSingleApp(View):
urlname = 'formplayer_single_app'
@use_datatables
@use_jquery_ui
@method_decorator(require_cloudcare_access)
@method_decorator(requires_privilege_for_commcare_user(privileges.CLOUDCARE))
def dispatch(self, request, *args, **kwargs):
return super(FormplayerPreviewSingleApp, self).dispatch(request, *args, **kwargs)
def get(self, request, domain, app_id, **kwargs):
app_access = ApplicationAccess.get_by_domain(domain)
app = get_current_app(domain, app_id)
if not app_access.user_can_access_app(request.couch_user, app):
raise Http404()
def _default_lang():
try:
return app['langs'][0]
except Exception:
return 'en'
# default language to user's preference, followed by
# first app's default, followed by english
language = request.couch_user.language or _default_lang()
context = {
"domain": domain,
"language": language,
"apps": [app],
"maps_api_key": settings.GMAPS_API_KEY,
"username": request.user.username,
"formplayer_url": settings.FORMPLAYER_URL,
"single_app_mode": True,
"home_url": reverse(self.urlname, args=[domain, app_id]),
}
return render(request, "cloudcare/formplayer_home.html", context)
@login_and_domain_required
@requires_privilege_for_commcare_user(privileges.CLOUDCARE)
def form_context(request, domain, app_id, module_id, form_id):
app = Application.get(app_id)
form_url = '{}{}'.format(
settings.CLOUDCARE_BASE_URL or get_url_base(),
reverse('download_xform', args=[domain, app_id, module_id, form_id])
)
case_id = request.GET.get('case_id')
instance_id = request.GET.get('instance_id')
try:
form = app.get_module(module_id).get_form(form_id)
except (FormNotFoundException, ModuleNotFoundException):
raise Http404()
form_name = form.name.values()[0]
# make the name for the session we will use with the case and form
session_name = u'{app} > {form}'.format(
app=app.name,
form=form_name,
)
if case_id:
case = CaseAccessors(domain).get_case(case_id)
session_name = u'{0} - {1}'.format(session_name, case.name)
root_context = {
'form_url': form_url,
}
if instance_id:
try:
root_context['instance_xml'] = FormAccessors(domain).get_form(instance_id).get_xml()
except XFormNotFound:
raise Http404()
session_extras = {'session_name': session_name, 'app_id': app._id}
session_extras.update(get_cloudcare_session_data(domain, form, request.couch_user))
delegation = request.GET.get('task-list') == 'true'
session_helper = CaseSessionDataHelper(domain, request.couch_user, case_id, app, form, delegation=delegation)
return json_response(session_helper.get_full_context(
root_context,
session_extras
))
cloudcare_api = login_or_digest_ex(allow_cc_users=True)
def get_cases_vary_on(request, domain):
request_params = request.GET
return [
request.couch_user.get_id
if request.couch_user.is_commcare_user() else request_params.get('user_id', ''),
request_params.get('ids_only', 'false'),
request_params.get('case_id', ''),
request_params.get('footprint', 'false'),
request_params.get('closed', 'false'),
json.dumps(get_filters_from_request_params(request_params)),
domain,
]
def get_cases_skip_arg(request, domain):
"""
When this function returns True, skippable_quickcache will not go to the cache for the result. By default,
if neither of these params is passed into the function, nothing will be cached. The cache will always be
skipped if ids_only is false.
The caching is mainly a hack for touchforms to respond more quickly. Touchforms makes repeated requests to
get the list of case_ids associated with a user.
"""
if not toggles.CLOUDCARE_CACHE.enabled(domain):
return True
request_params = request.GET
return (not string_to_boolean(request_params.get('use_cache', 'false')) or
not string_to_boolean(request_params.get('ids_only', 'false')))
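# e.g. (illustrative): with the CLOUDCARE_CACHE toggle enabled,
# ?use_cache=true&ids_only=true -> False (result may be cached);
# any other combination of the two params -> True (cache skipped)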
@cloudcare_api
@skippable_quickcache(get_cases_vary_on, get_cases_skip_arg, timeout=240 * 60)
def get_cases(request, domain):
request_params = request.GET
if request.couch_user.is_commcare_user():
user_id = request.couch_user.get_id
else:
user_id = request_params.get("user_id", "")
if not user_id and not request.couch_user.is_web_user():
return HttpResponseBadRequest("Must specify user_id!")
ids_only = string_to_boolean(request_params.get("ids_only", "false"))
case_id = request_params.get("case_id", "")
footprint = string_to_boolean(request_params.get("footprint", "false"))
accessor = CaseAccessors(domain)
if toggles.HSPH_HACK.enabled(domain):
hsph_case_id = request_params.get('hsph_hack', None)
if hsph_case_id != 'None' and hsph_case_id and user_id:
case = accessor.get_case(hsph_case_id)
usercase_id = CommCareUser.get_by_user_id(user_id).get_usercase_id()
usercase = accessor.get_case(usercase_id) if usercase_id else None
return json_response(map(
lambda case: CaseAPIResult(domain=domain, id=case['_id'], couch_doc=case, id_only=ids_only),
filter(None, [case, case.parent, usercase])
))
if case_id and not footprint:
# short circuit everything else and just return the case
# NOTE: this allows any user in the domain to access any case given
# they know its ID, which is slightly different from the previous
# behavior (can only access things you own + footprint). If we want to
# change this contract we would need to update this to check the
# owned case list + footprint
case = accessor.get_case(case_id)
assert case.domain == domain
cases = [CaseAPIResult(domain=domain, id=case_id, couch_doc=case, id_only=ids_only)]
else:
filters = get_filters_from_request_params(request_params)
status = api_closed_to_status(request_params.get('closed', 'false'))
case_type = filters.get('properties/case_type', None)
cases = get_filtered_cases(domain, status=status, case_type=case_type,
user_id=user_id, filters=filters,
footprint=footprint, ids_only=ids_only,
strip_history=True)
return json_response(cases)
@cloudcare_api
def filter_cases(request, domain, app_id, module_id, parent_id=None):
app = Application.get(app_id)
module = app.get_module(module_id)
auth_cookie = request.COOKIES.get('sessionid')
requires_parent_cases = string_to_boolean(request.GET.get('requires_parent_cases', 'false'))
xpath = EntriesHelper.get_filter_xpath(module)
instances = get_instances_for_module(app, module, additional_xpaths=[xpath])
extra_instances = [{'id': inst.id, 'src': inst.src} for inst in instances]
use_formplayer = toggles.USE_FORMPLAYER.enabled(domain)
accessor = CaseAccessors(domain)
# touchforms doesn't like this to be escaped
xpath = HTMLParser.HTMLParser().unescape(xpath)
case_type = module.case_type
if xpath or should_use_sql_backend(domain):
# if we need to do a custom filter, send it to touchforms for processing
additional_filters = {
"properties/case_type": case_type,
"footprint": True
}
helper = BaseSessionDataHelper(domain, request.couch_user)
result = helper.filter_cases(xpath, additional_filters, DjangoAuth(auth_cookie),
extra_instances=extra_instances, use_formplayer=use_formplayer)
if result.get('status', None) == 'error':
code = result.get('code', 500)
message = result.get('message', _("Something went wrong filtering your cases."))
if code == 500:
notify_exception(None, message=message)
return json_response(message, status_code=code)
case_ids = result.get("cases", [])
else:
# otherwise just use our built in api with the defaults
case_ids = [res.id for res in get_filtered_cases(
domain,
status=CASE_STATUS_OPEN,
case_type=case_type,
user_id=request.couch_user._id,
footprint=True,
ids_only=True,
)]
cases = accessor.get_cases(case_ids)
if parent_id:
cases = filter(lambda c: c.parent and c.parent.case_id == parent_id, cases)
# refilter these because we might have accidentally included footprint cases
# in the results from touchforms. this is a little hacky but the easiest
# (quick) workaround. should be revisited when we optimize the case list.
cases = filter(lambda c: c.type == case_type, cases)
cases = [c.to_api_json(lite=True) for c in cases if c]
response = {'cases': cases}
if requires_parent_cases:
# Subtract already fetched cases from parent list
parent_ids = set(map(lambda c: c['indices']['parent']['case_id'], cases)) - \
set(map(lambda c: c['case_id'], cases))
parents = accessor.get_cases(list(parent_ids))
parents = [c.to_api_json(lite=True) for c in parents]
response.update({'parents': parents})
return json_response(response)
@cloudcare_api
def get_apps_api(request, domain):
return json_response(get_cloudcare_apps(domain))
@cloudcare_api
def get_app_api(request, domain, app_id):
try:
return json_response(look_up_app_json(domain, app_id))
except RemoteAppError:
raise Http404()
@cloudcare_api
@cache_page(60 * 30)
def get_fixtures(request, domain, user_id, fixture_id=None):
try:
user = CommCareUser.get_by_user_id(user_id)
except CouchUser.AccountTypeError:
err = ("You can't use case sharing or fixtures as a %s. "
"Login as a mobile worker and try again.") % settings.WEB_USER_TERM,
return HttpResponse(err, status=412, content_type="text/plain")
if not user:
raise Http404
assert user.is_member_of(domain)
restore_user = user.to_ota_restore_user()
if not fixture_id:
ret = ElementTree.Element("fixtures")
for fixture in generator.get_fixtures(restore_user, version=V2):
ret.append(fixture)
return HttpResponse(ElementTree.tostring(ret), content_type="text/xml")
else:
fixture = generator.get_fixture_by_id(fixture_id, restore_user, version=V2)
if not fixture:
raise Http404
assert len(fixture.getchildren()) == 1, 'fixture {} expected 1 child but found {}'.format(
fixture_id, len(fixture.getchildren())
)
return HttpResponse(ElementTree.tostring(fixture.getchildren()[0]), content_type="text/xml")
@cloudcare_api
def get_sessions(request, domain):
# is it ok to pull user from the request? other api calls seem to have an explicit 'user' param
skip = request.GET.get('skip') or 0
limit = request.GET.get('limit') or 10
return json_response(get_open_form_sessions(request.user, skip=skip, limit=limit))
@cloudcare_api
def get_session_context(request, domain, session_id):
# NOTE: although this view does not appear to be called from anywhere, it is, and cannot be deleted.
# The javascript routing in cloudcare depends on it, though constructs it manually in a hardcoded way.
# see getSessionContextUrl in cloudcare/util.js
# Adding 'cloudcare_get_session_context' to this comment so that the url name passes a grep test
try:
session = EntrySession.objects.get(session_id=session_id)
except EntrySession.DoesNotExist:
session = None
if request.method == 'DELETE':
if session:
session.delete()
return json_response({'status': 'success'})
else:
helper = BaseSessionDataHelper(domain, request.couch_user)
return json_response(helper.get_full_context({
'session_id': session_id,
'app_id': session.app_id if session else None
}))
@cloudcare_api
def get_ledgers(request, domain):
"""
Returns ledgers associated with a case in the format:
{
"section_id": {
"product_id": amount,
"product_id": amount,
...
},
...
}
Note: this only works for the Couch backend
"""
request_params = request.GET
case_id = request_params.get('case_id')
if not case_id:
return json_response(
{'message': 'You must specify a case id to make this query.'},
status_code=400
)
try:
case = CaseAccessors(domain).get_case(case_id)
except CaseNotFound:
raise Http404()
ledger_map = LedgerAccessors(domain).get_case_ledger_state(case.case_id)
def custom_json_handler(obj):
if hasattr(obj, 'stock_on_hand'):
return obj.stock_on_hand
return json_handler(obj)
return json_response(
{
'entity_id': case_id,
'ledger': ledger_map,
},
default=custom_json_handler,
)
@cloudcare_api
def sync_db_api(request, domain):
auth_cookie = request.COOKIES.get('sessionid')
username = request.GET.get('username')
try:
response = sync_db(username, domain, DjangoAuth(auth_cookie))
except Exception, e:
return json_response(
{'status': 'error', 'message': unicode(e)},
status_code=500
)
else:
return json_response(response)
class ReadableQuestions(View):
urlname = 'readable_questions'
@csrf_exempt
@method_decorator(cloudcare_api)
def dispatch(self, request, *args, **kwargs):
return super(ReadableQuestions, self).dispatch(request, *args, **kwargs)
def post(self, request, domain):
instance_xml = request.POST.get('instanceXml').encode('utf-8')
app_id = request.POST.get('appId')
xmlns = request.POST.get('xmlns')
_, form_data_json = xml2json(instance_xml)
pretty_questions = readable.get_questions(domain, app_id, xmlns)
readable_form = readable.get_readable_form_data(form_data_json, pretty_questions)
rendered_readable_form = render_to_string(
'reports/form/partials/readable_form.html',
{'questions': readable_form}
)
return json_response({
'form_data': rendered_readable_form,
'form_questions': pretty_questions
})
@cloudcare_api
def render_form(request, domain):
# get session
session_id = request.GET.get('session_id')
session = get_object_or_404(EntrySession, session_id=session_id)
try:
raw_instance = get_raw_instance(session_id, domain)
except Exception, e:
return HttpResponse(e, status=500, content_type="text/plain")
xmlns = raw_instance["xmlns"]
form_data_xml = raw_instance["output"]
_, form_data_json = xml2json(form_data_xml)
pretty_questions = readable.get_questions(domain, session.app_id, xmlns)
readable_form = readable.get_readable_form_data(form_data_json, pretty_questions)
rendered_readable_form = render_to_string(
'reports/form/partials/readable_form.html',
{'questions': readable_form}
)
return json_response({
'form_data': rendered_readable_form,
'instance_xml': indent_xml(form_data_xml)
})
class HttpResponseConflict(HttpResponse):
status_code = 409
class EditCloudcareUserPermissionsView(BaseUserSettingsView):
template_name = 'cloudcare/config.html'
urlname = 'cloudcare_app_settings'
@property
def page_title(self):
if toggles.USE_FORMPLAYER_FRONTEND.enabled(self.domain):
return _("Web Apps Permissions")
else:
return _("CloudCare Permissions")
@method_decorator(domain_admin_required)
@method_decorator(requires_privilege_with_fallback(privileges.CLOUDCARE))
def dispatch(self, request, *args, **kwargs):
return super(EditCloudcareUserPermissionsView, self).dispatch(request, *args, **kwargs)
@property
def page_context(self):
apps = get_cloudcare_apps(self.domain)
access = ApplicationAccess.get_template_json(self.domain, apps)
groups = Group.by_domain(self.domain)
return {
'apps': apps,
'groups': groups,
'access': access,
}
def put(self, request, *args, **kwargs):
j = json.loads(request.body)
old = ApplicationAccess.get_by_domain(self.domain)
new = ApplicationAccess.wrap(j)
old.restrict = new.restrict
old.app_groups = new.app_groups
try:
if old._rev != new._rev or old._id != new._id:
raise ResourceConflict()
old.save()
except ResourceConflict:
return HttpResponseConflict()
else:
return json_response({'_rev': old._rev})
|
|
from __future__ import unicode_literals
from operator import attrgetter
from django.apps import apps
from django.contrib.contenttypes.models import ContentType
from django.contrib.sessions.backends.db import SessionStore
from django.db.models import Count
from django.db.models.query_utils import deferred_class_factory, DeferredAttribute
from django.test import TestCase, override_settings
from .models import (
ResolveThis, Item, RelatedItem, Child, Leaf, Proxy, SimpleItem, Feature,
ItemAndSimpleItem, OneToOneItem, SpecialFeature, Location, Request,
ProxyRelated, Derived, Base,
)
class DeferRegressionTest(TestCase):
def test_basic(self):
# Deferred fields should really be deferred and not accidentally use
# the field's default value just because they aren't passed to __init__
Item.objects.create(name="first", value=42)
obj = Item.objects.only("name", "other_value").get(name="first")
# Accessing "name" doesn't trigger a new database query. Accessing
# "value" or "text" should.
with self.assertNumQueries(0):
self.assertEqual(obj.name, "first")
self.assertEqual(obj.other_value, 0)
with self.assertNumQueries(1):
self.assertEqual(obj.value, 42)
with self.assertNumQueries(1):
self.assertEqual(obj.text, "xyzzy")
with self.assertNumQueries(0):
self.assertEqual(obj.text, "xyzzy")
# Regression test for #10695. Make sure different instances don't
# inadvertently share data in the deferred descriptor objects.
i = Item.objects.create(name="no I'm first", value=37)
items = Item.objects.only("value").order_by("-value")
self.assertEqual(items[0].name, "first")
self.assertEqual(items[1].name, "no I'm first")
RelatedItem.objects.create(item=i)
r = RelatedItem.objects.defer("item").get()
self.assertEqual(r.item_id, i.id)
self.assertEqual(r.item, i)
# Some further checks for select_related() and inherited model
# behavior (regression for #10710).
c1 = Child.objects.create(name="c1", value=42)
c2 = Child.objects.create(name="c2", value=37)
Leaf.objects.create(name="l1", child=c1, second_child=c2)
obj = Leaf.objects.only("name", "child").select_related()[0]
self.assertEqual(obj.child.name, "c1")
self.assertQuerysetEqual(
Leaf.objects.select_related().only("child__name", "second_child__name"), [
"l1",
],
attrgetter("name")
)
# Model instances with deferred fields should still return the same
# content types as their non-deferred versions (bug #10738).
ctype = ContentType.objects.get_for_model
c1 = ctype(Item.objects.all()[0])
c2 = ctype(Item.objects.defer("name")[0])
c3 = ctype(Item.objects.only("name")[0])
self.assertTrue(c1 is c2 is c3)
# Regression for #10733 - only() can be used on a model with two
# foreign keys.
results = Leaf.objects.only("name", "child", "second_child").select_related()
self.assertEqual(results[0].child.name, "c1")
self.assertEqual(results[0].second_child.name, "c2")
results = Leaf.objects.only(
"name", "child", "second_child", "child__name", "second_child__name"
).select_related()
self.assertEqual(results[0].child.name, "c1")
self.assertEqual(results[0].second_child.name, "c2")
# Regression for #16409 - make sure defer() and only() work with annotate()
self.assertIsInstance(
list(SimpleItem.objects.annotate(Count('feature')).defer('name')),
list)
self.assertIsInstance(
list(SimpleItem.objects.annotate(Count('feature')).only('name')),
list)
def test_ticket_11936(self):
app_config = apps.get_app_config("defer_regress")
# Regression for #11936 - get_models should not return deferred models
# by default. Run a couple of defer queries so that app registry must
# contain some deferred classes. It might contain a lot more classes
# depending on the order the tests are run.
list(Item.objects.defer("name"))
list(Child.objects.defer("value"))
klasses = {model.__name__ for model in app_config.get_models()}
self.assertIn("Child", klasses)
self.assertIn("Item", klasses)
self.assertNotIn("Child_Deferred_value", klasses)
self.assertNotIn("Item_Deferred_name", klasses)
self.assertFalse(any(k._deferred for k in app_config.get_models()))
klasses_with_deferred = {model.__name__ for model in app_config.get_models(include_deferred=True)}
self.assertIn("Child", klasses_with_deferred)
self.assertIn("Item", klasses_with_deferred)
self.assertIn("Child_Deferred_value", klasses_with_deferred)
self.assertIn("Item_Deferred_name", klasses_with_deferred)
self.assertTrue(any(k._deferred for k in app_config.get_models(include_deferred=True)))
@override_settings(SESSION_SERIALIZER='django.contrib.sessions.serializers.PickleSerializer')
def test_ticket_12163(self):
# Test for #12163 - Pickling error saving session with unsaved model
# instances.
SESSION_KEY = '2b1189a188b44ad18c35e1baac6ceead'
item = Item()
item._deferred = False
s = SessionStore(SESSION_KEY)
s.clear()
s["item"] = item
s.save()
s = SessionStore(SESSION_KEY)
s.modified = True
s.save()
i2 = s["item"]
self.assertFalse(i2._deferred)
def test_ticket_16409(self):
# Regression for #16409 - make sure defer() and only() work with annotate()
self.assertIsInstance(
list(SimpleItem.objects.annotate(Count('feature')).defer('name')),
list)
self.assertIsInstance(
list(SimpleItem.objects.annotate(Count('feature')).only('name')),
list)
def test_ticket_23270(self):
Derived.objects.create(text="foo", other_text="bar")
with self.assertNumQueries(1):
obj = Base.objects.select_related("derived").defer("text")[0]
self.assertIsInstance(obj.derived, Derived)
self.assertEqual("bar", obj.derived.other_text)
self.assertNotIn("text", obj.__dict__)
self.assertEqual(1, obj.derived.base_ptr_id)
def test_only_and_defer_usage_on_proxy_models(self):
# Regression for #15790 - only() broken for proxy models
proxy = Proxy.objects.create(name="proxy", value=42)
msg = 'QuerySet.only() returned bogus results with proxy models'
dp = Proxy.objects.only('other_value').get(pk=proxy.pk)
self.assertEqual(dp.name, proxy.name, msg=msg)
self.assertEqual(dp.value, proxy.value, msg=msg)
# also test things with .defer()
msg = 'QuerySet.defer() returned bogus results with proxy models'
dp = Proxy.objects.defer('name', 'text', 'value').get(pk=proxy.pk)
self.assertEqual(dp.name, proxy.name, msg=msg)
self.assertEqual(dp.value, proxy.value, msg=msg)
def test_resolve_columns(self):
ResolveThis.objects.create(num=5.0, name='Foobar')
qs = ResolveThis.objects.defer('num')
self.assertEqual(1, qs.count())
self.assertEqual('Foobar', qs[0].name)
def test_reverse_one_to_one_relations(self):
# Refs #14694. Test reverse relations which are known unique (reverse
# side has o2ofield or unique FK) - the o2o case
item = Item.objects.create(name="first", value=42)
o2o = OneToOneItem.objects.create(item=item, name="second")
self.assertEqual(len(Item.objects.defer('one_to_one_item__name')), 1)
self.assertEqual(len(Item.objects.select_related('one_to_one_item')), 1)
self.assertEqual(len(Item.objects.select_related(
'one_to_one_item').defer('one_to_one_item__name')), 1)
self.assertEqual(len(Item.objects.select_related('one_to_one_item').defer('value')), 1)
# Make sure that `only()` doesn't break when we pass in a unique relation,
# rather than a field on the relation.
self.assertEqual(len(Item.objects.only('one_to_one_item')), 1)
with self.assertNumQueries(1):
i = Item.objects.select_related('one_to_one_item')[0]
self.assertEqual(i.one_to_one_item.pk, o2o.pk)
self.assertEqual(i.one_to_one_item.name, "second")
with self.assertNumQueries(1):
i = Item.objects.select_related('one_to_one_item').defer(
'value', 'one_to_one_item__name')[0]
self.assertEqual(i.one_to_one_item.pk, o2o.pk)
self.assertEqual(i.name, "first")
with self.assertNumQueries(1):
self.assertEqual(i.one_to_one_item.name, "second")
with self.assertNumQueries(1):
self.assertEqual(i.value, 42)
def test_defer_with_select_related(self):
item1 = Item.objects.create(name="first", value=47)
item2 = Item.objects.create(name="second", value=42)
simple = SimpleItem.objects.create(name="simple", value="23")
ItemAndSimpleItem.objects.create(item=item1, simple=simple)
obj = ItemAndSimpleItem.objects.defer('item').select_related('simple').get()
self.assertEqual(obj.item, item1)
self.assertEqual(obj.item_id, item1.id)
obj.item = item2
obj.save()
obj = ItemAndSimpleItem.objects.defer('item').select_related('simple').get()
self.assertEqual(obj.item, item2)
self.assertEqual(obj.item_id, item2.id)
def test_proxy_model_defer_with_selected_related(self):
# Regression for #22050
item = Item.objects.create(name="first", value=47)
RelatedItem.objects.create(item=item)
# Defer fields with only()
obj = ProxyRelated.objects.all().select_related().only('item__name')[0]
with self.assertNumQueries(0):
self.assertEqual(obj.item.name, "first")
with self.assertNumQueries(1):
self.assertEqual(obj.item.value, 47)
def test_only_with_select_related(self):
# Test for #17485.
item = SimpleItem.objects.create(name='first', value=47)
feature = Feature.objects.create(item=item)
SpecialFeature.objects.create(feature=feature)
qs = Feature.objects.only('item__name').select_related('item')
self.assertEqual(len(qs), 1)
qs = SpecialFeature.objects.only('feature__item__name').select_related('feature__item')
self.assertEqual(len(qs), 1)
def test_deferred_class_factory(self):
new_class = deferred_class_factory(
Item,
('this_is_some_very_long_attribute_name_so_modelname_truncation_is_triggered',))
self.assertEqual(
new_class.__name__,
'Item_Deferred_this_is_some_very_long_attribute_nac34b1f495507dad6b02e2cb235c875e')
def test_deferred_class_factory_already_deferred(self):
deferred_item1 = deferred_class_factory(Item, ('name',))
deferred_item2 = deferred_class_factory(deferred_item1, ('value',))
self.assertIs(deferred_item2._meta.proxy_for_model, Item)
self.assertFalse(isinstance(deferred_item2.__dict__.get('name'), DeferredAttribute))
self.assertTrue(isinstance(deferred_item2.__dict__.get('value'), DeferredAttribute))
def test_deferred_class_factory_no_attrs(self):
deferred_cls = deferred_class_factory(Item, ())
self.assertFalse(deferred_cls._deferred)
class DeferAnnotateSelectRelatedTest(TestCase):
def test_defer_annotate_select_related(self):
location = Location.objects.create()
Request.objects.create(location=location)
self.assertIsInstance(list(Request.objects
.annotate(Count('items')).select_related('profile', 'location')
.only('profile', 'location')), list)
self.assertIsInstance(list(Request.objects
.annotate(Count('items')).select_related('profile', 'location')
.only('profile__profile1', 'location__location1')), list)
self.assertIsInstance(list(Request.objects
.annotate(Count('items')).select_related('profile', 'location')
.defer('request1', 'request2', 'request3', 'request4')), list)
|
|
from AppKit import *
from vanillaBase import VanillaBaseControl, VanillaError
_tickPositionMap = {
"left": NSTickMarkLeft,
"right": NSTickMarkRight,
"top": NSTickMarkAbove,
"bottom": NSTickMarkBelow,
}
class Slider(VanillaBaseControl):
"""
A standard slider control. Sliders can be vertical or horizontal and
they can show tick marks or not show tick marks.::
from vanilla import *
class SliderDemo(object):
def __init__(self):
self.w = Window((200, 43))
self.w.slider = Slider((10, 10, -10, 23),
tickMarkCount=10,
callback=self.sliderCallback)
self.w.open()
def sliderCallback(self, sender):
print "slider edit!", sender.get()
SliderDemo()
**posSize** Tuple of form *(left, top, width, height)* representing the position and
size of the slider. The size of the slider should match the appropriate value for
the given *sizeStyle*.
+---------------------------+
| **Standard Dimensions** |
+---------------------------+
| *without ticks* |
+---------+---+----+---+----+
| Regular | W | 15 | H | 15 |
+---------+---+----+---+----+
| Small | W | 12 | H | 11 |
+---------+---+----+---+----+
| Mini | W | 10 | H | 10 |
+---------+---+----+---+----+
| *with ticks* |
+---------+---+----+---+----+
| Regular | W | 24 | H | 23 |
+---------+---+----+---+----+
| Small | W | 17 | H | 17 |
+---------+---+----+---+----+
| Mini | W | 16 | H | 16 |
+---------+---+----+---+----+
**minValue** The minimum value allowed by the slider.
**maxValue** The maximum value allowed by the slider.
**value** The initial value of the slider.
**tickMarkCount** The number of tick marks to be displayed on the slider.
If *None* is given, no tick marks will be displayed.
**stopOnTickMarks** Boolean representing if the slider knob should only
stop on the tick marks.
**continuous** Boolean representing if the assigned callback should be
called during slider editing. If *False* is given, the callback will be
called after the editing has finished.
**callback** The method to be called when the slider has been edited.
**sizeStyle** A string representing the desired size style of the slider.
The options are:
+-----------+
| "regular" |
+-----------+
| "small" |
+-----------+
| "mini" |
+-----------+
"""
nsSliderClass = NSSlider
allFrameAdjustments = {
"H-Slider-Above": {
"mini": (0, 0, 0, 0),
"small": (0, -1, 0, -1),
"regular": (-2, -2, 4, 2),
},
"H-Slider-Below": {
"mini": (0, 0, 0, 0),
"small": (0, 0, 0, 0),
"regular": (-2, 0, 4, 1),
},
"H-Slider-None": {
"mini": (0, -1, 0, 2),
"small": (0, -2, 0, 3),
"regular": (-2, -4, 4, 6),
},
"V-Slider-Left": {
"mini": (0, -1, 1, 1),
"small": (0, -1, 1, 1),
"regular": (0, -3, 2, 5),
},
"V-Slider-Right": {
"mini": (0, -1, 1, 1),
"small": (-1, -1, 2, 1),
"regular": (-2, -3, 2, 5),
},
"V-Slider-None": {
"mini": (0, -1, 1, 1),
"small": (-2, -1, 4, 1),
"regular": (-3, -3, 6, 5),
},
}
def __init__(self, posSize, minValue=0, maxValue=100, value=50,
tickMarkCount=None, stopOnTickMarks=False, continuous=True,
callback=None, sizeStyle="regular"):
self._setupView(self.nsSliderClass, posSize, callback=callback)
self._setSizeStyle(sizeStyle)
self._nsObject.setMinValue_(minValue)
self._nsObject.setMaxValue_(maxValue)
self._nsObject.setFloatValue_(value)
if tickMarkCount:
self._nsObject.setNumberOfTickMarks_(tickMarkCount)
if stopOnTickMarks:
self._nsObject.setAllowsTickMarkValuesOnly_(True)
if continuous:
self._nsObject.setContinuous_(True)
else:
self._nsObject.setContinuous_(False)
def getNSSlider(self):
"""
Return the *NSSlider* that this object wraps.
"""
return self._nsObject
def _adjustPosSize(self, frame):
# temporarily store some data for positioning reference
w, h = self._posSize[2:]
if w > h:
prefix = "H-"
isVertical = False
else:
isVertical = True
prefix = "V-"
tickPos = "None"
tickMarkCount = self._nsObject.numberOfTickMarks()
if tickMarkCount:
tickPos = self._nsObject.tickMarkPosition()
if isVertical:
if tickPos == NSTickMarkLeft:
tickPos = "Left"
elif tickPos == NSTickMarkRight:
tickPos = "Right"
# during __init__, the _nsObject will be unable
# to determine if the slider is horizontal or
# vertical, so it will return the position for
# horizontal sliders. override that and default
# to right here.
else:
tickPos = "Right"
else:
if tickPos == NSTickMarkBelow:
tickPos = "Below"
elif tickPos == NSTickMarkAbove:
tickPos = "Above"
sliderType = prefix + "Slider-" + tickPos
self.frameAdjustments = self.allFrameAdjustments[sliderType]
# now let the super class do the work
return super(Slider, self)._adjustPosSize(frame)
def get(self):
"""
Get the value of the slider.
"""
return self._nsObject.floatValue()
def set(self, value):
"""
Set the value of the slider.
"""
self._nsObject.setFloatValue_(value)
def setMinValue(self, value):
"""
Set the minimum value allowed by the slider.
"""
self._nsObject.setMinValue_(value)
def setMaxValue(self, value):
"""
Set the maximum value allowed by the slider.
"""
self._nsObject.setMaxValue_(value)
def setTickMarkCount(self, value):
"""
Set the number of tick marks on the slider.
"""
self._nsObject.setNumberOfTickMarks_(value)
def setTickMarkPosition(self, value):
"""
Set the position of the tick marks on the slider.
For vertical sliders, the options are:
+---------+
| "left" |
+---------+
| "right" |
+---------+
For horizontal sliders, the options are:
+----------+
| "top" |
+----------+
| "bottom" |
+----------+
"""
# don't rely on self._nsObject.isVertical here
# because if this is called before the object
# has been added to an open window, the isVertical
# method is unable to determine horizontal or vertical
w, h = self._posSize[2:]
if w > h:
isVertical = False
else:
isVertical = True
if isVertical:
if value == "top" or value == "bottom":
raise VanillaError("vertical sliders can only position tick marks at 'left' or 'right'")
else:
if value == "left" or value == "right":
raise VanillaError("horizontal sliders can only position tick marks at 'top' or 'bottom'")
position = _tickPositionMap[value]
self._nsObject.setTickMarkPosition_(position)
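# --- Added usage sketch (not part of the original module) -------------------
# A minimal, hedged example mirroring the SliderDemo in the class docstring:
# it adds tick marks and positions them with setTickMarkPosition(). Like the
# docstring demo it assumes the vanilla package and a running Cocoa event
# loop are available; the window size and value range here are arbitrary.
if __name__ == "__main__":
    from vanilla import Window, Slider

    class TickedSliderDemo(object):

        def __init__(self):
            self.w = Window((200, 43))
            self.w.slider = Slider((10, 10, 180, 23),
                                   minValue=0, maxValue=10, value=5,
                                   tickMarkCount=11, stopOnTickMarks=True,
                                   callback=self.sliderCallback)
            # horizontal sliders accept "top" or "bottom"
            self.w.slider.setTickMarkPosition("bottom")
            self.w.open()

        def sliderCallback(self, sender):
            print(sender.get())

    TickedSliderDemo()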
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.runtime.north.plugins.storage import storage_factory
from calvin.runtime.north.plugins.coders.messages import message_coder_factory
from calvin.runtime.south.plugins.async import async
from calvin.utilities import calvinlogger
from calvin.utilities.calvin_callback import CalvinCB
from calvin.actor import actorport
_log = calvinlogger.get_logger(__name__)
class Storage(object):
"""
Storage helper functions.
All functions in this class should be async and never block.
"""
def __init__(self):
self.localstore = {}
self.localstore_sets = {}
self.started = False
self.storage = storage_factory.get("dht") # TODO: read storage type from config?
self.coder = message_coder_factory.get("json") # TODO: always json? append/remove requires json at the moment
self.flush_delayedcall = None
self.flush_timout = 1
### Storage life cycle management ###
def flush_localdata(self):
""" Write data in localstore to storage
"""
_log.debug("Flush local storage data")
self.flush_delayedcall = None
for key in self.localstore:
self.storage.set(key=key, value=self.localstore[key],
cb=CalvinCB(func=self.set_cb, org_key=None, org_value=None, org_cb=None))
for key, value in self.localstore_sets.iteritems():
if value['+']:
_log.debug("Flush append on key %s: %s" % (key, list(value['+'])))
coded_value = self.coder.encode(list(value['+']))
self.storage.append(key=key, value=coded_value,
cb=CalvinCB(func=self.append_cb, org_key=None, org_value=None, org_cb=None))
if value['-']:
_log.debug("Flush remove on key %s: %s" % (key, list(value['-'])))
coded_value = self.coder.encode(list(value['-']))
self.storage.remove(key=key, value=coded_value,
cb=CalvinCB(func=self.remove_cb, org_key=None, org_value=None, org_cb=None))
def started_cb(self, *args, **kwargs):
""" Called when storage has started, flushes localstore
"""
if args[0] == True:
self.started = True
self.flush_localdata()
if kwargs["org_cb"]:
kwargs["org_cb"](args[0])
def start(self, cb=None):
""" Start storage
"""
self.storage.start(cb=CalvinCB(self.started_cb, org_cb=cb))
def stop(self, cb=None):
""" Stop storage
"""
if self.started:
self.storage.stop(cb=cb)
self.started = False
### Storage operations ###
def set_cb(self, key, value, org_key, org_value, org_cb):
""" set callback, on error store in localstore and retry after flush_timout
"""
if value == True:
if org_cb:
org_cb(key=key, value=True)
if key in self.localstore:
del self.localstore[key]
else:
_log.error("Failed to store %s" % key)
if org_key and org_value:
if org_value is not None:
self.localstore[key] = org_value
if org_cb:
org_cb(key=key, value=False)
if self.flush_delayedcall is None:
self.flush_delayedcall = async.DelayedCall(self.flush_timout, self.flush_localdata)
else:
self.flush_delayedcall.reset()
def set(self, prefix, key, value, cb):
""" Set key: prefix+key value: value
"""
if value:
value = self.coder.encode(value)
if prefix + key in self.localstore_sets:
del self.localstore_sets[prefix + key]
if self.started:
self.storage.set(key=prefix + key, value=value, cb=CalvinCB(func=self.set_cb, org_key=key, org_value=value, org_cb=cb))
else:
if value:
self.localstore[prefix + key] = value
if cb:
cb(key=key, value=True)
def get_cb(self, key, value, org_cb, org_key):
""" get callback
"""
if value:
value = self.coder.decode(value)
org_cb(org_key, value)
def get(self, prefix, key, cb):
""" Get value for key: prefix+key, first look in localstore
"""
if cb:
if prefix + key in self.localstore:
value = self.localstore[prefix + key]
if value:
value = self.coder.decode(value)
cb(key=key, value=value)
else:
try:
self.storage.get(key=prefix + key, cb=CalvinCB(func=self.get_cb, org_cb=cb, org_key=key))
except:
_log.error("Failed to get: %s" % key)
cb(key=key, value=False)
def get_concat_cb(self, key, value, org_cb, org_key):
""" get callback
"""
if value:
value = self.coder.decode(value)
org_cb(org_key, value)
def get_concat(self, prefix, key, cb):
""" Get value for key: prefix+key, first look in localstore
The return value is a list. The storage may be only eventually consistent:
a remove might have reached only part of the storage, so the returned
list can still contain removed items or be missing recently added ones.
"""
if cb:
if prefix + key in self.localstore_sets:
value = self.localstore_sets[prefix + key]
# Return the set that we intended to append since that's all we have until it is synced
cb(key=key, value=list(value['+']))
else:
try:
self.storage.get_concat(key=prefix + key, cb=CalvinCB(func=self.get_concat_cb, org_cb=cb, org_key=key))
except:
_log.error("Failed to get: %s" % key)
cb(key=key, value=False)
def append_cb(self, key, value, org_key, org_value, org_cb):
""" append callback, on error retry after flush_timout
"""
if value == True:
if org_cb:
org_cb(key=org_key, value=True)
if key in self.localstore_sets:
if self.localstore_sets[key]['-']:
self.localstore_sets[key]['+'] = set([])
else:
del self.localstore_sets[key]
else:
_log.error("Failed to update %s" % key)
if org_cb:
org_cb(key=org_key, value=False)
if self.flush_delayedcall is None:
self.flush_delayedcall = async.DelayedCall(self.flush_timout, self.flush_localdata)
else:
self.flush_delayedcall.reset()
def append(self, prefix, key, value, cb):
""" set operation append on key: prefix+key value: value is a list of items
"""
# Keep local storage for sets updated until confirmed
if (prefix + key) in self.localstore_sets:
# Append value items
self.localstore_sets[prefix + key]['+'] |= set(value)
# Don't remove value items any more
self.localstore_sets[prefix + key]['-'] -= set(value)
else:
self.localstore_sets[prefix + key] = {'+': set(value), '-': set([])}
if self.started:
coded_value = self.coder.encode(list(self.localstore_sets[prefix + key]['+']))
self.storage.append(key=prefix + key, value=coded_value,
cb=CalvinCB(func=self.append_cb, org_key=key, org_value=value, org_cb=cb))
else:
if cb:
cb(key=key, value=True)
def remove_cb(self, key, value, org_key, org_value, org_cb):
""" remove callback, on error retry after flush_timout
"""
if value == True:
if org_cb:
org_cb(key=org_key, value=True)
if key in self.localstore_sets:
if self.localstore_sets[key]['+']:
self.localstore_sets[key]['-'] = set([])
else:
del self.localstore_sets[key]
else:
_log.error("Failed to update %s" % key)
if org_cb:
org_cb(key=org_key, value=False)
if self.flush_delayedcall is None:
self.flush_delayedcall = async.DelayedCall(self.flush_timout, self.flush_localdata)
else:
self.flush_delayedcall.reset()
def remove(self, prefix, key, value, cb):
""" set operation remove on key: prefix+key value: value is a list of items
"""
# Keep local storage for sets updated until confirmed
if (prefix + key) in self.localstore_sets:
# Don't append value items any more
self.localstore_sets[prefix + key]['+'] -= set(value)
# Remove value items
self.localstore_sets[prefix + key]['-'] |= set(value)
else:
self.localstore_sets[prefix + key] = {'+': set([]), '-': set(value)}
if self.started:
coded_value = self.coder.encode(list(self.localstore_sets[prefix + key]['-']))
self.storage.remove(key=prefix + key, value=coded_value,
cb=CalvinCB(func=self.remove_cb, org_key=key, org_value=value, org_cb=cb))
else:
if cb:
cb(key=key, value=True)
def delete(self, prefix, key, cb):
""" Delete key: prefix+key (value set to None)
"""
if prefix + key in self.localstore:
del self.localstore[prefix + key]
if (prefix + key) in self.localstore_sets:
del self.localstore_sets[prefix + key]
if self.started:
self.set(prefix, key, None, cb)
else:
if cb:
cb(key, True)
### Calvin object handling ###
def add_node(self, node, cb=None):
"""
Add node to storage
"""
self.set(prefix="node-", key=node.id, value={"uri": node.uri,
"control_uri": node.control_uri,
"attributes": node.attributes}, cb=cb)
# Add to index after a while since storage not up and running anyway
#async.DelayedCall(1.0, self._add_node_index, node)
self._add_node_index(node)
def _add_node_index(self, node, cb=None):
try:
for index in node.attributes:
# TODO add callback, but currently no users supply a cb anyway
self.add_index(index, node.id)
except:
pass
def get_node(self, node_id, cb=None):
"""
Get node data from storage
"""
self.get(prefix="node-", key=node_id, cb=cb)
def delete_node(self, node, cb=None):
"""
Delete node from storage
"""
self.delete(prefix="node-", key=node.id, cb=None if node.attributes else cb)
if node.attributes:
self._delete_node_index(node, cb=cb)
def _delete_node_index(self, node, cb=None):
try:
counter = [len(node.attributes)] # counter value by reference used in callback
for index in node.attributes:
self.remove_index(index, node.id, cb=CalvinCB(self._delete_node_cb, counter=counter, org_cb=cb))
# The remove index gets 1 second otherwise we call the callback anyway, i.e. stop the node
async.DelayedCall(1.0, self._delete_node_timeout_cb, counter=counter, org_cb=cb)
except:
if cb:
cb()
def _delete_node_cb(self, counter, org_cb, *args, **kwargs):
counter[0] = counter[0] - 1
if counter[0] == 0:
org_cb(*args, **kwargs)
def _delete_node_timeout_cb(self, counter, org_cb):
if counter[0] > 0:
_log.debug("Delete node index not finished but call callback anyway")
org_cb()
def add_application(self, application, cb=None):
"""
Add application to storage
"""
self.set(prefix="application-", key=application.id,
value={"name": application.name,
"actors": application.actors,
"origin_node_id": application.origin_node_id},
cb=cb)
def get_application(self, application_id, cb=None):
"""
Get application from storage
"""
self.get(prefix="application-", key=application_id, cb=cb)
def delete_application(self, application_id, cb=None):
"""
Delete application from storage
"""
self.delete(prefix="application-", key=application_id, cb=cb)
def add_actor(self, actor, node_id, cb=None):
"""
Add actor and its ports to storage
"""
data = {"name": actor.name, "type": actor._type, "node_id": node_id}
inports = []
for p in actor.inports.values():
port = {"id": p.id, "name": p.name}
inports.append(port)
self.add_port(p, node_id, actor.id, "in")
data["inports"] = inports
outports = []
for p in actor.outports.values():
port = {"id": p.id, "name": p.name}
outports.append(port)
self.add_port(p, node_id, actor.id, "out")
data["outports"] = outports
self.set(prefix="actor-", key=actor.id, value=data, cb=cb)
def get_actor(self, actor_id, cb=None):
"""
Get actor from storage
"""
self.get(prefix="actor-", key=actor_id, cb=cb)
def delete_actor(self, actor_id, cb=None):
"""
Delete actor from storage
"""
self.delete(prefix="actor-", key=actor_id, cb=cb)
def add_port(self, port, node_id, actor_id=None, direction=None, cb=None):
"""
Add port to storage
"""
if direction is None:
if isinstance(port, actorport.InPort):
direction = "in"
else:
direction = "out"
if actor_id is None:
actor_id = port.owner.id
data = {"name": port.name, "connected": port.is_connected(
), "node_id": node_id, "actor_id": actor_id, "direction": direction}
if direction == "out":
if port.is_connected():
data["peers"] = port.get_peers()
else:
data["peers"] = []
elif direction == "in":
if port.is_connected():
data["peer"] = port.get_peer()
else:
data["peer"] = None
self.set(prefix="port-", key=port.id, value=data, cb=cb)
def get_port(self, port_id, cb=None):
"""
Get port from storage
"""
self.get(prefix="port-", key=port_id, cb=cb)
def delete_port(self, port_id, cb=None):
"""
Delete port from storage
"""
self.delete(prefix="port-", key=port_id, cb=cb)
def index_cb(self, key, value, org_cb, index_items):
"""
Collect all the index levels operations into one callback
"""
_log.debug("index cb key:%s, value:%s, index_items:%s" % (key, value, index_items))
#org_key = key.partition("-")[2]
org_key = key
# call org_cb with False on the first failed level only (index_items is cleared so it is not called again)
if not value and index_items:
org_cb(key=org_key, value=False)
del index_items[:]
if org_key in index_items:
# remove this index level from list
index_items.remove(org_key)
# If all done send True
if not index_items:
org_cb(key=org_key, value=True)
def add_index(self, index, value, root_prefix_level=2, cb=None):
"""
Add value (typically a node id) to the storage as a set.
index: a string with slash as delimiter for finer level of index,
e.g. node/address/example_street/3/buildingA/level3/room3003,
node/affiliation/owner/com.ericsson/Harald,
node/affiliation/name/com.ericsson/laptop
value: the value that is to be added to the set stored at each level of the index
root_prefix_level: the top level of the index that can be searched;
with root_prefix_level=1 the searchable roots are e.g. node/address, node/affiliation
cb: will be called when done.
"""
# TODO this implementation will store the value to each level of the index.
# When time permits a proper implementation should be done with for example
# a prefix hash table on top of the DHT or using other storage backend with
# prefix search built in.
_log.debug("add index %s: %s" % (index, value))
# Make the list of index levels that should be stored
items = index.lstrip("/").split("/")
root = "/".join(items[:root_prefix_level])
del items[:root_prefix_level]
items.insert(0, root)
# Store index at all levels
_str = ""
indexes = []
for i in items:
_str = _str + "/" + i
indexes.append(_str)
# make copy of indexes since altered in callbacks
for i in indexes[:]:
self.append(prefix="index-", key=i, value=[value],
cb=CalvinCB(self.index_cb, org_cb=cb, index_items=indexes) if cb else None)
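# Illustration (added comment, not in the original module): with the default
# root_prefix_level=2 a call such as
#     storage.add_index("node/address/example_street/3", node_id)
# appends node_id to the sets stored under the keys
#     "index-/node/address",
#     "index-/node/address/example_street" and
#     "index-/node/address/example_street/3",
# so a later get_index() on any of those levels can find the node.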
def remove_index(self, index, value, root_prefix_level=2, cb=None):
"""
Remove value (typically a node id) from the storage as a set.
index: a string with slash as delimiter for finer level of index,
e.g. node/address/example_street/3/buildingA/level3/room3003,
node/affiliation/owner/com.ericsson/Harald,
node/affiliation/name/com.ericsson/laptop
value: the value that is to be removed from the set stored at each level of the index
root_prefix_level: the top level of the index that can be searched;
with root_prefix_level=1 the searchable roots are e.g. node/address, node/affiliation
cb: will be called when done.
"""
# TODO this implementation will delete the value from each level of the index.
# When time permits a proper implementation should be done with for example
# a prefix hash table on top of the DHT or using other storage backend with
# prefix search built in.
# TODO Currently we don't go deeper than the specified index for a remove,
# e.g. node/affiliation/owner/com.ericsson would remove the value from
# all deeper indices. But no current use case exists either.
_log.debug("remove index %s: %s" % (index, value))
# Make the list of index levels that should be removed on
items = index.lstrip("/").split("/")
root = "/".join(items[:root_prefix_level])
del items[:root_prefix_level]
items.insert(0, root)
# Remove index for all levels
_str = ""
indexes = []
for i in items:
_str = _str + "/" + i
indexes.append(_str)
# make copy of indexes since altered in callbacks
for i in indexes[:]:
self.remove(prefix="index-", key=i, value=[value],
cb=CalvinCB(self.index_cb, org_cb=cb, index_items=indexes) if cb else None)
def get_index(self, index, cb=None):
"""
Get index from the storage.
index: a string with slash as delimiter for finer level of index,
e.g. node/address/example_street/3/buildingA/level3/room3003,
node/affiliation/owner/com.ericsson/Harald,
node/affiliation/name/com.ericsson/laptop
cb: will be called when done. Should expect to be called several times with
partial results. Currently only called once.
Since the storage might be eventually consistent the caller must expect that
the list can contain node ids that have been removed and can be missing node
ids that have not yet reached the storage.
"""
# TODO this implementation will get the value from the level of the index.
# When time permits a proper implementation should be done with for example
# a prefix hash table on top of the DHT or using other storage backend with
# prefix search built in. A proper implementation might also have several callbacks
# since might get index from several levels of index trie, and instead of building a complete
# list before returning better to return iteratively for nodes with less memory
# or system with large number of nodes, might also need a timeout.
if not index.startswith("/"):
index = "/" + index
_log.debug("get index %s" % (index))
self.get_concat(prefix="index-", key=index, cb=cb)
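# --- Added usage sketch (not part of the original module) -------------------
# Hedged illustration of the local buffering life cycle; names and values are
# hypothetical. Before start() has completed, set()/append() only fill
# self.localstore / self.localstore_sets; started_cb() then calls
# flush_localdata() to push the buffered data, and failed writes are retried
# by set_cb()/append_cb()/remove_cb() via a DelayedCall after flush_timout
# seconds.
#
#     storage = Storage()
#     storage.set("node-", "some-id", {"uri": "calvinip://host:5000"}, cb=None)  # buffered locally
#     storage.start()  # started_cb() -> flush_localdata() writes the buffered entry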
|
|
from requests import Response
from MicrosoftApiModule import *
import demistomock as demisto
import pytest
import datetime
TOKEN = 'dummy_token'
TENANT = 'dummy_tenant'
REFRESH_TOKEN = 'dummy_refresh'
AUTH_ID = 'dummy_auth_id'
ENC_KEY = 'dummy_enc_key'
TOKEN_URL = 'mock://dummy_url'
APP_NAME = 'ms-graph-mail-listener'
BASE_URL = 'https://graph.microsoft.com/v1.0/'
OK_CODES = (200, 201, 202)
CLIENT_ID = 'dummy_client'
CLIENT_SECRET = 'dummy_secret'
APP_URL = 'https://login.microsoftonline.com/dummy_tenant/oauth2/v2.0/token'
SCOPE = 'https://graph.microsoft.com/.default'
RESOURCE = 'https://defender.windows.com/shtak'
def oproxy_client_tenant():
tenant_id = TENANT
auth_id = f'{AUTH_ID}@{TOKEN_URL}'
enc_key = ENC_KEY
app_name = APP_NAME
base_url = BASE_URL
ok_codes = OK_CODES
return MicrosoftClient(self_deployed=False, auth_id=auth_id, enc_key=enc_key, app_name=app_name,
tenant_id=tenant_id, base_url=base_url, verify=True, proxy=False, ok_codes=ok_codes)
def oproxy_client_multi_resource():
tenant_id = TENANT
auth_id = f'{AUTH_ID}@{TOKEN_URL}'
enc_key = ENC_KEY
app_name = APP_NAME
base_url = BASE_URL
ok_codes = OK_CODES
return MicrosoftClient(self_deployed=False, auth_id=auth_id, enc_key=enc_key, app_name=app_name,
tenant_id=tenant_id, base_url=base_url, verify=True, proxy=False,
ok_codes=ok_codes, multi_resource=True, resources=['https://resource1.com', 'https://resource2.com'])
def oproxy_client_refresh():
refresh_token = REFRESH_TOKEN
auth_id = f'{AUTH_ID}@{TOKEN_URL}'
enc_key = ENC_KEY
app_name = APP_NAME
base_url = BASE_URL
ok_codes = OK_CODES
return MicrosoftClient(self_deployed=False, auth_id=auth_id, enc_key=enc_key, app_name=app_name,
refresh_token=refresh_token, base_url=base_url, verify=True, proxy=False, ok_codes=ok_codes)
def self_deployed_client():
tenant_id = TENANT
client_id = CLIENT_ID
client_secret = CLIENT_SECRET
base_url = BASE_URL
resource = RESOURCE
ok_codes = OK_CODES
return MicrosoftClient(self_deployed=True, tenant_id=tenant_id, auth_id=client_id, enc_key=client_secret,
resource=resource, base_url=base_url, verify=True, proxy=False, ok_codes=ok_codes)
def test_error_parser(mocker):
mocker.patch.object(demisto, 'error')
err = Response()
err.status_code = 401
err._content = b'{"error":{"code":"code","message":"message"}}'
response = MicrosoftClient.error_parser(err)
assert response == 'code: message'
def test_page_not_found_error(mocker):
"""
Given:
- The http_request command for making MS API calls.
When:
- The response returned is a 404 response.
Then:
- Validate that the exception is handled in the http_request function of MicrosoftClient.
"""
error_404 = Response()
error_404._content = b'{"error": {"code": "Request_ResourceNotFound", "message": "Resource ' \
b'"NotExistingUser does not exist."}}'
error_404.status_code = 404
client = self_deployed_client()
mocker.patch.object(BaseClient, '_http_request', return_value=error_404)
mocker.patch.object(client, 'get_access_token')
try:
client.http_request()
except Exception as e: # Validate that a `NotFoundError` was raised
assert type(e).__name__ == 'NotFoundError'
def test_epoch_seconds(mocker):
mocker.patch.object(MicrosoftClient, '_get_utcnow', return_value=datetime.datetime(2019, 12, 24, 14, 12, 0, 586636))
mocker.patch.object(MicrosoftClient, '_get_utcfromtimestamp', return_value=datetime.datetime(1970, 1, 1, 0, 0))
integer = MicrosoftClient.epoch_seconds()
assert integer == 1577196720
@pytest.mark.parametrize('client, tokens, context', [(oproxy_client_refresh(), (TOKEN, 3600, REFRESH_TOKEN),
{'access_token': TOKEN,
'valid_until': 3605,
'current_refresh_token': REFRESH_TOKEN}),
(oproxy_client_tenant(), (TOKEN, 3600, ''),
{'access_token': TOKEN,
'valid_until': 3605,
'current_refresh_token': ''}),
(self_deployed_client(),
(TOKEN, 3600, REFRESH_TOKEN),
{'access_token': TOKEN,
'valid_until': 3605,
'current_refresh_token': REFRESH_TOKEN})])
def test_get_access_token_no_context(mocker, client, tokens, context):
mocker.patch.object(demisto, 'getIntegrationContext', return_value={})
mocker.patch.object(demisto, 'setIntegrationContext')
mocker.patch.object(client, '_oproxy_authorize', return_value=tokens)
mocker.patch.object(client, '_get_self_deployed_token', return_value=tokens)
mocker.patch.object(client, 'epoch_seconds', return_value=10)
# Arrange
token = client.get_access_token()
integration_context = demisto.setIntegrationContext.call_args[0][0]
# Assert
assert token == TOKEN
assert integration_context == context
@pytest.mark.parametrize('client, tokens, context', [(oproxy_client_refresh(),
(TOKEN, 3600, REFRESH_TOKEN),
{'access_token': TOKEN,
'valid_until': 3605,
'current_refresh_token': REFRESH_TOKEN}),
(oproxy_client_tenant(), (TOKEN, 3600, ''),
{'access_token': TOKEN,
'valid_until': 3605,
'current_refresh_token': REFRESH_TOKEN}),
(self_deployed_client(), (TOKEN, 3600, REFRESH_TOKEN),
{'access_token': TOKEN,
'valid_until': 3605,
'current_refresh_token': REFRESH_TOKEN})])
def test_get_access_token_with_context_valid(mocker, client, tokens, context):
# Set
mocker.patch.object(demisto, 'getIntegrationContext', return_value=context)
mocker.patch.object(demisto, 'setIntegrationContext')
mocker.patch.object(client, '_oproxy_authorize', return_value=tokens)
mocker.patch.object(client, '_get_self_deployed_token', return_value=tokens)
mocker.patch.object(client, 'epoch_seconds', return_value=3600)
# Arrange
token = client.get_access_token()
set_context_count = demisto.setIntegrationContext.call_count
auth_call_oproxy = client._oproxy_authorize.call_count
auth_call_self_deployed = client._get_self_deployed_token.call_count
# Assert
assert set_context_count == 0
assert auth_call_oproxy == 0
assert auth_call_self_deployed == 0
assert token == TOKEN
@pytest.mark.parametrize('client, tokens, context_invalid, context_valid',
[(oproxy_client_refresh(),
(TOKEN, 3600, REFRESH_TOKEN),
{'access_token': TOKEN,
'valid_until': 3605,
'current_refresh_token': REFRESH_TOKEN},
{'access_token': TOKEN,
'valid_until': 8595,
'current_refresh_token': REFRESH_TOKEN}),
(oproxy_client_tenant(),
(TOKEN, 3600, ''),
{'access_token': TOKEN,
'valid_until': 3605,
'current_refresh_token': REFRESH_TOKEN},
{'access_token': TOKEN,
'valid_until': 8595,
'current_refresh_token': ''}),
(self_deployed_client(),
(TOKEN, 3600, ''),
{'access_token': TOKEN,
'valid_until': 3605,
'current_refresh_token': ''},
{'access_token': TOKEN,
'valid_until': 8595,
'current_refresh_token': ''})])
def test_get_access_token_with_context_invalid(mocker, client, tokens, context_invalid, context_valid):
# Set
mocker.patch.object(demisto, 'getIntegrationContext', return_value=context_invalid)
mocker.patch.object(demisto, 'setIntegrationContext')
mocker.patch.object(client, '_oproxy_authorize', return_value=tokens)
mocker.patch.object(client, '_get_self_deployed_token', return_value=tokens)
mocker.patch.object(client, 'epoch_seconds', side_effect=[4000, 5000])
# Arrange
token = client.get_access_token()
integration_context = demisto.setIntegrationContext.call_args[0][0]
# Assert
assert token == TOKEN
assert integration_context == context_valid
@pytest.mark.parametrize('client, enc_content, tokens, res', [(oproxy_client_tenant(), TENANT,
{'access_token': TOKEN, 'expires_in': 3600},
(TOKEN, 3600, '')),
(oproxy_client_refresh(), REFRESH_TOKEN,
{'access_token': TOKEN,
'expires_in': 3600,
'refresh_token': REFRESH_TOKEN},
(TOKEN, 3600, REFRESH_TOKEN))])
def test_oproxy_request(mocker, requests_mock, client, enc_content, tokens, res):
def get_encrypted(content, key):
return content + key
# Set
body = {
'app_name': APP_NAME,
'registration_id': AUTH_ID,
'encrypted_token': enc_content + ENC_KEY,
'scope': None,
'resource': ''
}
mocker.patch.object(client, '_add_info_headers')
mocker.patch.object(client, 'get_encrypted', side_effect=get_encrypted)
requests_mock.post(
TOKEN_URL,
json=tokens)
# Arrange
req_res = client._oproxy_authorize()
req_body = requests_mock._adapter.last_request.json()
assert req_body == body
assert req_res == res
def test_self_deployed_request(requests_mock):
import urllib
# Set
client = self_deployed_client()
body = {
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET,
'grant_type': 'client_credentials',
'scope': SCOPE,
'resource': RESOURCE
}
requests_mock.post(
APP_URL,
json={'access_token': TOKEN, 'expires_in': '3600'})
# Arrange
req_res = client._get_self_deployed_token()
req_body = requests_mock._adapter.last_request._request.body
assert req_body == urllib.parse.urlencode(body)
assert req_res == (TOKEN, 3600, '')
def test_oproxy_use_resource(mocker):
"""
Given:
multi_resource client
When
When configuration is oproxy authentication type and multi resource
Then
Verify post request is using resource value
"""
resource = 'https://resource2.com'
client = oproxy_client_multi_resource()
context = {"access_token": TOKEN}
mocked_post = mocker.patch('requests.post', json=context, status_code=200, ok=True)
mocker.patch.object(client, 'get_encrypted', return_value='encrypt')
client._oproxy_authorize(resource)
assert resource == mocked_post.call_args_list[0][1]['json']['resource']
@pytest.mark.parametrize('endpoint', ['com', 'gcc-high', 'dod', 'de', 'cn'])
def test_national_endpoints(mocker, endpoint):
"""
Given:
self-deployed client
When:
Configuring the client with different national endpoints
Then:
Verify that the token_retrieval_url and the scope are set correctly
"""
tenant_id = TENANT
auth_id = f'{AUTH_ID}@{TOKEN_URL}'
enc_key = ENC_KEY
app_name = APP_NAME
base_url = BASE_URL
ok_codes = OK_CODES
client = MicrosoftClient(self_deployed=True, auth_id=auth_id, enc_key=enc_key, app_name=app_name,
tenant_id=tenant_id, base_url=base_url, verify=True, proxy=False, ok_codes=ok_codes,
endpoint=endpoint)
assert client.azure_ad_endpoint == TOKEN_RETRIEVAL_ENDPOINTS[endpoint]
assert client.scope == f'{GRAPH_ENDPOINTS[endpoint]}/.default'
|
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------------------------------------------------
# EXTERNAL PYTHON PACKAGES
# - nibabel : <http://nipy.sourceforge.net/nibabel/>
# - numpy : <http://www.numpy.org>
# - scipy
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Copyright (c) 2013 NeuroPoly, Polytechnique Montreal <www.neuropoly.info>
# Authors: Simon LEVY
#
# License: see the LICENSE.TXT
# ======================================================================================================================
# BE CAREFUL: we assume here that we have an RPI orientation, where the Z axis is the inferior-superior direction
import os
import getopt
import sys
import sct_utils as sct
import scipy.ndimage
try:
import nibabel
except ImportError:
print '--- nibabel not installed! Exit program. ---'
sys.exit(2)
try:
import numpy as np
except ImportError:
print '--- numpy not installed! Exit program. ---'
sys.exit(2)
#=======================================================================================================================
# main
#=======================================================================================================================
def main():
# Variable initialization
strategy = ""
fname_centerline = ""
fname_input_image = ""
fname_output_image = ""
fname_mask = ""
# extract path of the script
path_script = os.path.dirname(__file__) + '/'
# Check input param
try:
opts, args = getopt.getopt(sys.argv[1:], 'hi:o:m:f:s:c:')
except getopt.GetoptError as err:
print str(err)
usage()
for opt, arg in opts:
if opt == '-h':
usage()
elif opt in ('-i'):
fname_input_image = arg
elif opt in ('-o'):
fname_output_image = arg
elif opt in ('-m'):
fname_mask = arg
elif opt in ('-f'):
filter_type = str(arg)
elif opt in ('-s'):
strategy = str(arg)
elif opt in ('-c'):
fname_centerline = arg
# display usage if a mandatory argument is not provided
if fname_input_image == '' or fname_mask == '' or (strategy=="along_centerline" and fname_centerline==""):
print("\n \n \n All mandatory arguments are not provided \n \n \n")
usage()
# check existence of input files
sct.check_file_exist(fname_input_image)
sct.check_file_exist(fname_mask)
if strategy == "along_centerline":
sct.check_file_exist(fname_centerline)
# extract path/file/extension
path_input_image, file_input_image, ext_input_image = sct.extract_fname(fname_input_image)
path_output_image, file_output_image, ext_output_image = sct.extract_fname(fname_output_image)
# read nifti input file
img = nibabel.load(fname_input_image)
# 3d array for each x y z voxel values for the input nifti image
data = img.get_data()
hdr = img.get_header()
# read nifti mask file
mask = nibabel.load(fname_mask)
# 3d array for each x y z voxel values for the input nifti mask
mask_data = mask.get_data()
mask_hdr = mask.get_header()
# Compute the image to extract the smoothed spinal cord data from according to the chosen strategy
if strategy == "mean_per_slice":
print("\n \nThe smoothing strategy is to apply the smoothing to an image of the spinal cord completed"
" with the mean value of the spinal cord for each z-slice...\n \n")
data = smooth_mean_per_slice(data, mask_data)
elif strategy == "along_centerline":
print("\n \nThe smoothing strategy is to apply the smoothing to the original data along the spinal cord "
"in the direction of the centerline...\n \n")
data = smooth_along_centerline(data, fname_input_image, file_input_image, ext_input_image, mask_data,
fname_centerline)
elif strategy == "total_image" or "":
print("\n \nThe smoothing strategy is to apply the smoothing to the entire original image...\n \n")
data = smooth_total(data,mask_data)
else:
print("\n \nThe smoothing strategy is not correct\n \n")
usage()
# Return the nifti corrected data
hdr.set_data_dtype('uint8') # set imagetype to uint8
print '\nWrite NIFTI volumes...'
img = nibabel.Nifti1Image(data, None, hdr)
nibabel.save(img, 'tmp.' + file_output_image + '.nii')
sct.generate_output_file('tmp.' + file_output_image + '.nii', './', file_output_image, ext_output_image)
#=======================================================================================================================
# Functions used in the main
#=======================================================================================================================
def apply_filter(data, filter_type='gaussian'):
"""Apply the chosen filter to the image"""
if filter_type == 'gaussian':
print '\nApply a Gaussian filter...'
sigma = 1 # standard deviation for Gaussian kernel
data_filtered = scipy.ndimage.filters.gaussian_filter(data, sigma)
return data_filtered
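# Note (added comment): scipy's gaussian_filter takes sigma in array-index
# (voxel) units, so sigma = 1 smooths over roughly one voxel in each direction.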
def smooth_mean_per_slice(data, mask_data):
"""Apply the smoothing to an image of the spinal cord completed with the mean value of
the spinal cord for each z-slice and return the original data with the smoothed spinal cord"""
# Create a new image keeping only the spinal cord and assigning to the other voxels the mean value across the spinal cord
# Find the voxels that belong to the spinal cord
X, Y, Z = (mask_data > 0).nonzero()
# Define useful variables
N = len(X) # number of voxels in the spinal cord segmentation
Z_min = min(Z) # min z of the segmentation
Z_max = max(Z) # max z of the segmentation
Z_nb = Z_max - Z_min + 1 # number of different z-slice of the segmentation
x = len(data) # number of voxels in the X direction
y = len(data[1]) # number of voxels in the Y direction
z = len(data[1][1]) # number of voxels in the Z direction
## Count the number of voxels belonging to the spinal cord for each slice according to the segmentation
#nb_vox_per_slice = [0 for i in range(0,Z_nb)] # initialization
#z_index = 0
#while z_index < Z_nb:
# nb_vox_per_slice[z_index]+= 1
# z_index+= 1
# Sort by z-slice the values of the voxels belonging to the spinal cord
sc_values = [[] for Z_index in range(0, Z_nb)] # initialization
for vox_index in range(0, N):
sc_values[Z[vox_index] - Z_min].append(data[X[vox_index]][Y[vox_index]][Z[vox_index]])
# Compute the mean value for each slice of the spinal cord
print '\nCompute the mean value for each slice of the spinal cord...'
sc_mean_per_slice = [0 for Z_index in range(0, Z_nb)] # initialization
for Z_index in range(0, Z_nb):
sc_mean_per_slice[Z_index] = sum(sc_values[Z_index]) / len(sc_values[Z_index])
# Define a new image assigning, to all the voxels that don't belong to the spinal cord, the mean value across their slice
print '\nCreate a new image to smooth keeping only the spinal cord and completing with the previously computed mean values...'
sc_data = [[[0 for k in range(0, z)] for j in range(0, y)] for i in
range(0, x)] # initialization by the size of the original data
for k in range(0, z):
for j in range(0, y):
for i in range(0, x):
if k < Z_min:
sc_data[i][j][k] = sc_mean_per_slice[0]
elif Z_min <= k <= Z_max:
sc_data[i][j][k] = sc_mean_per_slice[k - Z_min]
elif k > Z_max:
sc_data[i][j][k] = sc_mean_per_slice[len(sc_mean_per_slice) - 1]
# Assign the right value to the voxels that belong to the spinal cord
for i in range(0, N):
sc_data[X[i]][Y[i]][Z[i]] = data[X[i]][Y[i]][Z[i]]
# Apply the filter to this new image
smoothed_sc_data = apply_filter(sc_data)
# Replace the original spinal cord data by the smoothed one in the original data
for i in range(0, N):
data[X[i]][Y[i]][Z[i]] = smoothed_sc_data[X[i]][Y[i]][Z[i]]
# Return the corrected data
return data
def smooth_total(data, mask_data):
"""Apply the smoothing to the original data and return the original data where the original
spinal cord was replaced by the smoothed spinal cord"""
# Find the voxels that belong to the spinal cord
X, Y, Z = (mask_data > 0).nonzero()
# Define useful variable
N = len(X) # number of voxels in the spinal cord segmentation
# Apply the filter to the original data
smoothed_sc_data = apply_filter(data)
# Replace the original spinal cord data by the smoothed one in the original data
for i in range(0, N):
data[X[i]][Y[i]][Z[i]] = smoothed_sc_data[X[i]][Y[i]][Z[i]]
# Return the corrected data
return data
def smooth_along_centerline(data, fname_input_image, file_input_image, ext_input_image, mask_data, fname_centerline):
"""Apply the smoothing to the original data along the spinal cord in the direction of the centerline and return
the original data where the original spinal cord was replaced by the smoothed spinal cord"""
# Find the voxels that belong to the spinal cord
X, Y, Z = (mask_data > 0).nonzero()
# Define useful variable
N = len(X) # number of voxels in the spinal cord segmentation
# Apply the script "sct_smooth_spinal_cord.py" to the original image
print("\n \n \n Apply the script \"sct_smooth_spinal_cord.py\" to the original image\n \n ")
os.system("python sct_smooth_spinal_cord.py -i " + str(fname_input_image) + " -c " + str(fname_centerline))
# Read the nifti output file resulting from the previously run script
print("\n \n Loading"+"./" + str(file_input_image) + "_smoothed" + str(ext_input_image)+"\n \n")
smoothed_img = nibabel.load("./" + str(file_input_image) + "_smoothed" + str(ext_input_image))
# 3d array for each x y z voxel values for the nifti image
smoothed_sc_data = smoothed_img.get_data()
# Replace the original spinal cord data by the smoothed one in the original data
for i in range(0, N):
data[X[i]][Y[i]][Z[i]] = smoothed_sc_data[X[i]][Y[i]][Z[i]]
# Return the corrected data
return data
#=======================================================================================================================
# usage
#=======================================================================================================================
def usage():
print 'USAGE: \n' \
' sct_apply_local_filter.py -i <inputimage> -o <outputimage> -m <mask> [options]\n' \
'\n' \
'MANDATORY ARGUMENTS\n' \
' -i input volume.\n' \
' -o output volume.\n' \
' -m binary mask referring to the zone where the filter is applied.\n' \
'\n' \
'OPTIONAL ARGUMENTS\n' \
' -h help. Show this message.\n' \
' -f type of filter to apply (default=\"gaussian\")\n' \
' -s smoothing strategy: either \"total_image\", \"mean_per_slice\" or \"along_centerline\"' \
' (default=\"total image\")\n' \
' -c centerline of the input spinal cord image if \"along_centerline\" is given as -s argument' \
'\n' \
'\n' \
'EXAMPLE:\n' \
' sct_apply_local_filter.py -i t2.nii.gz -o t2_filtered_WM.nii.gz -m t2_seg.nii.gz -f median\n'
sys.exit(2)
#=======================================================================================================================
# Start program
#=======================================================================================================================
if __name__ == "__main__":
# call main function
main()
|
|
# Django settings for qsic3 project.
import os
import dj_database_url
from easy_thumbnails.conf import Settings as thumbnail_settings
def get_env_var(env_var, default=None, isbool=False):
"""
Return the value of an environment variable, or raise an exception if it is not set
"""
from django.core.exceptions import ImproperlyConfigured
try:
# look the variable up directly when no default is given so that a missing
# variable actually raises KeyError and is reported as ImproperlyConfigured
env_value = os.environ[env_var] if default is None else os.environ.get(env_var, default)
if isbool:
env_value = 'true' in str(env_value).lower().strip()
return env_value
except KeyError:
error_msg = '{} environment variable not set'.format(env_var)
raise ImproperlyConfigured(error_msg)
# Return the directory `depth` levels above `file`
dirname = lambda file, depth: os.path.dirname(dirname(file, depth-1)) if depth else file
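# e.g. dirname('/srv/app/project/settings/base.py', 3) -> '/srv/app' (hypothetical path)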
PROJECT_ROOT = os.path.abspath(dirname(__file__, 3))
rootjoin = lambda *args: os.path.join(PROJECT_ROOT, *args)
DEBUG = get_env_var('DJANGO_DEBUG', default=False, isbool=True)
TEMPLATE_DEBUG = DEBUG
THUMBNAIL_DEBUG = DEBUG
DATABASE_URL = get_env_var('DATABASE_URL')
DATABASES = {
'default': dj_database_url.config()
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['*']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Filesystem directory names for static and media files
STATIC_DIR = 'static'
MEDIA_DIR = 'media'
# AWS file access info
AWS_ACCESS_KEY_ID = get_env_var('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = get_env_var('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = get_env_var('AWS_STORAGE_BUCKET_NAME')
# Of the format: '//bucket_name.s3.amazonaws.com/[media|static]/'
AWS_S3_BUCKET_URL = '//%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME
# Encoding for AWS transactions
ENCODING = 'utf-8'
# Serve static from AWS
# tell django to use django-storages
STATICFILES_STORAGE = 'django_py3s3.storages.S3StaticStorage'
STATIC_ROOT = AWS_S3_BUCKET_URL + '/' + STATIC_DIR + '/'
STATIC_URL = STATIC_ROOT
# Serve media from AWS
MEDIA_ROOT = AWS_S3_BUCKET_URL + '/' + MEDIA_DIR + '/'
MEDIA_URL = MEDIA_ROOT
DEFAULT_FILE_STORAGE = 'django_py3s3.storages.S3MediaStorage'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
rootjoin('static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'project_settings.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'project_settings.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
rootjoin('templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'django_extensions',
'easy_thumbnails',
'image_cropping',
'core',
'events',
'groups',
'performers',
'py3s3',
'raven.contrib.django.raven_compat',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# Make this unique, and don't share it with anybody.
SECRET_KEY = get_env_var('DJANGO_SECRET_KEY')
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d '
'%(thread)d %(message)s'
},
'medium': {
'format': '%(levelname)s %(asctime)s %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'medium'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
}
}
# easy_thumbnails and django image cropping
THUMBNAIL_PROCESSORS = (
'image_cropping.thumbnail_processors.crop_corners',
) + thumbnail_settings.THUMBNAIL_PROCESSORS
#THUMBNAIL_DEFAULT_STORAGE = 'easy_thumbnails.storage.ThumbnailFileSystemStorage'
THUMBNAIL_DEFAULT_STORAGE = DEFAULT_FILE_STORAGE
IMAGE_CROPPING_SIZE_WARNING = True
SOUTH_MIGRATION_MODULES = {
'easy_thumbnails': 'easy_thumbnails.south_migrations',
}
# Set your DSN value
RAVEN_CONFIG = {
'dsn': 'https://e92ff3ac19cd4d89945f1b5e428f061d:[email protected]/34299',
}
|
|
import wx
import wx.richtext as rtc
import prefs
import re
from window.basepane import BasePane
from utility import platform
from theme import Theme
class InputPane(BasePane):
def __init__(self, parent, connection):
BasePane.__init__(self, parent, connection,
style = wx.TE_PROCESS_ENTER | wx.TE_MULTILINE
)
self.cmd_history = CommandHistory(self)
self.tab_completion = TabCompletion(self, connection)
self.tabs = wx.GetApp().GetTopWindow().tabs
self.Bind(wx.EVT_TEXT_ENTER, self.send_to_connection )
self.Bind(wx.EVT_TEXT, self.onTextChange )
self.Bind(wx.EVT_KEY_DOWN, self.check_for_interesting_keystrokes )
self.Bind(wx.EVT_CHAR_HOOK, self.do_keyboard_copy )
self.AddClearAllToMenu()
self.Clear()
self.restyle_thyself()
def do_keyboard_copy(self, evt):
if evt.CmdDown():
k = evt.GetKeyCode()
if k == 67:
self.GetTopLevelParent().handleCopy(evt)
return
#if k == 86: print("That was a Cmd-V")
#if k == 88: print("That was a Cmd-X")
evt.Skip()
def doClear(self, evt): self.Clear()
def AddClearAllToMenu(self):
menu = self.GetContextMenu()
selectall, selectall_pos = menu.FindChildItem(menu.FindItem("Select All"))
clear_input = menu.Insert(selectall_pos, -1, "Clear Input", "Clears all text from the input")
self.Bind(wx.EVT_MENU, self.doClear, clear_input)
self.SetContextMenu(menu)
def paste_from_selection(self, evt = None):
uxcp = prefs.get('use_x_copy_paste')
if uxcp and platform == 'linux': wx.TheClipboard.UsePrimarySelection(True)
self.Paste()
if uxcp and platform == 'linux': wx.TheClipboard.UsePrimarySelection(False)
def copy_from_selection(self, evt = None):
uxcp = prefs.get('use_x_copy_paste')
if uxcp and platform == 'linux': wx.TheClipboard.UsePrimarySelection(True)
self.Copy()
if uxcp and platform == 'linux': wx.TheClipboard.UsePrimarySelection(False)
### HANDLERS
def send_to_connection(self, evt):
if self.connection:
stuff = self.GetValue()
self.cmd_history.add(stuff)
self.connection.output(stuff + "\n")
self.Clear()
if prefs.get('local_echo') and ('ECHO' not in self.connection.iac or self.connection.iac['ECHO'] == True):
self.connection.output_pane.display(">" + stuff + "\n")
def check_for_interesting_keystrokes(self, evt):
k = evt.GetKeyCode()
if k == wx.WXK_UP:
if self.tab_completion.IsShown():
self.tab_completion.prev_item()
else:
self.SetValue(self.cmd_history.prev())
elif k == wx.WXK_DOWN:
if self.tab_completion.IsShown():
self.tab_completion.next_item()
else:
self.SetValue(self.cmd_history.next())
elif k == wx.WXK_PAGEUP: self.connection.output_pane.ScrollPages(-1)
elif k == wx.WXK_PAGEDOWN: self.connection.output_pane.ScrollPages(1)
elif k == wx.WXK_TAB: self.fetch_completions()
elif k == wx.WXK_ESCAPE: self.tab_completion.CloseAndClear()
elif k == wx.WXK_INSERT:
if evt.ShiftDown(): self.paste_from_selection()
elif k == wx.WXK_RETURN or k == wx.WXK_NUMPAD_ENTER:
if self.tab_completion.IsShown():
self.do_completion(*self.tab_completion.pick_completion())
else:
self.send_to_connection(evt)
# either way:
self.tab_completion.CloseAndClear()
elif k == wx.WXK_HOME:
curpos = self.GetCaretPosition()
self.SetInsertionPoint(0)
self.ShowPosition(0)
if evt.ShiftDown(): self.SetSelection(0, curpos+1)
return
elif k == wx.WXK_END:
curpos = self.GetCaretPosition()
self.SetInsertionPointEnd()
self.ShowPosition(-1)
if evt.ShiftDown(): self.SetSelection(curpos+1, self.GetCaretPosition()+1)
return
# Cmd-[#] to switch directly to a tab -- includes 1234567890-= keys
# this is a little confusing because the tab indices are zero-based, so
# we want key [1] to turn into a 0.
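# e.g. Cmd-3 arrives as keycode 51, and 51 - 49 = 2 selects the third tab.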
elif evt.CmdDown() and (k in (49,50,51,52,53,54,55,56,57,48,45,61)):
# for [1]-[9], we want indices 0-8, so subtract 49 from k to get that
page_index = k - 49
if (k == 48): page_index = 9 # [0]
if (k == 45): page_index = 10 # [-]
if (k == 61): page_index = 11 # [=]
if (page_index >= self.tabs.GetPageCount()): return
# if we're re-selecting the current one, pop back to the last one
# this behavior copped from weechat
if (page_index == self.tabs.GetSelection()):
page_index = self.tabs.last_selection
self.tabs.last_selection = self.tabs.SetSelection(page_index)
# Cmd-Left / Cmd-Right to switch tabs
elif (evt.CmdDown() and (k == wx.WXK_LEFT or k == wx.WXK_RIGHT)):
self.tabs.AdvanceSelection(k == wx.WXK_RIGHT)
elif k == ord('W') and evt.CmdDown(): # Ctrl-W
self.delete_last_word()
else:
# self.tab_completion.CloseAndClear()
evt.Skip()
return
self.SetInsertionPointEnd()
def onTextChange(self, evt):
self.cmd_history.update(self.GetValue())
if self.GetValue() == '':
self.tab_completion.CloseAndClear()
if self.tab_completion.IsShown():
evt.Skip()
self.fetch_completions()
def delete_last_word(self):
current_value = self.GetValue()
if not current_value: return
new_value = current_value.rsplit(None, 1)[0]
if new_value == current_value: new_value = ''
self.SetValue( new_value )
def fetch_completions(self):
self.tab_completion.complete(self.GetValue())
def do_completion(self, begin_pos, completion):
if completion:
self.tab_completion.CloseAndClear()
self.SetValue(self.GetValue()[:int(begin_pos)] + completion)
self.SetInsertionPointEnd()
class CommandHistory:
# we keep a list of historical entries, and a 'cursor' so we can
# keep track of where we are looking in the list. The last
# entry in the history gets twiddled as we go. Once we are done
# with it and enter it into history, a fresh '' gets appended to
# the array, on and on, world without end.
def __init__(self, parent):
self.history = ['']
self.current = 0
self.parent = parent
# which entry does our 'cursor' point to?
def current_entry(self):
return self.history[self.current]
def set_current(self, string):
self.history[self.current] = string
def prev(self):
if self.current > 0: self.current -= 1
return self.current_entry()
def next(self):
if self.current < len(self.history)-1: self.current += 1
return self.current_entry()
# if we've actually changed anything, take the changed value
# and use it as the new "current" value, at the end of the array.
def update(self, string):
string = string.rstrip()
if (self.current_entry() != string):
self.current = len(self.history)-1
self.set_current(string)
# this is the final state of the thing we input.
# Make sure it's updated, then push a fresh '' onto the end
def add(self, string=""):
string = string.rstrip()
if string == "": return # no blank lines pls
# some special cases
if len(self.history) > 1:
# if it's a repeat of the very previous one, don't add it
if string == self.history[-2]:
self.update('')
return
else:
# no history yet, is it "co username password"? Don't add it.
if re.match('^co', string):
self.update('')
return
self.history[-1] = string
self.current = len(self.history)
self.history.append('')
self.update('')
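# Worked example (added comment, not in the original): starting from
# history == [''], typing "look" and pressing enter leaves
# history == ['look', ''] with the cursor on the trailing '' entry;
# prev() then returns 'look', and anything typed while browsing is written
# back into that trailing entry by update().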
class TabCompletion(wx.PopupWindow):
def __init__(self, parent, connection):
wx.PopupWindow.__init__(self, parent,
flags = wx.BORDER_SIMPLE
)
self.verbs = []
self.names = []
self.parent = parent
self.completers = None
self.completion_list = CompletionList(self)
self.last_completed = None
self.connection = connection
self.SetBackgroundColour(Theme.fetch().get('foreground'))
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.completion_list, 1, wx.ALL|wx.EXPAND, 2)
self.SetSizer(sizer)
def pick_completion(self):
current = self.completion_list.GetFirstSelected()
return self.begin_pos, self.completion_list.GetItemText(current)
def CloseAndClear(self):
self.Hide()
if self.completion_list:
self.completion_list.ClearAll()
def next_item(self):
clist = self.completion_list
current = clist.GetFirstSelected()
if current == clist.GetItemCount()-1:
current = 0
else:
current +=1
clist.Select(current)
clist.EnsureVisible(current)
def prev_item(self):
clist = self.completion_list
current = clist.GetFirstSelected()
if current == 0:
current = clist.GetItemCount()-1
else:
current -=1
clist.Select(current)
clist.EnsureVisible(current)
def complete(self, to_complete):
if not to_complete: return
# if we've just hit <tab> again without making any changes...
if self.last_completed and (to_complete == self.last_completed):
# ...re-show the popup if it's hidden...
if not self.IsShown():
self.Show()
# ...and do nothing else...
return
# ...otherwise (wasn't hidden), move the selection 'down' by one...
self.next_item()
# ...and do nothing else...
return
#... otherwise, carry on
# TODO - so far we only have the one possible completer but maybe later we'll select from options
if self.completers:
self.completers.request(self.popup_completions, to_complete)
def popup_completions(self, begin_pos, to_complete, completions):
# do we have one or more new completions for the list?
if completions:
# populate the list in every case
self.completion_list.fill(completions)
self.begin_pos = begin_pos
self.last_completed = to_complete
if len(completions) == 1:
# we have just the one completion, we should use it
self.parent.do_completion(begin_pos, completions[0])
self.last_completed = None
else:
# there are multiple, format and show the list
w, h = self.completion_list.GetSize()
avail_height = min( h, self.connection.output_pane.GetSize()[1])
# if we're gonna have a vertical scrollbar, make room
if h > avail_height:
w = w + 15
adj = 1
if platform == "windows": adj = 10
self.SetSize((w + adj, avail_height))
self.Layout()
# find the x and y location to pop up the menu
x_pos, y_pos = self.parent.ClientToScreen((-2,-5))
# temporarily move the cursor back to begin_pos so we can
# find out where, along the 'x' axis, the text being completed
# actually begins
self.parent.SetInsertionPoint(int(begin_pos))
x_pos += self.parent.GetCaret().GetPosition()[0]
self.parent.SetInsertionPointEnd()
self.SetPosition((x_pos, y_pos - avail_height))
self.Show(True)
# pressing tab but no completions
else:
self.last_completed = None
self.CloseAndClear()
class CompletionList(wx.ListCtrl):
def __init__(self, parent):
wx.ListCtrl.__init__(self, parent,
style = wx.LC_REPORT|wx.LC_NO_HEADER|wx.LC_SINGLE_SEL
)
self.parent = parent
self.SetTextColour(Theme.fetch().get('foreground'))
self.SetBackgroundColour(Theme.fetch().get('background'))
font = wx.Font(prefs.get('font'))
self.SetFont(font)
self.Bind(wx.EVT_KEY_DOWN, self.parent.parent.check_for_interesting_keystrokes )
def fill(self, completions):
self.ClearAll()
self.InsertColumn(0, '')
for i,c in enumerate(completions):
self.InsertItem(i,c)
# hoops to jump through to shrink-wrap the list
height = 10
for idx in range(self.GetItemCount()):
height += self.GetItemRect(idx).height
self.SetColumnWidth(0,-1)
self.SetSize((self.GetColumnWidth(0) + 5, height))
self.Select(0)
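# A minimal, hypothetical sketch of the completer contract that TabCompletion.complete()
# relies on above: an object exposing request(callback, to_complete) which eventually
# invokes callback(begin_pos, to_complete, completions). The class name, the word list
# and the begin_pos of 0 are illustrative assumptions, not the client's real completer.
class ExampleStaticCompleter:
    def __init__(self, words):
        self.words = list(words)
    def request(self, callback, to_complete):
        # prefix match only; a real completer would also work out where the word being
        # completed starts in the input so begin_pos could be reported correctly
        matches = [w for w in self.words if w.startswith(to_complete)]
        callback(0, to_complete, matches)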
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import warnings
from django.contrib import admin
from django.contrib.admin.sites import AdminSite
from django.contrib.auth.models import User
from django.contrib.contenttypes.admin import GenericTabularInline
from django.contrib.contenttypes.forms import generic_inlineformset_factory
from django.forms.formsets import DEFAULT_MAX_NUM
from django.forms.models import ModelForm
from django.test import TestCase, override_settings, RequestFactory
from django.utils.deprecation import RemovedInDjango19Warning
# local test models
from .admin import MediaInline, MediaPermanentInline, site as admin_site
from .models import Episode, Media, EpisodePermanent, Category
# Set TEMPLATE_DEBUG to True to ensure {% include %} will raise exceptions.
# That is how inlines are rendered and #9498 will bubble up if it is an issue.
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
TEMPLATE_DEBUG=True,
ROOT_URLCONF="generic_inline_admin.urls")
class GenericAdminViewTest(TestCase):
fixtures = ['users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
# Can't load content via a fixture (since the GenericForeignKey
# relies on content type IDs, which will vary depending on what
# other tests have been run), thus we do it here.
e = Episode.objects.create(name='This Week in Django')
self.episode_pk = e.pk
m = Media(content_object=e, url='http://example.com/podcast.mp3')
m.save()
self.mp3_media_pk = m.pk
m = Media(content_object=e, url='http://example.com/logo.png')
m.save()
self.png_media_pk = m.pk
def test_basic_add_GET(self):
"""
A smoke test to ensure GET on the add_view works.
"""
response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episode/add/')
self.assertEqual(response.status_code, 200)
def test_basic_edit_GET(self):
"""
A smoke test to ensure GET on the change_view works.
"""
response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episode/%d/' % self.episode_pk)
self.assertEqual(response.status_code, 200)
def test_basic_add_POST(self):
"""
A smoke test to ensure POST on add_view works.
"""
post_data = {
"name": "This Week in Django",
# inline data
"generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "1",
"generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "0",
"generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0",
}
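        # The long "generic_inline_admin-media-content_type-object_id" prefix mirrors the
        # default prefix a generic inline formset builds from the inline model's app label,
        # model name and its content-type/object-id field names.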
response = self.client.post('/generic_inline_admin/admin/generic_inline_admin/episode/add/', post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_basic_edit_POST(self):
"""
A smoke test to ensure POST on edit_view works.
"""
post_data = {
"name": "This Week in Django",
# inline data
"generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "3",
"generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "2",
"generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0",
"generic_inline_admin-media-content_type-object_id-0-id": "%d" % self.mp3_media_pk,
"generic_inline_admin-media-content_type-object_id-0-url": "http://example.com/podcast.mp3",
"generic_inline_admin-media-content_type-object_id-1-id": "%d" % self.png_media_pk,
"generic_inline_admin-media-content_type-object_id-1-url": "http://example.com/logo.png",
"generic_inline_admin-media-content_type-object_id-2-id": "",
"generic_inline_admin-media-content_type-object_id-2-url": "",
}
url = '/generic_inline_admin/admin/generic_inline_admin/episode/%d/' % self.episode_pk
response = self.client.post(url, post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_generic_inline_formset(self):
EpisodeMediaFormSet = generic_inlineformset_factory(Media, can_delete=False, exclude=['description', 'keywords'], extra=3)
e = Episode.objects.get(name='This Week in Django')
# Works with no queryset
formset = EpisodeMediaFormSet(instance=e)
self.assertEqual(len(formset.forms), 5)
self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="url" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/podcast.mp3" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.mp3_media_pk)
self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="url" name="generic_inline_admin-media-content_type-object_id-1-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>' % self.png_media_pk)
self.assertHTMLEqual(formset.forms[2].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-2-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-2-url" type="url" name="generic_inline_admin-media-content_type-object_id-2-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-2-id" id="id_generic_inline_admin-media-content_type-object_id-2-id" /></p>')
# A queryset can be used to alter display ordering
formset = EpisodeMediaFormSet(instance=e, queryset=Media.objects.order_by('url'))
self.assertEqual(len(formset.forms), 5)
self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="url" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.png_media_pk)
self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="url" name="generic_inline_admin-media-content_type-object_id-1-url" value="http://example.com/podcast.mp3" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>' % self.mp3_media_pk)
self.assertHTMLEqual(formset.forms[2].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-2-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-2-url" type="url" name="generic_inline_admin-media-content_type-object_id-2-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-2-id" id="id_generic_inline_admin-media-content_type-object_id-2-id" /></p>')
# Works with a queryset that omits items
formset = EpisodeMediaFormSet(instance=e, queryset=Media.objects.filter(url__endswith=".png"))
self.assertEqual(len(formset.forms), 4)
self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="url" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.png_media_pk)
self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="url" name="generic_inline_admin-media-content_type-object_id-1-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>')
def test_generic_inline_formset_factory(self):
# Regression test for #10522.
inline_formset = generic_inlineformset_factory(Media,
exclude=('url',))
# Regression test for #12340.
e = Episode.objects.get(name='This Week in Django')
formset = inline_formset(instance=e)
self.assertTrue(formset.get_queryset().ordered)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="generic_inline_admin.urls")
class GenericInlineAdminParametersTest(TestCase):
fixtures = ['users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
self.factory = RequestFactory()
def _create_object(self, model):
"""
Create a model with an attached Media object via GFK. We can't
load content via a fixture (since the GenericForeignKey relies on
content type IDs, which will vary depending on what other tests
have been run), thus we do it here.
"""
e = model.objects.create(name='This Week in Django')
Media.objects.create(content_object=e, url='http://example.com/podcast.mp3')
return e
def test_no_param(self):
"""
With one initial form, extra (default) at 3, there should be 4 forms.
"""
e = self._create_object(Episode)
response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episode/%s/' % e.pk)
formset = response.context['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 4)
self.assertEqual(formset.initial_form_count(), 1)
def test_extra_param(self):
"""
With extra=0, there should be one form.
"""
class ExtraInline(GenericTabularInline):
model = Media
extra = 0
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [ExtraInline]
e = self._create_object(Episode)
request = self.factory.get('/generic_inline_admin/admin/generic_inline_admin/episode/%s/' % e.pk)
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 1)
self.assertEqual(formset.initial_form_count(), 1)
    def test_max_num_param(self):
"""
With extra=5 and max_num=2, there should be only 2 forms.
"""
class MaxNumInline(GenericTabularInline):
model = Media
extra = 5
max_num = 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [MaxNumInline]
e = self._create_object(Episode)
request = self.factory.get('/generic_inline_admin/admin/generic_inline_admin/episode/%s/' % e.pk)
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 2)
self.assertEqual(formset.initial_form_count(), 1)
def test_min_num_param(self):
"""
With extra=3 and min_num=2, there should be five forms.
"""
class MinNumInline(GenericTabularInline):
model = Media
extra = 3
min_num = 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [MinNumInline]
e = self._create_object(Episode)
request = self.factory.get('/generic_inline_admin/admin/generic_inline_admin/episode/%s/' % e.pk)
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 5)
self.assertEqual(formset.initial_form_count(), 1)
def test_get_extra(self):
class GetExtraInline(GenericTabularInline):
model = Media
extra = 4
def get_extra(self, request, obj):
return 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [GetExtraInline]
e = self._create_object(Episode)
request = self.factory.get('/generic_inline_admin/admin/generic_inline_admin/episode/%s/' % e.pk)
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.extra, 2)
def test_get_min_num(self):
class GetMinNumInline(GenericTabularInline):
model = Media
min_num = 5
def get_min_num(self, request, obj):
return 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [GetMinNumInline]
e = self._create_object(Episode)
request = self.factory.get('/generic_inline_admin/admin/generic_inline_admin/episode/%s/' % e.pk)
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.min_num, 2)
def test_get_max_num(self):
class GetMaxNumInline(GenericTabularInline):
model = Media
extra = 5
def get_max_num(self, request, obj):
return 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [GetMaxNumInline]
e = self._create_object(Episode)
request = self.factory.get('/generic_inline_admin/admin/generic_inline_admin/episode/%s/' % e.pk)
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.max_num, 2)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="generic_inline_admin.urls")
class GenericInlineAdminWithUniqueTogetherTest(TestCase):
fixtures = ['users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def test_add(self):
category_id = Category.objects.create(name='male').pk
post_data = {
"name": "John Doe",
# inline data
"generic_inline_admin-phonenumber-content_type-object_id-TOTAL_FORMS": "1",
"generic_inline_admin-phonenumber-content_type-object_id-INITIAL_FORMS": "0",
"generic_inline_admin-phonenumber-content_type-object_id-MAX_NUM_FORMS": "0",
"generic_inline_admin-phonenumber-content_type-object_id-0-id": "",
"generic_inline_admin-phonenumber-content_type-object_id-0-phone_number": "555-555-5555",
"generic_inline_admin-phonenumber-content_type-object_id-0-category": "%s" % category_id,
}
response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/contact/add/')
self.assertEqual(response.status_code, 200)
response = self.client.post('/generic_inline_admin/admin/generic_inline_admin/contact/add/', post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
@override_settings(ROOT_URLCONF="generic_inline_admin.urls")
class NoInlineDeletionTest(TestCase):
def test_no_deletion(self):
inline = MediaPermanentInline(EpisodePermanent, admin_site)
fake_request = object()
formset = inline.get_formset(fake_request)
self.assertFalse(formset.can_delete)
class MockRequest(object):
pass
class MockSuperUser(object):
def has_perm(self, perm):
return True
request = MockRequest()
request.user = MockSuperUser()
@override_settings(ROOT_URLCONF="generic_inline_admin.urls")
class GenericInlineModelAdminTest(TestCase):
def setUp(self):
self.site = AdminSite()
def test_get_formset_kwargs(self):
media_inline = MediaInline(Media, AdminSite())
# Create a formset with default arguments
formset = media_inline.get_formset(request)
self.assertEqual(formset.max_num, DEFAULT_MAX_NUM)
self.assertEqual(formset.can_order, False)
# Create a formset with custom keyword arguments
formset = media_inline.get_formset(request, max_num=100, can_order=True)
self.assertEqual(formset.max_num, 100)
self.assertEqual(formset.can_order, True)
def test_custom_form_meta_exclude_with_readonly(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is respected when
used in conjunction with `GenericInlineModelAdmin.readonly_fields`
and when no `ModelAdmin.exclude` is defined.
"""
class MediaForm(ModelForm):
class Meta:
model = Media
exclude = ['url']
class MediaInline(GenericTabularInline):
readonly_fields = ['description']
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['keywords', 'id', 'DELETE'])
def test_custom_form_meta_exclude(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is respected by
`GenericInlineModelAdmin.get_formset`, and overridden if
`ModelAdmin.exclude` or `GenericInlineModelAdmin.exclude` are defined.
Refs #15907.
"""
# First with `GenericInlineModelAdmin` -----------------
class MediaForm(ModelForm):
class Meta:
model = Media
exclude = ['url']
class MediaInline(GenericTabularInline):
exclude = ['description']
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['url', 'keywords', 'id', 'DELETE'])
# Then, only with `ModelForm` -----------------
class MediaInline(GenericTabularInline):
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['description', 'keywords', 'id', 'DELETE'])
def test_get_fieldsets(self):
# Test that get_fieldsets is called when figuring out form fields.
# Refs #18681.
class MediaForm(ModelForm):
class Meta:
model = Media
fields = '__all__'
class MediaInline(GenericTabularInline):
form = MediaForm
model = Media
can_delete = False
def get_fieldsets(self, request, obj=None):
return [(None, {'fields': ['url', 'description']})]
ma = MediaInline(Media, self.site)
form = ma.get_formset(None).form
self.assertEqual(form._meta.fields, ['url', 'description'])
def test_get_formsets_with_inlines(self):
"""
        get_formsets_with_inlines() triggers a deprecation warning when
        get_formsets is overridden.
"""
class MediaForm(ModelForm):
class Meta:
model = Media
exclude = ['url']
class MediaInline(GenericTabularInline):
exclude = ['description']
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline
]
def get_formsets(self, request, obj=None):
return []
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
ma = EpisodeAdmin(Episode, self.site)
list(ma.get_formsets_with_inlines(request))
            # The overridden get_formsets() was called, raising the deprecation warning.
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[0].category, RemovedInDjango19Warning))
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline
]
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
ma = EpisodeAdmin(Episode, self.site)
list(ma.get_formsets_with_inlines(request))
self.assertEqual(len(w), 0)
def test_get_formsets_with_inlines_returns_tuples(self):
"""
Ensure that get_formsets_with_inlines() returns the correct tuples.
"""
class MediaForm(ModelForm):
class Meta:
model = Media
exclude = ['url']
class MediaInline(GenericTabularInline):
form = MediaForm
model = Media
class AlternateInline(GenericTabularInline):
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
AlternateInline, MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
inlines = ma.get_inline_instances(request)
for (formset, inline), other_inline in zip(ma.get_formsets_with_inlines(request), inlines):
self.assertIsInstance(formset, other_inline.get_formset(request).__class__)
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
AlternateInline, MediaInline
]
def get_formsets(self, request, obj=None):
# Catch the deprecation warning to force the usage of get_formsets
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
return super(EpisodeAdmin, self).get_formsets(request, obj)
ma = EpisodeAdmin(Episode, self.site)
inlines = ma.get_inline_instances(request)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
for (formset, inline), other_inline in zip(ma.get_formsets_with_inlines(request), inlines):
self.assertIsInstance(formset, other_inline.get_formset(request).__class__)
|
|
from serial import Serial
from collections import deque
from time import sleep
from enum import Enum
from threading import Thread
from datetime import datetime, timedelta
SERIAL_POLL_INTERVAL = 0.1 # Seconds between hardware serial polls pushing to serial queue
BROKER_POLL_INTERVAL = 0.05
SERIAL_TIMING_DELAY = 3.0
MESSAGE_PARSE_INTERVAL = 1.0 # clearing the serial queue, pushing to the parse queue and updating the UI data set
MESSAGE_BROKER_INTERVAL = 1.0 # clearing parse queue and pushing to DB and file queues
DB_LOCAL_WORKER_INTERVAL = 5.0 # Seconds between write attempts to the DBs
DB_CLOUD_WORKER_INTERVAL = 15.0
UI_REFRESH_INTERVAL = 1.0 # Seconds wait between updating display from UI data
GRAPH_UPDATE_INTERVAL = 120.0 # Seconds - to re-request graph data from DB and update
CLOCK_UPDATE_INTERVAL = 1.0 # Literally the clock display update
MYSQL_POLL_INTERVAL = 3.0 # If enabled, how often UI data set is refreshed from the DB
class InterfaceModes(Enum):
CHAR = 0
LINE = 1
BINARY = 2
class Utils():
def __init__(self, **kwargs):
super(Utils, self).__init__(**kwargs)
def timeout_start(self, timeout_period=5):
self.time_start = datetime.now()
self.time_period = timedelta(seconds = timeout_period)
def timeout(self):
if (datetime.now() - self.time_start) > self.time_period:
return True
else: return False
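# A tiny, hypothetical illustration of the Utils timeout helpers above; the 0.01 second
# period and the 0.02 second sleep are arbitrary example values, not ones used elsewhere.
# u = Utils()
# u.timeout_start(timeout_period=0.01)
# sleep(0.02)
# u.timeout()  # -> True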
class Interface():
def __init__(self, **kwargs):
super(Interface, self).__init__(**kwargs)
self.id = ""
self.description = ""
        self.hanging_prompts = False
self.txQ = deque()
self.rxQ = deque()
self.rxQMax = 2000
self.txQMax = 2000
self.errorLog = deque()
self.mode = InterfaceModes.LINE
self.connected = False
self.input_array = bytearray()
self.output_array = bytearray()
self.byte_count = 0
def changeMode(self, mode):
self.mode = mode
class LinuxSerialPort(Interface):
def __init__(self, **kwargs):
super(LinuxSerialPort, self).__init__(**kwargs)
self.lastSerialWriteDT = datetime.now()
self.lastSerialReadDT = datetime.now()
def config(self, device="/dev/null", speed=9600):
self.device = device
self.speed = speed
def connect(self):
self.connection = Serial(self.device, self.speed)
#sleep(0.3)
#self.connection.open()
if self.connection.is_open:
self.connected = True
print("LinuxSerialPort Open")
return True
def close(self):
self.connection.close()
self.connected = False
def timeSinceLastRXTX(self):
if self.lastSerialWriteDT > self.lastSerialReadDT:
self.idle_time = datetime.now() - self.lastSerialWriteDT
else:
self.idle_time = datetime.now() -self.lastSerialReadDT
return self.idle_time.total_seconds()
    def timeSinceLastTX(self):
        # mirror timeSinceLastRXTX() and report the idle time in seconds
        self.idle_tx = datetime.now() - self.lastSerialWriteDT
        return self.idle_tx.total_seconds()
def readAvailableToQueue(self):
self.TDSinceLastWrite = datetime.now() - self.lastSerialWriteDT
if self.TDSinceLastWrite.seconds > 2:
sleep(.5)
if (self.connection.inWaiting() > 0):
try:
if (len(self.rxQ) < self.rxQMax):
                    self.input_string = self.connection.readline()  # readline() takes no timeout argument; the port's own timeout applies
self.lastSerialReadDT = datetime.now()
self.input_string = self.input_string.decode()
self.input_string = self.input_string.rstrip('\n\r')
self.rxQ.append(self.input_string)
sleep(0.5)
else:
                    self.errorLog.append("Serial RX Queue Full. UART may overrun.")
            except Exception:
self.errorLog.append("Error reading from serial port")
def writeQueueToSerial(self):
if (len(self.txQ) >0):
try:
self.TDSinceLastRead = datetime.now() - self.lastSerialReadDT
self.TDSinceLastWrite = datetime.now() - self.lastSerialWriteDT
#print (TD.microseconds)
                if (self.TDSinceLastRead.seconds > 2) and (self.TDSinceLastWrite.seconds > 2):
line = self.txQ.popleft().rstrip('\n\r')
line = line + '\n'
print ("Sending: ", line)
#print(input_string.encode())
self.connection.write(line.encode())
self.lastSerialWriteDT = datetime.now()
sleep(0.5)
#self.connection.flush()
            except Exception:
self.errorLog.append("Error writing to serial port")
if (len(self.txQ) > self.txQMax):
            self.errorLog.append("Serial TX Queue Full. Connection Problem?")
def run(self):
''' as a thread'''
while True:
if self.connected:
while self.connection.in_waiting > 0:
#print("connection.in_waiting > 0")
self.read_char = self.connection.read(size=1)
self.byte_count += 1
#print(self.read_char)
self.lastSerialReadDT = datetime.now()
if self.read_char == b'\n':
self.byte_count = 0
self.input_string = self.input_array.decode()
self.input_string = self.input_string.strip('\n\r')
#print(self.input_string)
if not self.input_string == "":
self.rxQ.append(self.input_string)
self.input_array = bytearray()
self.input_string = ""
else:
self.input_array += self.read_char
self.reading = False
else:
if self.hanging_prompts:
''' nothing in waiting - has it been a while? '''
if self.timeSinceLastRXTX() > 2:
'''still bytes in the input_array? '''
if self.byte_count > 0:
                                ''' Assume an unterminated prompt'''
self.byte_count = 0
self.input_string = self.input_array.decode()
self.input_string = self.input_string.strip('\n\r')
#print(self.input_string)
self.rxQ.append(self.input_string)
self.input_array = bytearray()
self.input_string = ""
#print(self.input_array)
'''Outgoing'''
sleep(0.100)
while len(self.txQ) > 0:
self.output_string = self.txQ.popleft()
                    self.output_string = self.output_string.strip('\n\r')
self.output_string += '\n'
self.output_array = self.output_string.encode()
#self.connection.flush()
#sleep(0.3)
#print("Writing Line to Serial Port")
self.connection.write(self.output_array)
self.connection.flush()
self.lastSerialWriteDT = datetime.now()
sleep(0.100)
#sleep(SERIAL_POLL_INTERVAL)
class Adaptor():
def __init__(self, **kwargs):
super(Adaptor, self).__init__(**kwargs)
self.brokerToInterfaceQ = deque()
self.interfaceToBrokerQ = deque()
self.last_line_sent = ""
def config(self, interfaceObj, logQ, timeout=20):
#self.name = name
self.interface = interfaceObj
self.interfaceRxQ = interfaceObj.rxQ
self.interfaceTxQ = interfaceObj.txQ
self.log = logQ
self.timeout_period = timedelta(seconds=timeout)
def connectInterface(self):
self.interface.connect()
def interfaceIsConnected(self):
return self.interface.connected
def sendNextLineToInterface(self):
if len(self.brokerToInterfaceQ) > 0:
self.last_line_sent = self.brokerToInterfaceQ.popleft()
# Trim trailing space
self.last_line_sent = self.last_line_sent.strip()
self.interfaceTxQ.append(self.last_line_sent)
print(self.last_line_sent)
def lineAvailableFromInterface(self):
return (len(self.interfaceRxQ) > 0)
def appendForSendingToInterface(self, line):
self.brokerToInterfaceQ.append(line)
def getLineForBroker(self):
return self.interfaceToBrokerQ.popleft()
def appendForBrokerCollection(self, line):
self.interfaceToBrokerQ.append(line)
def lineAvailableForBroker(self):
return (len(self.interfaceToBrokerQ) > 0)
def getLineFromInterface(self):
return self.interfaceRxQ.popleft()
def timeSinceLastInterfaceRXTX(self):
return self.interface.timeSinceLastRXTX()
class FodderArduino(Adaptor, Utils):
def __init__(self, **kwargs):
super(FodderArduino, self).__init__(**kwargs)
self.response = ""
self.itchActivated = False
self.response_list = []
self.command = "RunLinemode"
self.config_ack = False
def setup(self):
self.interface.config("/dev/ttyACM0", 9600)
def mode_command(self, cmd="RunLinemode"):
self.command = cmd
def run(self):
''' As a thread'''
while True:
if self.command == "RunLinemode":
if self.interfaceIsConnected():
if self.lineAvailableFromInterface():
#print("lineAvailableFromInterface")
self.appendForBrokerCollection(self.getLineFromInterface())
if len(self.brokerToInterfaceQ) > 0:
self.sendNextLineToInterface()
elif self.command == "ActivateItch":
self.appendForBrokerCollection("Recieved ActivateItch Command")
self.activateItch()
self.appendForBrokerCollection("Returning to RunLinemode")
self.command = "RunLinemode"
elif self.command == "ConfigureDevice":
self.appendForBrokerCollection("Recieved ConfigureDevice Command")
self.processConfigCommands()
self.appendForBrokerCollection("Returning to RunLinemode After process config commands")
self.command = "RunLinemode"
def activateItch(self):
if self.interfaceIsConnected():
while self.timeSinceLastInterfaceRXTX() < 3:
sleep(1)
print (self.timeSinceLastInterfaceRXTX())
self.brokerToInterfaceQ.append("%%%")
self.sendNextLineToInterface()
while True:
''' add timeout! '''
if self.lineAvailableFromInterface():
self.response = self.getLineFromInterface()
#print (self.response)
self.appendForBrokerCollection(self.response)
if self.response == "OK":
#print ("BINGO!")
self.appendForBrokerCollection("ITCH Text CCC mode detected by adaoptor")
self.itchActivated = True
return True
def processConfigCommands(self):
if not self.itchActivated:
#self.log.append("Adaptor:FodderArduino:processConfigCommands itch is not activated on interface device")
print("Adaptor:FodderArduino:processConfigCommands itch is not activated on interface device")
return
#check for commands in queue
self.timeout_start()
while True:
if len(self.brokerToInterfaceQ) > 0:
break
if self.timeout():
#self.log.append("Adaptor:FodderArduino:processConfigCommands timed out waiting for commands")
print("Adaptor:FodderArduino:processConfigCommands timed out waiting for commands")
return
while len(self.brokerToInterfaceQ) > 0:
self.sendNextLineToInterface()
self.config_ack = False
#self.response_list.append(self.last_line_sent)
#print(self.last_line_sent)
self.timeout_start()
while not self.timeout():
#if self.lineAvailableFromInterface():
#self.response = self.getLineFromInterface()
#self.appendForBrokerCollection(self.response)
#if self.response in self.response_list:
# print("got echo")
                # echo of send now received back
# look for command prompt return
# ignoring anything else
self.tries = 100
while self.tries > 0:
if self.lineAvailableFromInterface():
self.tries -= 1
self.response = self.getLineFromInterface()
#print("Got a line, sending it")
self.appendForBrokerCollection(self.response)
if self.response == "OK":
#print("got OK")
self.config_ack = True
break
if self.response == "ERROR":
#print("got ERROR")
self.config_ack = True
break
else:
self.appendForBrokerCollection("Adaptor:FodderArduino:processConfigCommands: "
"100 reponses without OK or ERROR prompt return")
#self.response_list.clear()
#print("breaking timeout loop")
if self.config_ack:
break
else:
self.appendForBrokerCollection("Adaptor:FodderArduino:processConfigCommands: "
"Timed out waiting for reponse from command send")
#sleep(0.500)
class Broker():
def __init__(self, **kwargs):
super(Broker, self).__init__(**kwargs)
def attachAdaptor(self, adaptorObj):
self.adaptor = adaptorObj
def attachBus(self, busObj):
self.bus = busObj
def run(self):
# poll the adaptor - will perhaps wake the arduino will a DTR
self.adaptor.appendForSendingToInterface("\n")
while True:
while len(self.bus) > 0:
self.adaptor.appendForSendingToInterface(self.bus.popleft())
while self.adaptor.lineAvailableForBroker():
print(self.adaptor.getLineForBroker())
sleep(BROKER_POLL_INTERVAL)
class System():
def __init__(self, **kwargs):
super(System, self).__init__(**kwargs)
self.mainBus = deque()
self.log_queue = deque()
def config(self):
''' generalise later'''
self.serialToBox = LinuxSerialPort()
self.serialToBox.config("/dev/ttyACM0", 9600)
        self.FFAdaptor = FodderArduino()
self.FFAdaptor.config(self.serialToBox, self.log_queue)
self.FFAdaptor.connectInterface() # Will init serial and connect
self.FFBroker = Broker()
self.FFBroker.attachAdaptor(self.FFAdaptor)
self.FFBroker.attachBus(self.mainBus)
''' Start threads '''
self.interface_thread = Thread(target=self.serialToBox.run)
        self.interface_thread.daemon = True
self.interface_thread.start()
print("Interface thread started")
self.adaptor_thread = Thread(target=self.FFAdaptor.run)
        self.adaptor_thread.daemon = True
self.adaptor_thread.start()
print("Adaptor thread started")
self.broker_thread = Thread(target=self.FFBroker.run)
        self.broker_thread.daemon = True
self.broker_thread.start()
print("Broker thread started")
def report(self, reportStr):
print(reportStr)
def run(self):
''' Main loop, turn into thread later'''
#self.serialToBox.run()
#self.FFBroker.run()
sleep(20)
self.FFAdaptor.mode_command("ActivateItch")
sleep(10)
self.command_retry = 3
self.command_finished = False
while (self.command_retry > 0) and (not self.command_finished):
if self.FFAdaptor.itchActivated:
self.mainBus.append("init disable all") #suppress messages
#self.mainBus.append("config reset")
#self.cf = open("/home/brendan/git/FodderFactory/ff_simulator/CONFIG.TXT", "r")
self.cf = open("CONFIG.TXT", "r")
                self.cf_lines = self.cf.readlines()
                self.cf.close()
for self.input_string in self.cf_lines:
self.line_clean = self.input_string.rstrip('\n\r')
#print(self.line_clean)
self.mainBus.append(self.line_clean)
#sleep(0.300)
self.mainBus.append("config save")
#self.mainBus.append("init val all")
#self.mainBus.append("init set all")
self.mainBus.append("\x04")
print("Commands Queued to mainBus")
self.FFAdaptor.mode_command("ConfigureDevice")
self.command_finished = True
break
else:
print("Error sending commands. ITCH Not activated. Retrying in 20 seconds")
self.command_retry -= 1
sleep(20)
else:
print("Error waiting for itch. Giving up")
exit(0)
while True:
print("System.Run Sleeping")
sleep(30)
def main():
app = System()
app.config()
app.run()
if __name__ == '__main__':
main()
'''
Need:
Interfaces:
Serial
File
TCP Socket
Database
    Adaptors - Know the protocol to communicate with their counterpart
ff_device
ff_controller
Other broker
Queues:
        Arbitrary contents
Communicate between Adaptors and broker threads
Bus:
A central queue through which all traffic flows
Broker:
A thread assigned to handle traffic between the Bus and a number of queues
System:
Read a config
Set up Interfaces
Setup Adaptors and associate them with an interface
Connect in and outbound queues to the adaptors and the bus
Assign a broker to look after a queue or queues
Launch it all and monitor
Add more brokers / queues if bottlenecking
Source quench and rate limiting
'''
pass
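# An illustrative sketch of the bus/adaptor/broker layering described in the notes above,
# wired to an in-memory loopback instead of serial hardware. LoopbackInterface, its echo
# behaviour and the "status all" command are assumptions for demonstration only.
class LoopbackInterface(Interface):
    def connect(self):
        self.connected = True
        return True
    def pump(self):
        # echo anything queued for transmission straight back into the receive queue
        while len(self.txQ) > 0:
            self.rxQ.append(self.txQ.popleft())

def demo_loopback():
    bus = deque()
    interface = LoopbackInterface()
    adaptor = Adaptor()
    adaptor.config(interface, deque())
    adaptor.connectInterface()
    broker = Broker()
    broker.attachAdaptor(adaptor)
    broker.attachBus(bus)
    bus.append("status all")
    # Broker.run() loops forever, so do one manual pass of the same traffic flow here
    while len(bus) > 0:
        adaptor.appendForSendingToInterface(bus.popleft())
    adaptor.sendNextLineToInterface()
    interface.pump()
    if adaptor.lineAvailableFromInterface():
        adaptor.appendForBrokerCollection(adaptor.getLineFromInterface())
    while adaptor.lineAvailableForBroker():
        print(adaptor.getLineForBroker())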
|
|
# encoding: utf-8
from django.conf import settings
import json
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.contrib import messages
from django.utils.translation import ugettext as _
import seaserv
from seaserv import seafile_api
from forms import DetailedProfileForm
from models import Profile, DetailedProfile
from utils import refresh_cache
from seahub.auth.decorators import login_required
from seahub.utils import is_org_context, clear_token
from seahub.base.accounts import User
from seahub.base.templatetags.seahub_tags import email2nickname
from seahub.contacts.models import Contact
from seahub.options.models import UserOptions, CryptoOptionNotSetError
from seahub.views import get_owned_repo_list
@login_required
def edit_profile(request):
"""
Show and edit user profile.
"""
username = request.user.username
form_class = DetailedProfileForm
if request.method == 'POST':
form = form_class(request.POST)
if form.is_valid():
form.save(username=username)
messages.success(request, _(u'Successfully edited profile.'))
# refresh nickname cache
refresh_cache(request.user.username)
return HttpResponseRedirect(reverse('edit_profile'))
else:
messages.error(request, _(u'Failed to edit profile'))
else:
profile = Profile.objects.get_profile_by_user(username)
d_profile = DetailedProfile.objects.get_detailed_profile_by_user(
username)
init_dict = {}
if profile:
init_dict['nickname'] = profile.nickname
init_dict['intro'] = profile.intro
if d_profile:
init_dict['department'] = d_profile.department
init_dict['telephone'] = d_profile.telephone
form = form_class(init_dict)
# common logic
try:
server_crypto = UserOptions.objects.is_server_crypto(username)
except CryptoOptionNotSetError:
# Assume server_crypto is ``False`` if this option is not set.
server_crypto = False
sub_lib_enabled = UserOptions.objects.is_sub_lib_enabled(username)
default_repo_id = UserOptions.objects.get_default_repo(username)
if default_repo_id:
default_repo = seafile_api.get_repo(default_repo_id)
else:
default_repo = None
owned_repos = get_owned_repo_list(request)
return render_to_response('profile/set_profile.html', {
'form': form,
'server_crypto': server_crypto,
"sub_lib_enabled": sub_lib_enabled,
'force_server_crypto': settings.FORCE_SERVER_CRYPTO,
'default_repo': default_repo,
'owned_repos': owned_repos,
}, context_instance=RequestContext(request))
@login_required
def user_profile(request, username_or_id):
# fetch the user by username or id, try id first
user = None
try:
user_id = int(username_or_id)
try:
user = User.objects.get(id=user_id)
        except User.DoesNotExist:
pass
except ValueError:
try:
user = User.objects.get(email=username_or_id)
except User.DoesNotExist:
pass
nickname = '' if user is None else email2nickname(user.username)
if user is not None:
profile = Profile.objects.get_profile_by_user(user.username)
intro = profile.intro if profile else ''
d_profile = DetailedProfile.objects.get_detailed_profile_by_user(
user.username)
if user.username == request.user.username:
add_to_contacts = False
else:
c = Contact.objects.get_contact_by_user(request.user.username,
user.username)
            add_to_contacts = c is None
else:
intro = _(u'Has not accepted invitation yet')
d_profile = None
add_to_contacts = False
return render_to_response('profile/user_profile.html', {
'username_or_id': username_or_id,
'user': user,
'nickname': nickname,
'intro': intro,
'add_to_contacts': add_to_contacts,
'd_profile': d_profile,
}, context_instance=RequestContext(request))
@login_required
def get_user_profile(request, user):
data = {
'email': user,
'user_nickname': '',
'user_intro': '',
'err_msg': '',
'new_user': ''
}
content_type = 'application/json; charset=utf-8'
try:
user_check = User.objects.get(email=user)
except User.DoesNotExist:
user_check = None
if user_check:
profile = Profile.objects.filter(user=user)
if profile:
profile = profile[0]
data['user_nickname'] = profile.nickname
data['user_intro'] = profile.intro
else:
data['user_intro'] = _(u'Has not accepted invitation yet')
if user == request.user.username or \
Contact.objects.filter(user_email=request.user.username,
contact_email=user).count() > 0:
data['new_user'] = False
else:
data['new_user'] = True
return HttpResponse(json.dumps(data), content_type=content_type)
@login_required
def delete_user_account(request):
username = request.user.username
if username == '[email protected]':
messages.error(request, _(u'Demo account can not be deleted.'))
next = request.META.get('HTTP_REFERER', settings.SITE_ROOT)
return HttpResponseRedirect(next)
user = User.objects.get(email=username)
user.delete()
clear_token(username)
if is_org_context(request):
org_id = request.user.org.org_id
seaserv.ccnet_threaded_rpc.remove_org_user(org_id, username)
return HttpResponseRedirect(settings.LOGIN_URL)
@login_required
def default_repo(request):
"""Handle post request to create default repo for user.
"""
if request.method != 'POST':
raise Http404
repo_id = request.POST.get('dst_repo', '')
referer = request.META.get('HTTP_REFERER', None)
next = settings.SITE_ROOT if referer is None else referer
repo = seafile_api.get_repo(repo_id)
if repo is None:
messages.error(request, _('Failed to set default library.'))
return HttpResponseRedirect(next)
if repo.encrypted:
messages.error(request, _('Can not set encrypted library as default library.'))
return HttpResponseRedirect(next)
username = request.user.username
UserOptions.objects.set_default_repo(username, repo.id)
messages.success(request, _('Successfully set "%s" as your default library.') % repo.name)
return HttpResponseRedirect(next)
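# One way these views could be wired into a urls.py, sketched for illustration. Only the
# 'edit_profile' name is taken from the code above (it is reversed in edit_profile());
# the regexes and the other names are assumptions, not seahub's actual URL configuration.
# from django.conf.urls import url
# urlpatterns = [
#     url(r'^$', edit_profile, name='edit_profile'),
#     url(r'^default-repo/$', default_repo, name='default_repo'),
#     url(r'^(?P<username_or_id>[^/]+)/$', user_profile, name='user_profile'),
# ]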
|
|
from fontPens.digestPointPen import DigestPointPen
from fontTools.misc import arrayTools as ftArrayTools
import defcon
from .tools import (
unwrapPoint,
calculateAngle
)
from . import registry
from .wrappers import *
# Stem Consistency
def testStemWidths(glyph):
"""
Stem widths should be consistent.
Data structure:
{
horizontal : [(y1, y2, [x1, x2, ...]), ...]
vertical : [(x1, x2, [y1, y2, ...]), ...]
}
"""
font = wrapFont(glyph.font)
layer = font.getLayer(glyph.layer.name)
glyph = layer[glyph.name]
hProblems = vProblems = None
tolerance = 5
# horizontal
hStems = [_StemWrapper(v, tolerance) for v in font.info.postscriptStemSnapH]
if hStems:
hProblems = _findStemProblems(glyph, hStems, "h")
# vertical
vStems = [_StemWrapper(v, tolerance) for v in font.info.postscriptStemSnapV]
if vStems:
vProblems = _findStemProblems(glyph, vStems, "v")
# report
data = dict(horizontal=hProblems, vertical=vProblems)
return data
def _findStemProblems(glyph, targetStems, stemDirection):
stems = set()
# h/v abstraction
if stemDirection == "h":
primaryCoordinate = 1
secondaryCoordinate = 0
desiredClockwiseAngle = 0
desiredCounterAngle = 180
else:
primaryCoordinate = 0
secondaryCoordinate = 1
desiredClockwiseAngle = -90
desiredCounterAngle = 90
# structure the contour and line data for efficient processing
contours = {
True : [],
False : []
}
for contour in glyph:
contourDirection = contour.clockwise
bounds = contour.bounds
lines = {}
# line to
previous = unwrapPoint(contour[-1].onCurve)
for segment in contour:
point = unwrapPoint(segment.onCurve)
if segment.type == "line":
# only process completely horizontal/vertical lines
# that have a length greater than 0
if (previous[primaryCoordinate] == point[primaryCoordinate]) and (previous[secondaryCoordinate] != point[secondaryCoordinate]):
angle = calculateAngle(previous, point)
p = point[primaryCoordinate]
s1 = previous[secondaryCoordinate]
s2 = point[secondaryCoordinate]
s1, s2 = sorted((s1, s2))
if angle not in lines:
lines[angle] = {}
if p not in lines[angle]:
lines[angle][p] = []
lines[angle][p].append((s1, s2))
previous = point
# imply stems from curves by using BCP handles
previous = contour[-1]
for segment in contour:
if segment.type == "curve" and previous.type == "curve":
bcp1 = unwrapPoint(previous[1])
bcp2 = unwrapPoint(segment[-1])
if bcp1[primaryCoordinate] == bcp2[primaryCoordinate]:
angle = calculateAngle(bcp1, bcp2)
p = bcp1[primaryCoordinate]
s1 = bcp1[secondaryCoordinate]
s2 = bcp2[secondaryCoordinate]
s1, s2 = sorted((s1, s2))
if angle not in lines:
lines[angle] = {}
if p not in lines[angle]:
lines[angle][p] = []
lines[angle][p].append((s1, s2))
previous = segment
contours[contourDirection].append((bounds, lines))
# single contours
for clockwise, directionContours in contours.items():
for contour in directionContours:
bounds, data = contour
for angle1, lineData1 in data.items():
for angle2, lineData2 in data.items():
if angle1 == angle2:
continue
if clockwise and angle1 == desiredClockwiseAngle:
continue
if not clockwise and angle1 == desiredCounterAngle:
continue
for p1, lines1 in lineData1.items():
for p2, lines2 in lineData2.items():
if p2 <= p1:
continue
for s1a, s1b in lines1:
for s2a, s2b in lines2:
overlap = _linesOverlap(s1a, s1b, s2a, s2b)
if not overlap:
continue
w = p2 - p1
hits = []
for stem in targetStems:
if w == stem:
d = stem.diff(w)
if d:
hits.append((d, stem.value, (s1a, s1b, s2a, s2b)))
if hits:
hit = min(hits)
w = hit[1]
s = hit[2]
stems.add((p1, p1 + w, s))
# double contours to test
for clockwiseContour in contours[True]:
clockwiseBounds = clockwiseContour[0]
for counterContour in contours[False]:
counterBounds = counterContour[0]
overlap = ftArrayTools.sectRect(clockwiseBounds, counterBounds)[0]
if not overlap:
continue
clockwiseData = clockwiseContour[1]
counterData = counterContour[1]
            for clockwiseAngle, clockwiseLineData in clockwiseData.items():
                for counterAngle, counterLineData in counterData.items():
if clockwiseAngle == counterAngle:
continue
for clockwiseP, clockwiseLines in clockwiseLineData.items():
for counterP, counterLines in counterLineData.items():
for clockwiseSA, clockwiseSB in clockwiseLines:
for counterSA, counterSB in counterLines:
overlap = _linesOverlap(clockwiseSA, clockwiseSB, counterSA, counterSB)
if not overlap:
continue
w = abs(counterP - clockwiseP)
hits = []
for stem in targetStems:
if w == stem:
d = stem.diff(w)
if d:
hits.append((d, stem.value, (clockwiseSA, clockwiseSB, counterSA, counterSB)))
if hits:
p = min((clockwiseP, counterP))
hit = min(hits)
w = hit[1]
s = hit[2]
stems.add((p, p + w, s))
# done
return stems
class _StemWrapper(object):
def __init__(self, value, threshold):
self.value = value
self.threshold = threshold
def __repr__(self):
return "<PS Stem Value: value=%d threshold=%d>" % (self.value, self.threshold)
def __eq__(self, other):
d = abs(self.value - other)
return d <= self.threshold
def diff(self, other):
return abs(self.value - other)
def _linesOverlap(a1, a2, b1, b2):
if a1 > b2 or a2 < b1:
return False
return True
registry.registerTest(
identifier="stemWidths",
level="glyph",
title="Stem Widths",
description="One or more stems do not match the registered values.",
testFunction=testStemWidths,
defconClass=defcon.Glyph,
destructiveNotifications=["Glyph.ContoursChanged"]
)
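# A quick, hypothetical usage sketch of the test registered above; the UFO path and the
# glyph name are placeholders. testStemWidths() returns the dict described in its
# docstring, with None for a direction that has no registered stem snap values.
# font = defcon.Font("Example.ufo")
# problems = testStemWidths(font["n"])
# for y1, y2, lines in (problems["horizontal"] or []):
#     print("horizontal stem from %s to %s misses a stem snap value" % (y1, y2))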
# Duplicate Contours
def testDuplicateContours(glyph):
"""
Contours shouldn't be duplicated on each other.
Data structure:
[
(contourIndex, bounds),
...
]
"""
glyph = wrapGlyph(glyph)
contours = {}
for index, contour in enumerate(glyph):
contour = contour.copy()
contour.autoStartSegment()
pen = DigestPointPen()
contour.drawPoints(pen)
digest = pen.getDigest()
if digest not in contours:
contours[digest] = []
        contours[digest].append((index, contour.bounds))
    duplicateContours = []
    for digest, entries in contours.items():
        if len(entries) > 1:
            # report the first duplicated contour with its own bounds, not the bounds
            # of whatever contour the loop above happened to visit last
            index, bounds = entries[0]
            duplicateContours.append((index, bounds))
return duplicateContours
registry.registerTest(
identifier="duplicateContours",
level="glyph",
title="Duplicate Contours",
description="One or more contours are duplicated.",
testFunction=testDuplicateContours,
defconClass=defcon.Glyph,
destructiveNotifications=["Glyph.ContoursChanged"]
)
# Duplicate Components
def testDuplicateComponents(glyph):
"""
Components shouldn't be duplicated on each other.
[
(componentIndex, bounds),
...
]
"""
glyph = wrapGlyph(glyph)
duplicateComponents = []
components = set()
for index, component in enumerate(glyph.components):
key = (component.baseGlyph, component.transformation)
if key in components:
duplicateComponents.append((index, component.bounds))
components.add(key)
return duplicateComponents
registry.registerTest(
identifier="duplicateComponents",
level="glyph",
title="Duplicate Components",
description="One or more components are duplicated.",
testFunction=testDuplicateComponents,
defconClass=defcon.Glyph,
destructiveNotifications=["Glyph.ComponentsChanged"]
)
|
|
# Version: 2.3
# Author: Miguel Martinez Lopez
# Uncomment the next line to see my email
# print("Author's email: %s"%"61706c69636163696f6e616d656469646140676d61696c2e636f6d".decode("hex"))
try:
from Tkinter import Frame, Label
from Tkconstants import *
from tkFont import Font, nametofont
from ttk import Treeview, Style
except ImportError:
from tkinter import Frame, Label
from tkinter.constants import *
from tkinter.font import Font, nametofont
from tkinter.ttk import Treeview, Style
# Python 3 compatibility
try:
basestring
except NameError:
basestring = str
class Row(object):
def __init__(self, table, index):
self._multicolumn_listbox = table
self._index = index
def data(self):
return self._multicolumn_listbox.row_data(self._index)
def delete(self):
self._multicolumn_listbox.delete_row(self._index)
def update(self, data):
self._multicolumn_listbox.update_row(self._index, data)
def select(self):
self._multicolumn_listbox.select_row(self._index)
def deselect(self):
self._multicolumn_listbox.deselect_row(self._index)
def __str__(self):
return str(self.data())
def __len__(self):
return self._multicolumn_listbox.number_of_columns
class Column(object):
def __init__(self, table, index):
self._multicolumn_listbox = table
self._index = index
def data(self):
return self._multicolumn_listbox.column_data(self._index)
def delete(self):
self._multicolumn_listbox.delete_column(self._index)
def update(self, data):
self._multicolumn_listbox.update_column(self._index, data)
def __str__(self):
return str(self.data())
def __len__(self):
return self._multicolumn_listbox.number_of_rows
class Multicolumn_Listbox(object):
_style_index = 0
class List_Of_Rows(object):
def __init__(self, multicolumn_listbox):
self._multicolumn_listbox = multicolumn_listbox
def data(self, index):
return self._multicolumn_listbox.row_data(index)
def get(self, index):
return Row(self._multicolumn_listbox, index)
def insert(self, data, index=None):
self._multicolumn_listbox.insert_row(data, index)
def delete(self, index):
self._multicolumn_listbox.delete_row(index)
def update(self, index, data):
self._multicolumn_listbox.update_row(index, data)
def select(self, index):
self._multicolumn_listbox.select_row(index)
def deselect(self, index):
self._multicolumn_listbox.deselect_row(index)
def set_selection(self, indices):
self._multicolumn_listbox.set_selection(indices)
def __getitem__(self, index):
return self.get(index)
def __setitem__(self, index, value):
return self._multicolumn_listbox.update_row(index, value)
def __delitem__(self, index):
self._multicolumn_listbox.delete_row(index)
def __len__(self):
return self._multicolumn_listbox.number_of_rows
class List_Of_Columns(object):
def __init__(self, multicolumn_listbox):
self._multicolumn_listbox = multicolumn_listbox
def data(self, index):
return self._multicolumn_listbox.get_column(index)
def get(self, index):
return Column(self._multicolumn_listbox, index)
def delete(self, index):
self._multicolumn_listbox.delete_column(index)
def update(self, index, data):
self._multicolumn_listbox.update_column(index, data)
def __getitem__(self, index):
return self.get(index)
def __setitem__(self, index, value):
return self._multicolumn_listbox.update_column(index, value)
def __delitem__(self, index):
self._multicolumn_listbox.delete_column(index)
def __len__(self):
return self._multicolumn_listbox.number_of_columns
def __init__(self, master, columns, data=None, command=None, sort=True, select_mode=None, heading_anchor = CENTER, cell_anchor=W, style=None, height=None, padding=None, adjust_heading_to_content=False, stripped_rows=None, selection_background=None, selection_foreground=None, field_background=None, heading_font= None, heading_background=None, heading_foreground=None, cell_pady=2, cell_background=None, cell_foreground=None, cell_font=None, headers=True):
self._stripped_rows = stripped_rows
self._columns = columns
self._number_of_rows = 0
self._number_of_columns = len(columns)
self.row = self.List_Of_Rows(self)
self.column = self.List_Of_Columns(self)
s = Style()
if style is None:
style_name = "Multicolumn_Listbox%s.Treeview"%self._style_index
            Multicolumn_Listbox._style_index += 1  # bump the class-level counter so each instance gets its own style
else:
style_name = style
style_map = {}
if selection_background is not None:
style_map["background"] = [('selected', selection_background)]
if selection_foreground is not None:
style_map["foeground"] = [('selected', selection_foreground)]
if style_map:
s.map(style_name, **style_map)
style_config = {}
if cell_background is not None:
style_config["background"] = cell_background
if cell_foreground is not None:
style_config["foreground"] = cell_foreground
if cell_font is None:
font_name = s.lookup(style_name, "font")
cell_font = nametofont(font_name)
else:
if not isinstance(cell_font, Font):
if isinstance(cell_font, basestring):
cell_font = nametofont(cell_font)
else:
                    if len(cell_font) == 1:
                        cell_font = Font(family=cell_font[0])
                    elif len(cell_font) == 2:
                        cell_font = Font(family=cell_font[0], size=cell_font[1])
                    elif len(cell_font) == 3:
                        cell_font = Font(family=cell_font[0], size=cell_font[1], weight=cell_font[2])
                    else:
                        raise ValueError("A font specification can have at most 3 values: family, size and weight")
style_config["font"] = cell_font
self._cell_font = cell_font
self._rowheight = cell_font.metrics("linespace")+cell_pady
style_config["rowheight"]=self._rowheight
if field_background is not None:
style_config["fieldbackground"] = field_background
s.configure(style_name, **style_config)
heading_style_config = {}
if heading_font is not None:
heading_style_config["font"] = heading_font
if heading_background is not None:
heading_style_config["background"] = heading_background
if heading_foreground is not None:
heading_style_config["foreground"] = heading_foreground
heading_style_name = style_name + ".Heading"
s.configure(heading_style_name, **heading_style_config)
treeview_kwargs = {"style": style_name}
if height is not None:
treeview_kwargs["height"] = height
if padding is not None:
treeview_kwargs["padding"] = padding
if headers:
treeview_kwargs["show"] = "headings"
else:
treeview_kwargs["show"] = ""
if select_mode is not None:
treeview_kwargs["selectmode"] = select_mode
self.interior = Treeview(master, columns=columns, **treeview_kwargs)
if command is not None:
self._command = command
self.interior.bind("<<TreeviewSelect>>", self._on_select)
for i in range(0, self._number_of_columns):
if sort:
self.interior.heading(i, text=columns[i], anchor=heading_anchor, command=lambda col=i: self.sort_by(col, descending=False))
else:
self.interior.heading(i, text=columns[i], anchor=heading_anchor)
if adjust_heading_to_content:
self.interior.column(i, width=Font().measure(columns[i]))
self.interior.column(i, anchor=cell_anchor)
if data is not None:
for row in data:
self.insert_row(row)
@property
def row_height(self):
return self._rowheight
@property
def font(self):
return self._cell_font
def configure_column(self, index, width=None, minwidth=None, anchor=None, stretch=None):
kwargs = {}
for config_name in ("width", "anchor", "stretch", "minwidth"):
config_value = locals()[config_name]
if config_value is not None:
kwargs[config_name] = config_value
self.interior.column('#%s'%(index+1), **kwargs)
def row_data(self, index):
try:
item_ID = self.interior.get_children()[index]
except IndexError:
raise ValueError("Row index out of range: %d"%index)
return self.item_ID_to_row_data(item_ID)
def update_row(self, index, data):
try:
item_ID = self.interior.get_children()[index]
except IndexError:
raise ValueError("Row index out of range: %d"%index)
if len(data) == len(self._columns):
self.interior.item(item_ID, values=data)
else:
raise ValueError("The multicolumn listbox has only %d columns"%self._number_of_columns)
def delete_row(self, index):
list_of_items = self.interior.get_children()
try:
item_ID = list_of_items[index]
except IndexError:
raise ValueError("Row index out of range: %d"%index)
self.interior.delete(item_ID)
self._number_of_rows -= 1
if self._stripped_rows:
for i in range(index, self._number_of_rows):
self.interior.tag_configure(list_of_items[i+1], background=self._stripped_rows[i%2])
def insert_row(self, data, index=None):
if len(data) != self._number_of_columns:
raise ValueError("The multicolumn listbox has only %d columns"%self._number_of_columns)
if index is None:
index = self._number_of_rows-1
item_ID = self.interior.insert('', index, values=data)
self.interior.item(item_ID, tags=item_ID)
self._number_of_rows += 1
if self._stripped_rows:
list_of_items = self.interior.get_children()
self.interior.tag_configure(item_ID, background=self._stripped_rows[index%2])
for i in range(index+1, self._number_of_rows):
self.interior.tag_configure(list_of_items[i], background=self._stripped_rows[i%2])
def column_data(self, index):
return [self.interior.set(child_ID, index) for child_ID in self.interior.get_children('')]
def update_column(self, index, data):
for i, item_ID in enumerate(self.interior.get_children()):
data_row = self.item_ID_to_row_data(item_ID)
data_row[index] = data[i]
self.interior.item(item_ID, values=data_row)
return data
def clear(self):
# Another possibility:
# self.interior.delete(*self.interior.get_children())
for row in self.interior.get_children():
self.interior.delete(row)
self._number_of_rows = 0
def update(self, data):
self.clear()
for row in data:
self.insert_row(row)
def focus(self, index=None):
if index is None:
return self.interior.item(self.interior.focus())
else:
item = self.interior.get_children()[index]
self.interior.focus(item)
def state(self, state=None):
if state is None:
return self.interior.state()
else:
self.interior.state(state)
@property
def number_of_rows(self):
return self._number_of_rows
@property
def number_of_columns(self):
return self._number_of_columns
def toggle_selection(self, index):
list_of_items = self.interior.get_children()
try:
item_ID = list_of_items[index]
except IndexError:
raise ValueError("Row index out of range: %d"%index)
self.interior.selection_toggle(item_ID)
def select_row(self, index):
list_of_items = self.interior.get_children()
try:
item_ID = list_of_items[index]
except IndexError:
raise ValueError("Row index out of range: %d"%index)
self.interior.selection_add(item_ID)
def deselect_row(self, index):
list_of_items = self.interior.get_children()
try:
item_ID = list_of_items[index]
except IndexError:
raise ValueError("Row index out of range: %d"%index)
self.interior.selection_remove(item_ID)
def deselect_all(self):
self.interior.selection_remove(self.interior.selection())
def set_selection(self, indices):
list_of_items = self.interior.get_children()
self.interior.selection_set(" ".join(list_of_items[row_index] for row_index in indices))
@property
def selected_rows(self):
data = []
for item_ID in self.interior.selection():
data_row = self.item_ID_to_row_data(item_ID)
data.append(data_row)
return data
@property
def indices_of_selected_rows(self):
list_of_indices = []
for index, item_ID in enumerate(self.interior.get_children()):
if item_ID in self.interior.selection():
list_of_indices.append(index)
return list_of_indices
def delete_all_selected_rows(self):
selected_items = self.interior.selection()
for item_ID in selected_items:
self.interior.delete(item_ID)
number_of_deleted_rows = len(selected_items)
self._number_of_rows -= number_of_deleted_rows
return number_of_deleted_rows
def _on_select(self, event):
for item_ID in event.widget.selection():
data_row = self.item_ID_to_row_data(item_ID)
self._command(data_row)
def item_ID_to_row_data(self, item_ID):
item = self.interior.item(item_ID)
return item["values"]
@property
def table_data(self):
data = []
for item_ID in self.interior.get_children():
data_row = self.item_ID_to_row_data(item_ID)
data.append(data_row)
return data
@table_data.setter
def table_data(self, data):
self.update(data)
def cell_data(self, row, column):
"""Get the value of a table cell"""
try:
item = self.interior.get_children()[row]
except IndexError:
raise ValueError("Row index out of range: %d"%row)
return self.interior.set(item, column)
def update_cell(self, row, column, value):
"""Set the value of a table cell"""
item_ID = self.interior.get_children()[row]
data = self.item_ID_to_row_data(item_ID)
data[column] = value
self.interior.item(item_ID, values=data)
def __getitem__(self, index):
if isinstance(index, tuple):
row, column = index
return self.cell_data(row, column)
else:
raise Exception("Row and column indices are required")
def __setitem__(self, index, value):
if isinstance(index, tuple):
row, column = index
self.update_cell(row, column, value)
else:
raise Exception("Row and column indices are required")
def bind(self, event, handler):
self.interior.bind(event, handler)
def sort_by(self, col, descending):
"""
sort tree contents when a column header is clicked
"""
# grab values to sort
data = [(self.interior.set(child_ID, col), child_ID) for child_ID in self.interior.get_children('')]
# if the data to be sorted is numeric change to float
try:
data = [(float(number), child_ID) for number, child_ID in data]
except ValueError:
pass
# now sort the data in place
data.sort(reverse=descending)
for idx, item in enumerate(data):
self.interior.move(item[1], '', idx)
# switch the heading so that it will sort in the opposite direction
self.interior.heading(col, command=lambda col=col: self.sort_by(col, not descending))
if self._stripped_rows:
list_of_items = self.interior.get_children('')
for i in range(len(list_of_items)):
self.interior.tag_configure(list_of_items[i], background=self._stripped_rows[i%2])
def destroy(self):
self.interior.destroy()
def item_ID(self, index):
return self.interior.get_children()[index]
if __name__ == '__main__':
try:
from Tkinter import Tk
import tkMessageBox as messagebox
except ImportError:
from tkinter import Tk
from tkinter import messagebox
root = Tk()
def on_select(data):
print("called command when row is selected")
print(data)
print("\n")
def show_info(msg):
messagebox.showinfo("Table Data", msg)
mc = Multicolumn_Listbox(root, ["column one","column two", "column three"], stripped_rows = ("white","#f2f2f2"), command=on_select, cell_anchor="center")
mc.interior.pack()
mc.insert_row([1,2,3])
show_info("mc.insert_row([1,2,3])")
mc.row.insert([4,5,7])
show_info("mc.row.insert([4,5,7])")
mc.update_row(0, [7,8,9])
show_info("mc.update_row(0, [4,5,6])")
mc.update([[1,2,3], [4,5,6]])
show_info("mc.update([[1,2,3], [4,5,6]])")
mc.select_row(0)
show_info("mc.select_row(0)")
print("mc.selected_rows")
print(mc.selected_rows)
print("\n")
print("mc.table_data")
print(mc.table_data)
print("\n")
print("mc.row[0]")
print(mc.row[0])
print("\n")
print("mc.row_data(0)")
print(mc.row_data(0))
print("\n")
print("mc.column[1]")
print(mc.column[1])
print("\n")
print("mc[0,1]")
print(mc[0,1])
print("\n")
mc.column[1] = ["item1", "item2"]
mc.update_column(2, [8,9])
show_info("mc.update_column(2, [8,9])")
mc.clear()
show_info("mc.clear()")
mc.table_data = [[1,2,3], [4,5,6], [7,8,9], [10,11,12], [13,14,15], [16,17,18], [19,20,21]]
show_info("mc.table_data = [[1,2,3], [4,5,6], [7,8,9], [10,11,12], [13,14,15], [16,17,18], [19,20,21]]")
mc.delete_row(1)
show_info("mc.delete_row(1)")
row = mc.row[0].update([2,4,5])
show_info("mc.row[0].update([2,4,5])")
root.mainloop()
|
|
####
# compile.py
#!/usr/bin/env python
import re
import os
import sys
import new
import imp
import time
import struct
import marshal
import compiler
from compiler.ast import Const, AssName, AssTuple
__author__ = 'Shimomura Ikkei'
__date__ = '2005-06-23'
__all__ = ['ConstantCompiler']
# Check whether a string is a valid constant name.
isConstName = re.compile('^[A-Z][A-Z_]+$').match
def ispyfile(filename):
"ispyfile(filename) ... The file is python source file."
assert isinstance(filename, str) and filename
return filename.endswith('.py') and os.path.isfile(filename)
def change_extension(name, ext='.pyc'):
"change_extension(name, ext) ... Rename exstension."
assert isinstance(name, str) and name
assert isinstance(ext, str) and ext
assert ext.startswith('.'), 'File extension must start with a dot.'
return os.path.splitext(name)[0] + ext
class ConstantVisitor:
def __init__(self, constants):
self.constants = constants
def __registerConstant(self, node, assign, const):
assert isinstance(assign, AssName)
if isConstName(assign.name):
if assign.name in self.constants:
print "Warning: %s at line %d: '%s' is already defined." % \
(node.filename, node.lineno, assign.name)
else:
if isinstance(const, Const):
self.constants[assign.name] = const.value
else:
self.constants[assign.name] = None # dummy data
def visitAssign(self, node):
nodes = node.getChildren()
if isinstance(nodes[0], AssName):
name, const = nodes
self.__registerConstant(node, name, const)
elif isinstance(nodes[0], AssTuple):
names, consts = nodes
names = names.getChildren()
consts = consts.getChildren()
assert len(names) == len(consts)
for name, const in zip(names, consts):
self.__registerConstant(node, name, const)
def visitName(self, node):
assert isinstance(node, compiler.ast.Name)
if isConstName(node.name) and node.name in self.constants:
value = self.constants.get(node.name)
# If the value can be a constant (int, long, float, str, ...)
if isinstance(value, (int, long, float, str)):
node.__class__ = Const
node.value = value
del node.name
class ConstantCompiler:
def __init__(self, filename=None):
self.constants = {}
if filename and filename.endswith('.py') and os.path.isfile(filename):
self.__load_constants(filename)
def __load_constants(self, filename):
assert isinstance(filename, str) and filename.endswith('.py')
assert os.path.isfile(filename) and os.access(filename, os.R_OK)
try:
fh, filename, opts = imp.find_module(os.path.splitext(filename)[0])
mod = imp.load_module("", fh, filename, opts)
for k,v in ((x,getattr(mod,x)) for x in dir(mod) if isConstName(x)):
self.constants[k] = v
except ImportError:
print "Failed to import module '%s'" % filename
def __walk_ast(self, ast):
compiler.walk(ast, ConstantVisitor(self.constants))
def compile(self, filename):
assert isinstance(filename, str) and filename
assert os.path.isfile(filename) and filename.endswith('.py')
# Parse python source -> AST(Abstract Syntax Tree)
src = open(filename, 'r')
ast = compiler.parse(src.read())
src.close()
# Syntax Macro (Expand constant values before compile)
compiler.misc.set_filename(filename, ast)
compiler.syntax.check(ast)
self.__walk_ast(ast)
# Compile AST -> code object.
code = compiler.pycodegen.ModuleCodeGenerator(ast).getCode()
return CodeWrapper(filename, code)
class CodeWrapper:
"""An utility class to save code object as .pyc file."""
def __init__(self, src_filename, code):
"CodeWrapper(code) This class only wrap an object for method chain."
assert isinstance(src_filename, str) and src_filename
assert os.path.isfile(src_filename) and src_filename.endswith('.py')
assert isinstance(code, new.code)
self.src_filename = src_filename
self.__code = code
def getCode(self):
"getCode() ... Returns code object."
assert isinstance(self.__code, new.code)
return self.__code
def __timestamp(self, pyc_filename):
"__get_timestamp(pyc_filename) Gets timestamp stored in .pyc file."
assert isinstance(pyc_filename, str) and pyc_filename
assert pyc_filename.endswith('.pyc')
assert os.path.isfile(pyc_filename)
assert os.access(pyc_filename, os.R_OK)
try:
pyc = open(pyc_filename, 'rb')
# The first 4 bytes are the magic number of the .pyc file;
# this checks the Python version.
if pyc.read(4) == imp.get_magic():
# The next 4 bytes are the timestamp stored as a long;
# this is the value we need.
return struct.unpack("<l", pyc.read(4))[0]
else:
# Not a .pyc file, or a different Python version;
# it should always be recompiled.
return -1
finally:
pyc.close()
def __modified(self, src, pyc):
"__modified(src_filename, pyc_filename) Returns True if src updated."
assert isinstance(src, str) and src and src.endswith('.py')
assert isinstance(pyc, str) and pyc and pyc.endswith('.pyc')
assert os.path.isfile(src)
# If not exists .pyc file then always True.
if not os.path.isfile(pyc):
return True
# Is the source's modification time newer than the .pyc's timestamp?
return os.stat(src).st_mtime > self.__timestamp(pyc)
def save_as(self, pyc_filename):
"save_as(pyc_filename) ... Save current code object to .pyc file."
assert isinstance(self.__code, new.code)
assert isinstance(pyc_filename, str) and pyc_filename
assert pyc_filename.endswith('.pyc')
# Skip writing if the .pyc file is already up to date.
if self.__modified(self.src_filename, pyc_filename):
# Output dump the code object to .pyc file.
pyc = open(pyc_filename, 'wb')
pyc.write(imp.get_magic())
pyc.write(struct.pack('<l', int(time.time())))
marshal.dump(self.__code, pyc)
pyc.close()
assert os.path.isfile(pyc_filename)
assert os.path.getsize(pyc_filename) > 0
def main(const_file, *argv):
pyc = ConstantCompiler(const_file)
for filename in filter(os.path.exists, argv):
pyc.compile(filename).save_as(change_extension(filename, ext='.pyc'))
if __name__ == '__main__':
main(*sys.argv[1:])
####
# define_constants.py
import math
PI = math.atan(1) * 4.0
DEBUG = 1
####
# test_constants.py
print PI
def foo(num):
if DEBUG:
print "debug foo(%d)" num
print num
for i in range(20): foo(i)
####
# how to run
# python compile.py define_constants.py test_constants.py
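# Illustrative sketch (not part of the original recipe): after compilation,
# references to PI and DEBUG in test_constants.pyc are expected to be folded
# into literals, roughly as if this source had been compiled instead:
#
#   print 3.141592653589793
#   def foo(num):
#       if 1:
#           print "debug foo(%d)" % num
#       print num
#   for i in range(20): foo(i)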
|
|
from datetime import datetime
import errno
import fcntl
import os
import socket
import sys
import traceback
import threading
import xml.etree.ElementTree as ET
from zmq.eventloop import ioloop
from tornado import iostream
from pyfire import configuration as config
from pyfire.errors import XMPPProtocolError
from pyfire.logger import Logger
from pyfire.stream import processor
from pyfire.stream.stanzas import TagHandler
log = Logger(__name__)
class XMPPServer(object):
"""A non-blocking, single-threaded XMPP server."""
def __init__(self, io_loop=None):
self.io_loop = io_loop or ioloop.IOLoop.instance()
self._sockets = {} # fd -> socket object
self._started = False
self._connections = {}
self.checker = ioloop.PeriodicCallback(
self.check_for_closed_connections, 30000)
def listen(self, port, address=""):
"""Binds to the given port and starts the server in a single process.
This method is a shortcut for:
server.bind(port, address)
server.start()
"""
self.bind(port, address)
self.start()
def bind(self, port, address=None, family=socket.AF_UNSPEC):
"""Binds this server to the given port on the given address.
To start the server, call start(). You can call listen() as
a shortcut to the sequence of bind() and start() calls.
Address may be either an IP address or hostname. If it's a hostname,
the server will listen on all IP addresses associated with the
name. Address may be an empty string or None to listen on all
available interfaces. Family may be set to either socket.AF_INET
or socket.AF_INET6 to restrict to ipv4 or ipv6 addresses, otherwise
both will be used if available.
This method may be called multiple times prior to start() to listen
on multiple ports or interfaces.
"""
if address == "":
address = None
for res in socket.getaddrinfo(address, port, family,
socket.SOCK_STREAM, 0,
socket.AI_PASSIVE | socket.AI_ADDRCONFIG):
af, socktype, proto, canonname, sockaddr = res
sock = socket.socket(af, socktype, proto)
flags = fcntl.fcntl(sock.fileno(), fcntl.F_GETFD)
flags |= fcntl.FD_CLOEXEC
fcntl.fcntl(sock.fileno(), fcntl.F_SETFD, flags)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if af == socket.AF_INET6:
# On linux, ipv6 sockets accept ipv4 too by default,
# but this makes it impossible to bind to both
# 0.0.0.0 in ipv4 and :: in ipv6. On other systems,
# separate sockets *must* be used to listen for both ipv4
# and ipv6. For consistency, always disable ipv4 on our
# ipv6 sockets and use a separate ipv4 socket when needed.
if hasattr(socket, "IPPROTO_IPV6"):
sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
sock.setblocking(0)
sock.bind(sockaddr)
sock.listen(128)
log.info("Starting to listen on IP %s Port %s for connections" % sockaddr)
self._sockets[sock.fileno()] = sock
if self._started:
self.io_loop.add_handler(sock.fileno(), self._handle_events,
ioloop.IOLoop.READ)
def start(self):
"""Starts this server in the IOLoop."""
assert not self._started
for fd in self._sockets.keys():
self.io_loop.add_handler(fd, self._handle_events,
ioloop.IOLoop.READ)
def stop(self):
"""Stops listening for new connections.
Streams currently running may still continue after the
server is stopped.
"""
for fd, sock in self._sockets.iteritems():
self.io_loop.remove_handler(fd)
sock.close()
def _handle_events(self, fd, events):
while True:
try:
connection, address = self._sockets[fd].accept()
except socket.error, e:
if e.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
return
raise
try:
stream = iostream.IOStream(connection, io_loop=self.io_loop)
log.info("Starting new connection for client connection from %s:%s" % address)
self._connections[address] = XMPPConnection(stream, address)
if not self.checker._running:
self.checker.start()
except Exception, e:
exc_type, exc_value, exc_traceback = sys.exc_info()
log.error("Error in connection callback, %s" % str(e))
for line in traceback.format_tb(exc_traceback):
if line.find("\n") >= 0:
for subline in line.split("\n"):
log.error(subline)
else:
log.error(line.rstrip("\n"))
def check_for_closed_connections(self):
log.debug("checking for closed connections")
for address in self._connections.keys():
connection = self._connections[address]
if connection.closed():
log.debug("detected dead stream/connection: %s:%s" % connection.address)
del self._connections[address]
if len(self._connections) == 0:
log.debug("stopping checker")
self.checker.stop()
class XMPPConnection(object):
"""One XMPP connection initiated by class:`XMPPServer`"""
def __init__(self, stream, address):
self.stream = stream
self.address = address
self.connectiontime = self.last_seen = datetime.now()
self.taghandler = TagHandler(self)
self.parser = processor.StreamProcessor(
self.taghandler.streamhandler,
self.taghandler.contenthandler)
self.stream.read_bytes(1, self._read_char)
def _read_char(self, data):
"""Reads from client in byte mode"""
try:
if data == " ":
log.debug("Found whitespace keepalive")
self.stream.read_bytes(1, self._read_char)
else:
log.debug("Processing byte: %s" % data)
self.parser.feed(data)
self.stream.read_until(">", self._read_xml)
self.last_seen = datetime.now()
except IOError:
self.done()
def _read_xml(self, data):
"""Reads from client until closing tag for xml is found"""
try:
self.last_seen = datetime.now()
log.debug("Processing chunk: %s" % data)
self.parser.feed(data)
if self.parser.depth >= 2:
self.stream.read_until(">", self._read_xml)
else:
self.stream.read_bytes(1, self._read_char)
except IOError:
self.done()
def send_string(self, string, raises_error=True):
"""Sends a string to client"""
try:
self.stream.write(string)
log.debug("Sent string to client:" + string)
except IOError:
if raises_error:
raise
def send_element(self, element, raises_error=True):
"""Serializes and send an ET Element"""
self.send_string(ET.tostring(element), raises_error)
def stop_connection(self):
"""Sends stream close, discards stream closed errors"""
# Ignore IOErrors as the stream has already been closed
# and there is no need to send a stream end element on closed streams ;)
try:
self.taghandler.close()
self.send_string("</stream:stream>")
except IOError:
pass
self.done()
def done(self):
"""Does cleanup work"""
self.stream.close()
def closed(self):
"""Checks if underlying stream is closed"""
return self.stream.closed()
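# Hypothetical usage sketch (not part of the original module; assumes the
# tornado/zmq IOLoop wiring shown above):
#
#   if __name__ == '__main__':
#       server = XMPPServer()
#       server.listen(5222)               # standard XMPP client-to-server port
#       ioloop.IOLoop.instance().start()  # blocks and serves connections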
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.exceptions import DeserializationError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class StorageAccountsOperations(object):
"""StorageAccountsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2016-01-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-01-01"
self.config = config
def check_name_availability(
self, name, custom_headers=None, raw=False, **operation_config):
"""Checks that the storage account name is valid and is not already in
use.
:param name:
:type name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: CheckNameAvailabilityResult or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.storage.v2016_01_01.models.CheckNameAvailabilityResult or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
account_name = models.StorageAccountCheckNameAvailabilityParameters(name=name)
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/checkNameAvailability'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(account_name, 'StorageAccountCheckNameAvailabilityParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('CheckNameAvailabilityResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def _create_initial(
self, resource_group_name, account_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'StorageAccountCreateParameters')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccount', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create(
self, resource_group_name, account_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Asynchronously creates a new storage account with the specified
parameters. If an account is already created and a subsequent create
request is issued with different properties, the account properties
will be updated. If an account is already created and a subsequent
create or update request is issued with the exact same set of
properties, the request will succeed.
:param resource_group_name: The name of the resource group within the
user's subscription.
:type resource_group_name: str
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3 and
24 characters in length and use numbers and lower-case letters only.
:type account_name: str
:param parameters: The parameters to provide for the created account.
:type parameters:
~azure.mgmt.storage.v2016_01_01.models.StorageAccountCreateParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns
StorageAccount or ClientRawResponse if raw=true
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.storage.v2016_01_01.models.StorageAccount]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_initial(
resource_group_name=resource_group_name,
account_name=account_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
if raw:
return raw_result
# Construct and send request
def long_running_send():
return raw_result.response
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
header_parameters = {}
header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
return self._client.send(
request, header_parameters, stream=False, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = self._deserialize('StorageAccount', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def delete(
self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
"""Deletes a storage account in Microsoft Azure.
:param resource_group_name: The name of the resource group within the
user's subscription.
:type resource_group_name: str
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3 and
24 characters in length and use numbers and lower-case letters only.
:type account_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_properties(
self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
"""Returns the properties for the specified storage account including but
not limited to name, SKU name, location, and account status. The
ListKeys operation should be used to retrieve storage keys.
:param resource_group_name: The name of the resource group within the
user's subscription.
:type resource_group_name: str
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3 and
24 characters in length and use numbers and lower-case letters only.
:type account_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: StorageAccount or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.storage.v2016_01_01.models.StorageAccount or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccount', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update(
self, resource_group_name, account_name, parameters, custom_headers=None, raw=False, **operation_config):
"""The update operation can be used to update the SKU, encryption, access
tier, or tags for a storage account. It can also be used to map the
account to a custom domain. Only one custom domain is supported per
storage account; the replacement/change of custom domain is not
supported. In order to replace an old custom domain, the old value must
be cleared/unregistered before a new value can be set. The update of
multiple properties is supported. This call does not change the storage
keys for the account. If you want to change the storage account keys,
use the regenerate keys operation. The location and name of the storage
account cannot be changed after creation.
:param resource_group_name: The name of the resource group within the
user's subscription.
:type resource_group_name: str
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3 and
24 characters in length and use numbers and lower-case letters only.
:type account_name: str
:param parameters: The parameters to provide for the updated account.
:type parameters:
~azure.mgmt.storage.v2016_01_01.models.StorageAccountUpdateParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: StorageAccount or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.storage.v2016_01_01.models.StorageAccount or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'StorageAccountUpdateParameters')
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccount', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list(
self, custom_headers=None, raw=False, **operation_config):
"""Lists all the storage accounts available under the subscription. Note
that storage keys are not returned; use the ListKeys operation for
this.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of StorageAccount
:rtype:
~azure.mgmt.storage.v2016_01_01.models.StorageAccountPaged[~azure.mgmt.storage.v2016_01_01.models.StorageAccount]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/storageAccounts'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.StorageAccountPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.StorageAccountPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list_by_resource_group(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Lists all the storage accounts available under the given resource
group. Note that storage keys are not returned; use the ListKeys
operation for this.
:param resource_group_name: The name of the resource group within the
user's subscription.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of StorageAccount
:rtype:
~azure.mgmt.storage.v2016_01_01.models.StorageAccountPaged[~azure.mgmt.storage.v2016_01_01.models.StorageAccount]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.StorageAccountPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.StorageAccountPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list_keys(
self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
"""Lists the access keys for the specified storage account.
:param resource_group_name: The name of the resource group within the
user's subscription.
:type resource_group_name: str
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3 and
24 characters in length and use numbers and lower-case letters only.
:type account_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: StorageAccountListKeysResult or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.storage.v2016_01_01.models.StorageAccountListKeysResult or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/listKeys'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccountListKeysResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def regenerate_key(
self, resource_group_name, account_name, key_name, custom_headers=None, raw=False, **operation_config):
"""Regenerates one of the access keys for the specified storage account.
:param resource_group_name: The name of the resource group within the
user's subscription.
:type resource_group_name: str
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3 and
24 characters in length and use numbers and lower-case letters only.
:type account_name: str
:param key_name:
:type key_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: StorageAccountListKeysResult or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.storage.v2016_01_01.models.StorageAccountListKeysResult or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
regenerate_key1 = models.StorageAccountRegenerateKeyParameters(key_name=key_name)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/regenerateKey'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(regenerate_key1, 'StorageAccountRegenerateKeyParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccountListKeysResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
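# Hypothetical usage sketch (not part of the generated file; assumes the
# matching azure-mgmt-storage StorageManagementClient of the same vintage,
# so attribute and class names may differ between SDK versions):
#
#   from azure.mgmt.storage import StorageManagementClient
#   client = StorageManagementClient(credentials, subscription_id)
#   availability = client.storage_accounts.check_name_availability("mystorageacct")
#   print(availability.name_available)
#   poller = client.storage_accounts.create(resource_group, account_name, create_params)
#   account = poller.result()  # AzureOperationPoller blocks until provisioning completes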
|
|
# Copyright (c) 2015 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import binascii
import netaddr
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from oslo_utils import excutils
import six
from neutron.agent.l3 import dvr_fip_ns
from neutron.agent.l3 import dvr_local_router
from neutron.agent.linux import ip_lib
from neutron.common import constants as l3_constants
from neutron.common import exceptions
from neutron.common import topics
from neutron.common import utils as common_utils
from neutron.i18n import _LE
from networking_fortinet.common import constants as consts
from networking_fortinet.services.l3_router import l3_fortinet
from networking_fortinet.rpc.handlers import fortinet_agent_rpc
from networking_fortinet.tasks import tasks
LOG = logging.getLogger(__name__)
# xor-folding mask used for IPv6 rule index
MASK_30 = 0x3fffffff
INTERNAL_DEV_PORT = consts.INTERNAL_DEV_PORT
EXTERNAL_DEV_PORT = consts.EXTERNAL_DEV_PORT
class DvrLocalRouter(dvr_local_router.DvrLocalRouter):
def __init__(self, agent, host, *args, **kwargs):
super(DvrLocalRouter, self).__init__(agent, host, *args, **kwargs)
self.fortigate = agent.fortigate
self.host = host
self.floating_ips_dict = {}
# Linklocal subnet for router and floating IP namespace link
self.rtr_fip_subnet = None
self.dist_fip_count = None
self.fip_ns = None
@log_helpers.log_method_call
def get_floating_ips(self):
"""Filter Floating IPs to be hosted on this agent."""
floating_ips = super(DvrLocalRouter, self).get_floating_ips()
return [i for i in floating_ips if i['host'] == self.host]
@log_helpers.log_method_call
def _handle_fip_nat_rules(self, interface_name):
"""Configures NAT rules for Floating IPs for DVR.
Remove all the rules. This is safe because if
use_namespaces is set to False then the agent can
only configure one router, otherwise each router's
NAT rules will be in their own namespace.
"""
self.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING')
self.iptables_manager.ipv4['nat'].empty_chain('snat')
# Add back the jump to float-snat
self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat')
# And add the NAT rule back
rule = ('POSTROUTING', '! -i %(interface_name)s '
'! -o %(interface_name)s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' %
{'interface_name': interface_name})
self.iptables_manager.ipv4['nat'].add_rule(*rule)
self.iptables_manager.apply()
@log_helpers.log_method_call
def floating_ip_added_dist(self, fip, fip_cidr):
"""Add floating IP to FIP namespace."""
floating_ip = fip['floating_ip_address']
fixed_ip = fip['fixed_ip_address']
rule_pr = self.fip_ns.allocate_rule_priority(floating_ip)
self.floating_ips_dict[floating_ip] = rule_pr
fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id)
ip_rule = ip_lib.IPRule(namespace=self.ns_name)
ip_rule.rule.add(ip=fixed_ip,
table=dvr_fip_ns.FIP_RT_TBL,
priority=rule_pr)
#Add routing rule in fip namespace
fip_ns_name = self.fip_ns.get_name()
if self.rtr_fip_subnet is None:
self.rtr_fip_subnet = self.fip_ns.local_subnets.allocate(
self.router_id)
rtr_2_fip, _ = self.rtr_fip_subnet.get_pair()
device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name)
device.route.add_route(fip_cidr, str(rtr_2_fip.ip))
interface_name = (
self.fip_ns.get_ext_device_name(
self.fip_ns.agent_gateway_port['id']))
ip_lib.send_ip_addr_adv_notif(fip_ns_name,
interface_name,
floating_ip,
self.agent_conf)
# update internal structures
self.dist_fip_count = self.dist_fip_count + 1
@log_helpers.log_method_call
def floating_ip_removed_dist(self, fip_cidr):
"""Remove floating IP from FIP namespace."""
floating_ip = fip_cidr.split('/')[0]
rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(self.router_id)
fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id)
if self.rtr_fip_subnet is None:
self.rtr_fip_subnet = self.fip_ns.local_subnets.allocate(
self.router_id)
rtr_2_fip, fip_2_rtr = self.rtr_fip_subnet.get_pair()
fip_ns_name = self.fip_ns.get_name()
if floating_ip in self.floating_ips_dict:
rule_pr = self.floating_ips_dict[floating_ip]
ip_rule = ip_lib.IPRule(namespace=self.ns_name)
ip_rule.rule.delete(ip=floating_ip,
table=dvr_fip_ns.FIP_RT_TBL,
priority=rule_pr)
self.fip_ns.deallocate_rule_priority(floating_ip)
#TODO(rajeev): Handle else case - exception/log?
device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name)
device.route.delete_route(fip_cidr, str(rtr_2_fip.ip))
# check if this is the last FIP for this router
self.dist_fip_count = self.dist_fip_count - 1
if self.dist_fip_count == 0:
#remove default route entry
device = ip_lib.IPDevice(rtr_2_fip_name, namespace=self.ns_name)
ns_ip = ip_lib.IPWrapper(namespace=fip_ns_name)
device.route.delete_gateway(str(fip_2_rtr.ip),
table=dvr_fip_ns.FIP_RT_TBL)
self.fip_ns.local_subnets.release(self.router_id)
self.rtr_fip_subnet = None
ns_ip.del_veth(fip_2_rtr_name)
is_last = self.fip_ns.unsubscribe(self.router_id)
if is_last:
# TODO(Carl) I can't help but think that another router could
# come in and want to start using this namespace while this is
# destroying it. The two could end up conflicting on
# creating/destroying interfaces and such. I think I'd like a
# semaphore to sync creation/deletion of this namespace.
# NOTE (Swami): Since we are deleting the namespace here we
# should be able to delete the floatingip agent gateway port
# for the provided external net since we don't need it anymore.
if self.fip_ns.agent_gateway_port:
LOG.debug('Removed last floatingip, so requesting the '
'server to delete Floatingip Agent Gateway port:'
'%s', self.fip_ns.agent_gateway_port)
self.agent.plugin_rpc.delete_agent_gateway_port(
self.agent.context,
self.fip_ns.agent_gateway_port['network_id'])
self.fip_ns.delete()
self.fip_ns = None
@log_helpers.log_method_call
def add_floating_ip(self, fip, interface_name, device):
if not self._add_fip_addr_to_device(fip, device):
return l3_constants.FLOATINGIP_STATUS_ERROR
# Special Handling for DVR - update FIP namespace
ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address'])
self.floating_ip_added_dist(fip, ip_cidr)
return l3_constants.FLOATINGIP_STATUS_ACTIVE
@log_helpers.log_method_call
def remove_floating_ip(self, device, ip_cidr):
super(DvrLocalRouter, self).remove_floating_ip(device, ip_cidr)
self.floating_ip_removed_dist(ip_cidr)
@log_helpers.log_method_call
def _get_internal_port(self, subnet_id):
"""Return internal router port based on subnet_id."""
router_ports = self.router.get(l3_constants.INTERFACE_KEY, [])
for port in router_ports:
fips = port['fixed_ips']
for f in fips:
if f['subnet_id'] == subnet_id:
return port
@log_helpers.log_method_call
def get_internal_device_name(self, port_id):
return INTERNAL_DEV_PORT
@log_helpers.log_method_call
def _update_arp_entry(self, ip, mac, subnet_id, operation):
"""Add or delete arp entry into router namespace for the subnet."""
port = self._get_internal_port(subnet_id)
# update arp entry only if the subnet is attached to the router
if not port:
return
try:
# TODO(mrsmith): optimize the calls below for bulk calls
interface_name = self.get_internal_device_name(port['id'])
device = ip_lib.IPDevice(interface_name, namespace=self.ns_name)
if operation == 'add':
device.neigh.add(ip, mac)
elif operation == 'delete':
device.neigh.delete(ip, mac)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("DVR: Failed updating arp entry"))
@log_helpers.log_method_call
def _set_subnet_arp_info(self, subnet_id):
"""Set ARP info retrieved from Plugin for existing ports."""
# TODO(Carl) Can we eliminate the need to make this RPC while
# processing a router.
subnet_ports = self.agent.get_ports_by_subnet(subnet_id)
for p in subnet_ports:
if p['device_owner'] not in l3_constants.ROUTER_INTERFACE_OWNERS:
for fixed_ip in p['fixed_ips']:
self._update_arp_entry(fixed_ip['ip_address'],
p['mac_address'],
subnet_id,
'add')
@staticmethod
@log_helpers.log_method_call
def _get_snat_idx(ip_cidr):
"""Generate index for DVR snat rules and route tables.
The index value has to be 32 bits or less but more than the system
generated entries i.e. 32768. For IPv4 use the numeric value of the
cidr. For IPv6 generate a crc32 bit hash and xor-fold to 30 bits.
Use the freed range to extend smaller values so that they become
greater than system generated entries.
"""
net = netaddr.IPNetwork(ip_cidr)
if net.version == 6:
if isinstance(ip_cidr, six.text_type):
ip_cidr = ip_cidr.encode() # Needed for Python 3.x
# the crc32 & 0xffffffff is for Python 2.6 and 3.0 compatibility
snat_idx = binascii.crc32(ip_cidr) & 0xffffffff
# xor-fold the hash to reserve upper range to extend smaller values
snat_idx = (snat_idx >> 30) ^ (snat_idx & MASK_30)
if snat_idx < 32768:
snat_idx = snat_idx + MASK_30
else:
snat_idx = net.value
return snat_idx
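# Worked example (illustrative, not from the original source): for the IPv4
# CIDR "10.0.0.0/24" this returns netaddr.IPNetwork("10.0.0.0/24").value,
# i.e. 167772160, which is already above the system-generated range (> 32768).
# For an IPv6 CIDR the crc32 hash is xor-folded to 30 bits with MASK_30, and
# any result below 32768 is shifted up by MASK_30 so the rule priority cannot
# collide with system-generated entries.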
@log_helpers.log_method_call
def _delete_gateway_device_if_exists(self, ns_ip_device, gw_ip_addr,
snat_idx):
try:
ns_ip_device.route.delete_gateway(gw_ip_addr,
table=snat_idx)
except exceptions.DeviceNotFoundError:
pass
@log_helpers.log_method_call
def _snat_redirect_modify(self, gateway, sn_port, sn_int, is_add):
"""Adds or removes rules and routes for SNAT redirection."""
try:
ns_ipr = ip_lib.IPRule(namespace=self.ns_name)
ns_ipd = ip_lib.IPDevice(sn_int, namespace=self.ns_name)
if is_add:
ns_ipwrapr = ip_lib.IPWrapper(namespace=self.ns_name)
for port_fixed_ip in sn_port['fixed_ips']:
# Find the first gateway IP address matching this IP version
port_ip_addr = port_fixed_ip['ip_address']
port_ip_vers = netaddr.IPAddress(port_ip_addr).version
for gw_fixed_ip in gateway['fixed_ips']:
gw_ip_addr = gw_fixed_ip['ip_address']
if netaddr.IPAddress(gw_ip_addr).version == port_ip_vers:
sn_port_cidr = common_utils.ip_to_cidr(
port_ip_addr, port_fixed_ip['prefixlen'])
snat_idx = self._get_snat_idx(sn_port_cidr)
if is_add:
ns_ipd.route.add_gateway(gw_ip_addr,
table=snat_idx)
ns_ipr.rule.add(ip=sn_port_cidr,
table=snat_idx,
priority=snat_idx)
ns_ipwrapr.netns.execute(
['sysctl', '-w',
'net.ipv4.conf.%s.send_redirects=0' % sn_int])
else:
self._delete_gateway_device_if_exists(ns_ipd,
gw_ip_addr,
snat_idx)
ns_ipr.rule.delete(ip=sn_port_cidr,
table=snat_idx,
priority=snat_idx)
break
except Exception:
if is_add:
exc = _LE('DVR: error adding redirection logic')
else:
exc = _LE('DVR: snat remove failed to clear the rule '
'and device')
LOG.exception(exc)
@log_helpers.log_method_call
def _snat_redirect_add(self, gateway, sn_port, sn_int):
"""Adds rules and routes for SNAT redirection."""
self._snat_redirect_modify(gateway, sn_port, sn_int, is_add=True)
@log_helpers.log_method_call
def _snat_redirect_remove(self, gateway, sn_port, sn_int):
"""Removes rules and routes for SNAT redirection."""
self._snat_redirect_modify(gateway, sn_port, sn_int, is_add=False)
@log_helpers.log_method_call
def internal_network_added(self, port):
super(DvrLocalRouter, self).internal_network_added(port)
# NOTE: The following function _set_subnet_arp_info
# should be called to dynamically populate the arp
# entries for the dvr services ports into the router
# namespace. This does not have dependency on the
# external_gateway port or the agent_mode.
for subnet in port['subnets']:
self._set_subnet_arp_info(subnet['id'])
ex_gw_port = self.get_ex_gw_port()
if not ex_gw_port:
return
sn_port = self.get_snat_port_for_internal_port(port)
if not sn_port:
return
interface_name = self.get_internal_device_name(port['id'])
self._snat_redirect_add(sn_port, port, interface_name)
@log_helpers.log_method_call
def _dvr_internal_network_removed(self, port):
if not self.ex_gw_port:
return
sn_port = self.get_snat_port_for_internal_port(port, self.snat_ports)
if not sn_port:
return
# DVR handling code for SNAT
interface_name = self.get_internal_device_name(port['id'])
self._snat_redirect_remove(sn_port, port, interface_name)
@log_helpers.log_method_call
def internal_network_removed(self, port):
self._dvr_internal_network_removed(port)
super(DvrLocalRouter, self).internal_network_removed(port)
@log_helpers.log_method_call
def get_floating_agent_gw_interface(self, ext_net_id):
"""Filter Floating Agent GW port for the external network."""
fip_ports = self.router.get(l3_constants.FLOATINGIP_AGENT_INTF_KEY, [])
return next(
(p for p in fip_ports if p['network_id'] == ext_net_id), None)
@log_helpers.log_method_call
def get_external_device_interface_name(self, ex_gw_port):
fip_int = self.fip_ns.get_int_device_name(self.router_id)
if ip_lib.device_exists(fip_int, namespace=self.fip_ns.get_name()):
return self.fip_ns.get_rtr_ext_device_name(self.router_id)
@log_helpers.log_method_call
def external_gateway_added(self, ex_gw_port, interface_name):
# TODO(Carl) Refactor external_gateway_added/updated/removed to use
# super class implementation where possible. Looks like preserve_ips,
# and ns_name are the key differences.
ip_wrapr = ip_lib.IPWrapper(namespace=self.ns_name)
ip_wrapr.netns.execute(['sysctl', '-w',
'net.ipv4.conf.all.send_redirects=0'])
for p in self.internal_ports:
gateway = self.get_snat_port_for_internal_port(p)
id_name = self.get_internal_device_name(p['id'])
if gateway:
self._snat_redirect_add(gateway, p, id_name)
for port in self.get_snat_interfaces():
for ip in port['fixed_ips']:
self._update_arp_entry(ip['ip_address'],
port['mac_address'],
ip['subnet_id'],
'add')
@log_helpers.log_method_call
def external_gateway_updated(self, ex_gw_port, interface_name):
pass
@log_helpers.log_method_call
def external_gateway_removed(self, ex_gw_port, interface_name):
# TODO(Carl) Should this be calling process_snat_dnat_for_fip?
self.process_floating_ip_nat_rules()
if self.fip_ns:
to_fip_interface_name = (
self.get_external_device_interface_name(ex_gw_port))
self.process_floating_ip_addresses(to_fip_interface_name)
for p in self.internal_ports:
# NOTE: When removing the gateway port, pass in the snat_port
# cache along with the current ports.
gateway = self.get_snat_port_for_internal_port(p, self.snat_ports)
internal_interface = self.get_internal_device_name(p['id'])
self._snat_redirect_remove(gateway, p, internal_interface)
@log_helpers.log_method_call
def _handle_router_snat_rules(self, ex_gw_port, interface_name):
pass
@log_helpers.log_method_call
def process_external(self, agent):
ex_gw_port = self.get_ex_gw_port()
if ex_gw_port:
self.create_dvr_fip_interfaces(ex_gw_port)
super(DvrLocalRouter, self).process_external(agent)
@log_helpers.log_method_call
def create_dvr_fip_interfaces(self, ex_gw_port):
floating_ips = self.get_floating_ips()
fip_agent_port = self.get_floating_agent_gw_interface(
ex_gw_port['network_id'])
if fip_agent_port:
LOG.debug("FloatingIP agent gateway port received from the "
"plugin: %s", fip_agent_port)
is_first = False
if floating_ips:
is_first = self.fip_ns.subscribe(self.router_id)
if is_first and not fip_agent_port:
LOG.debug("No FloatingIP agent gateway port possibly due to "
"late binding of the private port to the host, "
"requesting agent gateway port for 'network-id' :"
"%s", ex_gw_port['network_id'])
fip_agent_port = self.agent.plugin_rpc.get_agent_gateway_port(
self.agent.context, ex_gw_port['network_id'])
if not fip_agent_port:
LOG.error(_LE("No FloatingIP agent gateway port "
"returned from server for 'network-id': "
"%s"), ex_gw_port['network_id'])
if is_first and fip_agent_port:
if 'subnets' not in fip_agent_port:
LOG.error(_LE('Missing subnet/agent_gateway_port'))
else:
self.fip_ns.create_gateway_port(fip_agent_port)
if (self.fip_ns.agent_gateway_port and
(self.dist_fip_count == 0 or is_first)):
self.fip_ns.create_rtr_2_fip_link(self)
# kicks the FW Agent to add rules for the IR namespace if
# configured
self.agent.process_router_add(self)
@log_helpers.log_method_call
def process(self, agent):
ex_gw_port = self.get_ex_gw_port()
print "###@@@ ex_gw_port=%s" % ex_gw_port
#import ipdb;ipdb.set_trace()
if ex_gw_port:
self.fip_ns = agent.get_fip_ns(ex_gw_port['network_id'])
self.fip_ns.scan_fip_ports(self)
super(DvrLocalRouter, self).process(agent)
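# --- Illustrative sketch (added for exposition; not part of the original file) ---
# A standalone restatement of the SNAT index scheme documented in _get_snat_idx()
# above, using only the standard library (ipaddress instead of netaddr). The
# MASK_30 constant is assumed to be 0x3fffffff, matching the 30-bit xor-fold the
# docstring describes; the constant and helper name below are placeholders, not
# code from this module.
import binascii
import ipaddress

_MASK_30_SKETCH = 0x3fffffff


def _snat_idx_sketch(ip_cidr):
    """Return a route-table index for a CIDR string, per the scheme above."""
    net = ipaddress.ip_network(ip_cidr, strict=False)
    if net.version == 6:
        # Hash the textual CIDR, fold 32 bits down to 30, then lift small values
        # above the 32768 range reserved for system-generated entries.
        idx = binascii.crc32(ip_cidr.encode()) & 0xffffffff
        idx = (idx >> 30) ^ (idx & _MASK_30_SKETCH)
        if idx < 32768:
            idx += _MASK_30_SKETCH
        return idx
    # IPv4: the numeric value of the network address is used directly; for any
    # realistic subnet it is already far larger than the reserved 32768 range.
    return int(net.network_address)
# Example: _snat_idx_sketch('10.0.0.0/24') == 167772160 (the integer form of 10.0.0.0).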
|
|
from __future__ import unicode_literals
import sys
import os
import re
import mimetypes
from copy import copy
from io import BytesIO
try:
from urllib.parse import unquote, urlparse, urlsplit
except ImportError: # Python 2
from urllib import unquote
from urlparse import urlparse, urlsplit
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import WSGIRequest
from django.core.signals import (request_started, request_finished,
got_request_exception)
from django.db import close_connection
from django.http import SimpleCookie, HttpRequest, QueryDict
from django.template import TemplateDoesNotExist
from django.test import signals
from django.utils.functional import curry
from django.utils.encoding import force_bytes, force_str
from django.utils.http import urlencode
from django.utils.importlib import import_module
from django.utils.itercompat import is_iterable
from django.utils import six
from django.test.utils import ContextList
__all__ = ('Client', 'RequestFactory', 'encode_file', 'encode_multipart')
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
CONTENT_TYPE_RE = re.compile(r'.*; charset=([\w\d-]+);?')
class FakePayload(object):
"""
A wrapper around BytesIO that restricts what can be read since data from
the network can't be seeked and cannot be read outside of its content
length. This makes sure that views can't do anything under the test client
that wouldn't work in Real Life.
"""
def __init__(self, content=None):
self.__content = BytesIO()
self.__len = 0
self.read_started = False
if content is not None:
self.write(content)
def __len__(self):
return self.__len
def read(self, num_bytes=None):
if not self.read_started:
self.__content.seek(0)
self.read_started = True
if num_bytes is None:
num_bytes = self.__len or 0
assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data."
content = self.__content.read(num_bytes)
self.__len -= num_bytes
return content
def write(self, content):
if self.read_started:
raise ValueError("Unable to write a payload after he's been read")
content = force_bytes(content)
self.__content.write(content)
self.__len += len(content)
def closing_iterator_wrapper(iterable, close):
try:
for item in iterable:
yield item
finally:
request_finished.disconnect(close_connection)
close() # will fire request_finished
request_finished.connect(close_connection)
class ClientHandler(BaseHandler):
"""
    An HTTP Handler that can be used for testing purposes.
Uses the WSGI interface to compose requests, but returns
the raw HttpResponse object
"""
def __init__(self, enforce_csrf_checks=True, *args, **kwargs):
self.enforce_csrf_checks = enforce_csrf_checks
super(ClientHandler, self).__init__(*args, **kwargs)
def __call__(self, environ):
from django.conf import settings
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._request_middleware is None:
self.load_middleware()
request_started.send(sender=self.__class__)
request = WSGIRequest(environ)
# sneaky little hack so that we can easily get round
# CsrfViewMiddleware. This makes life easier, and is probably
# required for backwards compatibility with external tests against
# admin views.
request._dont_enforce_csrf_checks = not self.enforce_csrf_checks
response = self.get_response(request)
# We're emulating a WSGI server; we must call the close method
# on completion.
if response.streaming:
response.streaming_content = closing_iterator_wrapper(
response.streaming_content, response.close)
else:
request_finished.disconnect(close_connection)
response.close() # will fire request_finished
request_finished.connect(close_connection)
return response
def store_rendered_templates(store, signal, sender, template, context, **kwargs):
"""
Stores templates and contexts that are rendered.
The context is copied so that it is an accurate representation at the time
of rendering.
"""
store.setdefault('templates', []).append(template)
store.setdefault('context', ContextList()).append(copy(context))
def encode_multipart(boundary, data):
"""
Encodes multipart POST data from a dictionary of form values.
The key will be used as the form data name; the value will be transmitted
as content. If the value is a file, the contents of the file will be sent
as an application/octet-stream; otherwise, str(value) will be sent.
"""
lines = []
to_bytes = lambda s: force_bytes(s, settings.DEFAULT_CHARSET)
# Not by any means perfect, but good enough for our purposes.
is_file = lambda thing: hasattr(thing, "read") and callable(thing.read)
# Each bit of the multipart form data could be either a form value or a
# file, or a *list* of form values and/or files. Remember that HTTP field
# names can be duplicated!
for (key, value) in data.items():
if is_file(value):
lines.extend(encode_file(boundary, key, value))
elif not isinstance(value, six.string_types) and is_iterable(value):
for item in value:
if is_file(item):
lines.extend(encode_file(boundary, key, item))
else:
lines.extend([to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
item
]])
else:
lines.extend([to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
value
]])
lines.extend([
to_bytes('--%s--' % boundary),
b'',
])
return b'\r\n'.join(lines)
def encode_file(boundary, key, file):
to_bytes = lambda s: force_bytes(s, settings.DEFAULT_CHARSET)
if hasattr(file, 'content_type'):
content_type = file.content_type
else:
content_type = mimetypes.guess_type(file.name)[0]
if content_type is None:
content_type = 'application/octet-stream'
return [
to_bytes('--%s' % boundary),
to_bytes('Content-Disposition: form-data; name="%s"; filename="%s"' \
% (key, os.path.basename(file.name))),
to_bytes('Content-Type: %s' % content_type),
b'',
file.read()
]
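# --- Illustrative sketch (added for exposition; not part of the original module) ---
# Shows how BOUNDARY and encode_multipart()/encode_file() above combine to build a
# multipart POST body by hand. It assumes a configured Django settings module (for
# settings.DEFAULT_CHARSET); the field names and the fake upload are placeholders.
def _encode_multipart_example():
    upload = BytesIO(b'file contents')
    upload.name = 'notes.txt'       # encode_file() uses the name for the filename
    body = encode_multipart(BOUNDARY, {
        'title': 'hello',           # plain form field
        'tags': ['a', 'b'],         # repeated field -> one part per item
        'attachment': upload,       # file-like object -> handled by encode_file()
    })
    # A request would send this with content type MULTIPART_CONTENT and
    # CONTENT_LENGTH = len(body), exactly as RequestFactory.post() does below.
    return body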
class RequestFactory(object):
"""
Class that lets you create mock Request objects for use in testing.
Usage:
rf = RequestFactory()
get_request = rf.get('/hello/')
post_request = rf.post('/submit/', {'foo': 'bar'})
Once you have a request object you can pass it to any view function,
just as if that view had been hooked up using a URLconf.
"""
def __init__(self, **defaults):
self.defaults = defaults
self.cookies = SimpleCookie()
self.errors = BytesIO()
def _base_environ(self, **request):
"""
The base environment for a request.
"""
# This is a minimal valid WSGI environ dictionary, plus:
# - HTTP_COOKIE: for cookie support,
# - REMOTE_ADDR: often useful, see #8551.
# See http://www.python.org/dev/peps/pep-3333/#environ-variables
environ = {
'HTTP_COOKIE': self.cookies.output(header='', sep='; '),
'PATH_INFO': str('/'),
'REMOTE_ADDR': str('127.0.0.1'),
'REQUEST_METHOD': str('GET'),
'SCRIPT_NAME': str(''),
'SERVER_NAME': str('testserver'),
'SERVER_PORT': str('80'),
'SERVER_PROTOCOL': str('HTTP/1.1'),
'wsgi.version': (1, 0),
'wsgi.url_scheme': str('http'),
'wsgi.input': FakePayload(b''),
'wsgi.errors': self.errors,
'wsgi.multiprocess': True,
'wsgi.multithread': False,
'wsgi.run_once': False,
}
environ.update(self.defaults)
environ.update(request)
return environ
def request(self, **request):
"Construct a generic request object."
return WSGIRequest(self._base_environ(**request))
def _encode_data(self, data, content_type, ):
if content_type is MULTIPART_CONTENT:
return encode_multipart(BOUNDARY, data)
else:
# Encode the content so that the byte representation is correct.
match = CONTENT_TYPE_RE.match(content_type)
if match:
charset = match.group(1)
else:
charset = settings.DEFAULT_CHARSET
return force_bytes(data, encoding=charset)
def _get_path(self, parsed):
path = force_str(parsed[2])
# If there are parameters, add them
if parsed[3]:
path += str(";") + force_str(parsed[3])
path = unquote(path)
# WSGI requires latin-1 encoded strings. See get_path_info().
if six.PY3:
path = path.encode('utf-8').decode('iso-8859-1')
return path
def get(self, path, data={}, **extra):
"Construct a GET request."
parsed = urlparse(path)
r = {
'CONTENT_TYPE': str('text/html; charset=utf-8'),
'PATH_INFO': self._get_path(parsed),
'QUERY_STRING': urlencode(data, doseq=True) or force_str(parsed[4]),
'REQUEST_METHOD': str('GET'),
}
r.update(extra)
return self.request(**r)
def post(self, path, data={}, content_type=MULTIPART_CONTENT,
**extra):
"Construct a POST request."
post_data = self._encode_data(data, content_type)
parsed = urlparse(path)
r = {
'CONTENT_LENGTH': len(post_data),
'CONTENT_TYPE': content_type,
'PATH_INFO': self._get_path(parsed),
'QUERY_STRING': force_str(parsed[4]),
'REQUEST_METHOD': str('POST'),
'wsgi.input': FakePayload(post_data),
}
r.update(extra)
return self.request(**r)
def head(self, path, data={}, **extra):
"Construct a HEAD request."
parsed = urlparse(path)
r = {
'CONTENT_TYPE': str('text/html; charset=utf-8'),
'PATH_INFO': self._get_path(parsed),
'QUERY_STRING': urlencode(data, doseq=True) or force_str(parsed[4]),
'REQUEST_METHOD': str('HEAD'),
}
r.update(extra)
return self.request(**r)
def options(self, path, data='', content_type='application/octet-stream',
**extra):
"Construct an OPTIONS request."
return self.generic('OPTIONS', path, data, content_type, **extra)
def put(self, path, data='', content_type='application/octet-stream',
**extra):
"Construct a PUT request."
return self.generic('PUT', path, data, content_type, **extra)
def delete(self, path, data='', content_type='application/octet-stream',
**extra):
"Construct a DELETE request."
return self.generic('DELETE', path, data, content_type, **extra)
def generic(self, method, path,
data='', content_type='application/octet-stream', **extra):
parsed = urlparse(path)
data = force_bytes(data, settings.DEFAULT_CHARSET)
r = {
'PATH_INFO': self._get_path(parsed),
'QUERY_STRING': force_str(parsed[4]),
'REQUEST_METHOD': str(method),
}
if data:
r.update({
'CONTENT_LENGTH': len(data),
'CONTENT_TYPE': str(content_type),
'wsgi.input': FakePayload(data),
})
r.update(extra)
return self.request(**r)
class Client(RequestFactory):
"""
A class that can act as a client for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
Client objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the Client instance.
This is not intended as a replacement for Twill/Selenium or
the like - it is here to allow testing against the
contexts and templates produced by a view, rather than the
HTML rendered to the end-user.
"""
def __init__(self, enforce_csrf_checks=False, **defaults):
super(Client, self).__init__(**defaults)
self.handler = ClientHandler(enforce_csrf_checks)
self.exc_info = None
def store_exc_info(self, **kwargs):
"""
Stores exceptions when they are generated by a view.
"""
self.exc_info = sys.exc_info()
def _session(self):
"""
Obtains the current session variables.
"""
if 'django.contrib.sessions' in settings.INSTALLED_APPS:
engine = import_module(settings.SESSION_ENGINE)
cookie = self.cookies.get(settings.SESSION_COOKIE_NAME, None)
if cookie:
return engine.SessionStore(cookie.value)
return {}
session = property(_session)
def request(self, **request):
"""
The master request method. Composes the environment dictionary
and passes to the handler, returning the result of the handler.
Assumes defaults for the query environment, which can be overridden
using the arguments to the request.
"""
environ = self._base_environ(**request)
# Curry a data dictionary into an instance of the template renderer
# callback function.
data = {}
on_template_render = curry(store_rendered_templates, data)
signals.template_rendered.connect(on_template_render, dispatch_uid="template-render")
# Capture exceptions created by the handler.
got_request_exception.connect(self.store_exc_info, dispatch_uid="request-exception")
try:
try:
response = self.handler(environ)
except TemplateDoesNotExist as e:
# If the view raises an exception, Django will attempt to show
# the 500.html template. If that template is not available,
# we should ignore the error in favor of re-raising the
# underlying exception that caused the 500 error. Any other
# template found to be missing during view error handling
# should be reported as-is.
if e.args != ('500.html',):
raise
# Look for a signalled exception, clear the current context
# exception data, then re-raise the signalled exception.
# Also make sure that the signalled exception is cleared from
# the local cache!
if self.exc_info:
exc_info = self.exc_info
self.exc_info = None
six.reraise(*exc_info)
# Save the client and request that stimulated the response.
response.client = self
response.request = request
# Add any rendered template detail to the response.
response.templates = data.get("templates", [])
response.context = data.get("context")
# Flatten a single context. Not really necessary anymore thanks to
# the __getattr__ flattening in ContextList, but has some edge-case
# backwards-compatibility implications.
if response.context and len(response.context) == 1:
response.context = response.context[0]
# Update persistent cookie data.
if response.cookies:
self.cookies.update(response.cookies)
return response
finally:
signals.template_rendered.disconnect(dispatch_uid="template-render")
got_request_exception.disconnect(dispatch_uid="request-exception")
def get(self, path, data={}, follow=False, **extra):
"""
Requests a response from the server using GET.
"""
response = super(Client, self).get(path, data=data, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def post(self, path, data={}, content_type=MULTIPART_CONTENT,
follow=False, **extra):
"""
Requests a response from the server using POST.
"""
response = super(Client, self).post(path, data=data, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def head(self, path, data={}, follow=False, **extra):
"""
Request a response from the server using HEAD.
"""
response = super(Client, self).head(path, data=data, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def options(self, path, data='', content_type='application/octet-stream',
follow=False, **extra):
"""
Request a response from the server using OPTIONS.
"""
response = super(Client, self).options(path,
data=data, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def put(self, path, data='', content_type='application/octet-stream',
follow=False, **extra):
"""
Send a resource to the server using PUT.
"""
response = super(Client, self).put(path,
data=data, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def delete(self, path, data='', content_type='application/octet-stream',
follow=False, **extra):
"""
Send a DELETE request to the server.
"""
response = super(Client, self).delete(path,
data=data, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def login(self, **credentials):
"""
Sets the Factory to appear as if it has successfully logged into a site.
Returns True if login is possible; False if the provided credentials
are incorrect, or the user is inactive, or if the sessions framework is
not available.
"""
user = authenticate(**credentials)
if user and user.is_active \
and 'django.contrib.sessions' in settings.INSTALLED_APPS:
engine = import_module(settings.SESSION_ENGINE)
# Create a fake request to store login details.
request = HttpRequest()
if self.session:
request.session = self.session
else:
request.session = engine.SessionStore()
login(request, user)
# Save the session values.
request.session.save()
# Set the cookie to represent the session.
session_cookie = settings.SESSION_COOKIE_NAME
self.cookies[session_cookie] = request.session.session_key
cookie_data = {
'max-age': None,
'path': '/',
'domain': settings.SESSION_COOKIE_DOMAIN,
'secure': settings.SESSION_COOKIE_SECURE or None,
'expires': None,
}
self.cookies[session_cookie].update(cookie_data)
return True
else:
return False
def logout(self):
"""
Removes the authenticated user's cookies and session object.
Causes the authenticated user to be logged out.
"""
session = import_module(settings.SESSION_ENGINE).SessionStore()
session_cookie = self.cookies.get(settings.SESSION_COOKIE_NAME)
if session_cookie:
session.delete(session_key=session_cookie.value)
self.cookies = SimpleCookie()
def _handle_redirects(self, response, **extra):
"Follows any redirects by requesting responses from the server using GET."
response.redirect_chain = []
while response.status_code in (301, 302, 303, 307):
url = response['Location']
redirect_chain = response.redirect_chain
redirect_chain.append((url, response.status_code))
url = urlsplit(url)
if url.scheme:
extra['wsgi.url_scheme'] = url.scheme
if url.hostname:
extra['SERVER_NAME'] = url.hostname
if url.port:
extra['SERVER_PORT'] = str(url.port)
response = self.get(url.path, QueryDict(url.query), follow=False, **extra)
response.redirect_chain = redirect_chain
# Prevent loops
if response.redirect_chain[-1] in response.redirect_chain[0:-1]:
break
return response
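# --- Illustrative sketch (added for exposition; not part of the original module) ---
# Typical use of the Client defined above, assuming a configured settings module,
# a URLconf serving '/', and the sessions/auth apps installed; the path and the
# credentials are placeholders.
def _client_usage_sketch():
    client = Client()
    response = client.get('/', follow=True)
    # redirect_chain is populated by _handle_redirects() when follow=True.
    visited = [url for url, status in response.redirect_chain]
    logged_in = client.login(username='alice', password='secret')
    client.logout()
    return response.status_code, visited, logged_in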
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
ocr_pdf.py
~~~~~~~~~~~~~~
Run OCR over a PDF: rasterize its pages with Ghostscript, then extract text with
Tesseract and/or Cuneiform (optionally reflowing multi-column pages with k2pdfopt
first), writing hOCR output to an output folder.
"""
import subprocess
import os
import shutil
import glob
import argparse
def call(cmd, check=True, stdout=None, stderr=None):
"""
Args:
        check: if True, raise on a non-zero exit status (subprocess.check_call);
            otherwise just return the exit status (subprocess.call).
"""
if check:
return subprocess.check_call(cmd, stdout=stdout, stderr=stderr, shell=True)
else:
return subprocess.call(cmd, stdout=stdout, stderr=stderr, shell=True)
def unzip(zip_file, func=call):
cmd = "unzip -o '%s'" % zip_file
try:
return func(cmd)
except subprocess.CalledProcessError as e:
if e.returncode != 2:
raise e
def cp(wild_pathname, dst):
"""Unix-like file copy"""
for src in glob.glob(wild_pathname):
if os.path.isdir(dst):
shutil.copy(src, os.path.join(dst, os.path.basename(src)))
else:
shutil.copy(src, dst)
return True
def k2pdfopt(pdf_file, output_file, func=call):
"""convert multi-column PDF into single column
K2pdfopt (Kindle 2 PDF Optimizer) is a stand-alone program which optimizes the format of PDF (or DJVU) files for viewing on small (e.g. 6-inch) mobile reader and smartphone screens such as the Kindle's.
The output from k2pdfopt is a new (optimized) PDF file.
http://www.willus.com/k2pdfopt/
Args:
output_file: this is a required parameter, because k2pdfopt always return 0
Returns:
0: WARNING, k2pdfopt will always return 0, judge its succeed by looking at the output_file
"""
try:
os.remove(output_file)
except OSError as e:
if e.errno != 2:
raise e
cmd = "./k2pdfopt -ui- -x -w 2160 -h 3840 -odpi 300 '%s' -o '%s'" % (pdf_file, output_file)
return func(cmd)
def pdf_to_png(pdf_file, tmp_folder=None, func=call):
if tmp_folder:
cmd = "./codes/convert/cde-exec 'gs' -dBATCH -dNOPAUSE -sDEVICE=png16m -dGraphicsAlphaBits=4 -dTextAlphaBits=4 -r600 -sOutputFile='%s/page-%%d.png' '%s'"\
% (tmp_folder, pdf_file)
else:
cmd = "./codes/convert/cde-exec 'gs' -dBATCH -dNOPAUSE -sDEVICE=png16m -dGraphicsAlphaBits=4 -dTextAlphaBits=4 -r600 -sOutputFile=page-%%d.png '%s'" % pdf_file
return func(cmd)
def pdf_to_bmp(pdf_file, tmp_folder=None, func=call):
if tmp_folder:
cmd = "./codes/convert/cde-exec 'gs' -SDEVICE=bmpmono -r600x600 -sOutputFile='%s/cuneiform-page-%%04d.bmp' -dNOPAUSE -dBATCH -- '%s'"\
% (tmp_folder, pdf_file)
else:
cmd = "./codes/convert/cde-exec 'gs' -SDEVICE=bmpmono -r600x600 -sOutputFile='cuneiform-page-%%04d.bmp' -dNOPAUSE -dBATCH -- '%s'" % pdf_file
return func(cmd)
def tesseract(png_folder_path, output_folder_path=None, func=call):
"""
Returns:
        0; always returns 0 (check the output folder for the generated .hocr files)
"""
png_folder_path = os.path.abspath(png_folder_path)
if not output_folder_path:
output_folder_path = png_folder_path
for i in os.listdir(png_folder_path):
if i.endswith('.png'):
png_path = os.path.join(png_folder_path, i)
ppm_filename = "%s.ppm" % png_path
ppm_filename = ppm_filename.replace(".png","")
hocr_filename = os.path.join(output_folder_path, "%s.hocr" % i.replace(".png",""))
cmd = "./codes/convert/cde-exec 'convert' -density 750 '%s' '%s'" % (png_path, ppm_filename)
func(cmd)
cmd = "./codes/tesseract/cde-exec 'tesseract' '%s' '%s' hocr" % (ppm_filename, hocr_filename)
func(cmd)
cmd = "rm -f '%s'" % (ppm_filename)
func(cmd)
return 0
def cuneiform(bmp_folder_path, output_folder_path=None, func=call):
"""
Returns:
        0; always returns 0 (check the output folder for the generated .html files)
"""
bmp_folder_path = os.path.abspath(bmp_folder_path)
if not output_folder_path:
output_folder_path = bmp_folder_path
for i in os.listdir(bmp_folder_path):
if i.endswith('.bmp'):
cmd = "./cde-package/cde-exec '/scratch.1/pdf2xml/cuneiform/bin/cuneiform' -f hocr -o '%s.html' '%s'"\
% (os.path.join(output_folder_path, i), os.path.join(bmp_folder_path,i))
func(cmd)
return 0
def tiff_to_html(tiff_path, output_folder_path=None, func=call):
output_folder_path = os.path.abspath(output_folder_path) if output_folder_path else os.path.abspath('.')
hocr_path = os.path.join(output_folder_path, os.path.basename(tiff_path))
cmd = "./codes/tesseract/cde-exec 'tesseract' '%s' '%s.hocr' hocr" % (tiff_path, hocr_path)
return func(cmd)
class OcrPdf(object):
def __init__(self, pdf_path, stdout_filepath, stderr_filepath,
output_folder_path=None, cuneiform=True, tesseract=True, k2pdf = False):
try:
self.stdout = open(stdout_filepath, 'a')
self.stderr = open(stderr_filepath, 'a')
self.pdf_path = pdf_path
self.k2pdf = k2pdf
self.cuneiform = cuneiform
self.tesseract = tesseract
self.output_folder_path = output_folder_path
except IOError as e:
print "ERROR\tInvalid filepath %s, %s" % (stdout_filepath, stderr_filepath)
            # Use getattr: if the first open() failed, self.stdout/stderr may
            # not have been set yet.
            if getattr(self, 'stdout', None):
                self.stdout.close()
            if getattr(self, 'stderr', None):
                self.stderr.close()
raise e
shutil.rmtree('tmp', True)
try:
os.mkdir('tmp')
except OSError as e:
print "ERROR\tCreate tmp folder"
raise e
if self.output_folder_path and not os.path.isdir(self.output_folder_path):
try:
os.mkdir(self.output_folder_path)
except OSError as e:
print "ERROR\tCreate output folder"
raise e
def __del__(self):
shutil.rmtree('tmp', True)
def call(self, cmd, check=True):
return call(cmd, check=check, stdout=self.stdout, stderr=self.stderr)
def do(self):
# Usage of ocr2 and cuneiform will depend on desired runtime options.
if self.k2pdf:
output_file = "k2_pdf_%s" % self.pdf_path
print k2pdfopt(self.pdf_path, output_file, func=self.call)
else:
output_file = self.pdf_path
unzip("ocr2.zip", func=self.call)
unzip("cuneiform.zip", func=self.call)
if self.tesseract:
print pdf_to_png(output_file, tmp_folder='tmp', func=self.call)
print tesseract('tmp', self.output_folder_path, self.call)
if self.cuneiform:
print pdf_to_bmp(output_file, tmp_folder='tmp', func=self.call)
print cuneiform('tmp', self.output_folder_path, self.call)
def tiffs_to_htmls(self, tiff_folder_path):
"""
Returns:
            True on success, or the path of the first TIFF that failed to convert
"""
for i in os.listdir(tiff_folder_path):
if i.endswith('.tif') or i.endswith('.tiff'):
tiff_path = os.path.join(tiff_folder_path, i)
if tiff_to_html(tiff_path, self.output_folder_path, self.call):
return tiff_path
return True
def main(args):
    o = OcrPdf(args.file, 'out.txt', 'out.txt', './',
               args.cuneiform, args.tesseract, args.k2pdf)
o.do()
# o.tiffs_to_htmls(argv[1])
def detect_layout_fonts(pdf_file, output_folder, enable_tesseract, enable_k2pdf):
import old_cuneiform_arcane
o = OcrPdf(pdf_file, 'out.txt', 'out.txt', output_folder,
True, enable_tesseract, enable_k2pdf)
o.do()
try:
shutil.rmtree('tmp', True)
os.mkdir('tmp')
cp(os.path.join(output_folder, "cune*html"), 'tmp')
old_cuneiform_arcane.parse_cunneiform_results_and_extract_layout_font_information('tmp')
cp("tmp/*", output_folder)
except:
raise
finally:
shutil.rmtree('tmp', True)
for src in glob.glob(os.path.join(output_folder, "cuneiform-page-*")):
if os.path.isdir(src):
shutil.rmtree(src, True)
else:
os.remove(src)
return True
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='OCR a PDF file with Tesseract and/or Cuneiform.')
parser.add_argument('file', type=str, default="input.pdf", help='Filename to process')
parser.add_argument('--output-folder', type=str, default="./", help='output folder')
parser.add_argument('--cuneiform', dest='cuneiform', action='store_true', help='Run Cuneiform OCR?')
parser.add_argument('--no-cuneiform', dest='cuneiform', action='store_false', help='Run Cuneiform OCR?')
parser.add_argument('--tesseract', dest='tesseract', action='store_true', help='Run Tesseract OCR?')
parser.add_argument('--no-tesseract', dest='tesseract', action='store_false', help='Run Tesseract OCR?')
    parser.add_argument('--k2pdf', dest='k2pdf', action='store_true', default=False, help='Run k2pdf step?')
exclusives = parser.add_mutually_exclusive_group()
exclusives.add_argument('--fonttype', action='store_true', help='Run fonttype')
args = parser.parse_args()
if args.fonttype:
detect_layout_fonts(args.file, args.output_folder,
args.tesseract, args.k2pdf)
else:
main(args)
|
|
"""Unit test for eda.py
"""
# Author : Jin Kim jjinking(at)gmail(dot)com
# Creation date : 2014.02.13
# Last Modified : 2016.04.20
#
# License : MIT
import os
import numpy as np
import pandas as pd
import scipy.stats
import unittest
from datsci import eda
CURDIR = os.path.dirname(os.path.abspath(__file__))
class TestEda(unittest.TestCase):
def test_df_isclose(self):
# Test integers
df1 = pd.DataFrame([[1, 2],
[3, 4]])
df2 = pd.DataFrame([[1, 2],
[3, 4]])
self.assertTrue(eda.df_isclose(df1, df2, tol=0))
df1 = pd.DataFrame([[1, 2],
[3, 5]])
df2 = pd.DataFrame([[1, 2],
[3, 4]])
self.assertFalse(eda.df_isclose(df1, df2))
# Test rounding
df1 = pd.DataFrame([[1.1234, 2.1234],
[3.1234, 4.1234]])
df2 = pd.DataFrame([[1.1234, 2.1234],
[3.1234, 4.1234]])
self.assertTrue(eda.df_isclose(df1, df2))
df1 = pd.DataFrame([[1.1234, 2.1234],
[3.1234, 5.1234]])
df2 = pd.DataFrame([[1.1234, 2.1234],
[3.1234, 4.1232]])
self.assertFalse(eda.df_isclose(df1, df2))
df1 = pd.DataFrame([[1.1234, 2.1234],
[3.1234, 4.1234]])
df2 = pd.DataFrame([[1.1234, 2.1234],
[3.1234, 4.1232]])
self.assertTrue(eda.df_isclose(df1, df2, tol=1e-3))
df1 = pd.DataFrame([[np.nan, 2.1234],
[3.1234, 5.1234123]])
df2 = pd.DataFrame([[np.nan, 2.1234],
[3.1234, 5.123412]])
self.assertTrue(eda.df_isclose(df1, df2, tol=1e-6))
def test_find_const_cols(self):
df = pd.DataFrame([[1, 2, 33, 4],
[None, None, np.nan, 4],
[None, np.nan, np.nan, 44],
[None, 2, 33, 44],
[None, np.nan, 33, 44],
[None, None, 33, 44]], columns=['a', 'b', 'c', 'd'])
self.assertEqual(eda.find_const_cols(df, dropna=True),
['a', 'b', 'c'])
def test_find_null_cols(self):
'''
        Test finding columns whose values are null in at least a certain fraction of the rows
'''
df = pd.DataFrame([[None, 1, 1, 1.0, 1.0, 1],
[None, 1, None, 0, 1.0, 0],
[None, 2, None, None, 1.0, 0],
[None, 2, None, None, None, 0],
[None, 2, None, None, None, None]],
columns=['a', 'b', 'c', 'd', 'e', 'f'])
self.assertEqual(
eda.find_null_cols(df, frac=.99), ['a'])
self.assertEqual(
eda.find_null_cols(df, frac=.81), ['a'])
self.assertEqual(
eda.find_null_cols(df, frac=.80), ['a', 'c'])
self.assertEqual(
eda.find_null_cols(df, frac=.79), ['a', 'c'])
self.assertEqual(
eda.find_null_cols(df, frac=.60), ['a', 'c', 'd'])
self.assertEqual(
eda.find_null_cols(df, frac=.39), ['a', 'c', 'd', 'e'])
self.assertEqual(
eda.find_null_cols(df, frac=.20), ['a', 'c', 'd', 'e', 'f'])
self.assertEqual(
eda.find_null_cols(df, frac=0.0), ['a', 'b', 'c', 'd', 'e', 'f'])
def test_find_n_nary_cols(self):
df = pd.DataFrame([[1, 11, 1, 1.0, 1.0, 1.000001],
[0, 11, None, 0, 1.0, 0],
[1, 22, None, None, 1.0, 0]],
columns=['a', 'b', 'c', 'd', 'e', 'f'])
# Find binary cols
self.assertEqual(eda.find_n_nary_cols(df, n=2, dropna=True),
['a', 'b', 'd', 'f'])
self.assertEqual(eda.find_n_nary_cols(df, n=2, dropna=False),
['a', 'b', 'c', 'f'])
# Find ternary cols
self.assertEqual(eda.find_n_nary_cols(df, n=3, dropna=True), [])
self.assertEqual(
eda.find_n_nary_cols(df, n=3, dropna=False), ['d'])
def test_get_feature_clusters(self):
# Test clustering 100% correlated values
df = pd.DataFrame([[1, 1, 1, 1, 2, 1],
[2, 1, 2, 2, 2, 1],
[3, 5, 3, 3, 1, 5],
[4, 5, 4, 4, 2, 5],
[5, 3, 5, 5, 2, 3]],
columns=['a', 'b', 'c', 'd', 'e', 'f'])
clusts = sorted([
sorted(clust) for clust in eda.get_feature_clusters(
df, cols=df.columns, thresh=1.0)
])
self.assertEqual(clusts, [['a', 'c', 'd'], ['b', 'f'], ['e']])
# Test thresholding
df = pd.DataFrame([[1, 1, 1, 1, 2, 1],
[2, 1, 2, 1, 2, 1],
[3, 5, 2, 3, 1, 5],
[4, 5, 4, 4, 2, 5],
[5, 3, 5, 5, 2, 3]],
columns=['a', 'b', 'c', 'd', 'e', 'f'])
# Check the correlation range
self.assertTrue(0.95 < scipy.stats.pearsonr(df.a, df.c)[0] < 0.97)
clusts = sorted([
sorted(clust) for clust in eda.get_feature_clusters(
df, thresh=0.95)
])
self.assertEqual(clusts, [['a', 'c', 'd'], ['b', 'f'], ['e']])
clusts = sorted([
sorted(clust) for clust in eda.get_feature_clusters(
df, cols=df.columns, thresh=0.97)
])
self.assertEqual(clusts, [['a', 'd'], ['b', 'f'], ['c'], ['e']])
def test_summarize_training_data(self):
sample_file_csv = os.path.join(CURDIR, 'res', 'sample1.csv')
df = pd.read_csv(sample_file_csv)
(summary,
n_rows,
label_counts) = eda.summarize_training_data(df,
y_name='c',
summary_pkl=None)
self.assertEqual(n_rows, 8)
self.assertEqual(len(label_counts), 8)
self.assertEqual(set(label_counts.values()), {1})
self.assertEqual(summary.shape[0], 3)
self.assertEqual(
summary[summary['attribute'] == 'a']['min'].values[0], 1)
self.assertEqual(
summary[summary['attribute'] == 'a']['max'].values[0], 11111111)
self.assertEqual(
summary[summary['attribute'] == 'a']['n_null'].values[0], 0)
self.assertEqual(
summary[summary['attribute'] == 'a']['perc_null'].values[0], 0)
self.assertEqual(
summary[summary['attribute'] == 'b']['min'].values[0], 2)
self.assertEqual(
summary[summary['attribute'] == 'b']['max'].values[0], 22222222)
self.assertEqual(
summary[summary['attribute'] == 'b']['n_null'].values[0], 0)
self.assertEqual(
summary[summary['attribute'] == 'b']['perc_null'].values[0], 0)
self.assertEqual(
summary[summary['attribute'] == 'c']['min'].values[0], 3)
self.assertEqual(
summary[summary['attribute'] == 'c']['max'].values[0], 33333333)
self.assertEqual(
summary[summary['attribute'] == 'c']['n_null'].values[0], 0)
self.assertEqual(
summary[summary['attribute'] == 'c']['perc_null'].values[0], 0)
sample_file_csv = os.path.join(CURDIR, 'res', 'sample2.csv')
df = pd.read_csv(sample_file_csv)
(summary,
n_rows,
label_counts) = eda.summarize_training_data(df,
y_name='c',
summary_pkl=None)
self.assertEqual(n_rows, 10)
self.assertEqual(len(label_counts), 4)
self.assertEqual(set(label_counts.values()), {1, 2, 3, 4})
self.assertEqual(label_counts[sorted(label_counts.keys())[0]], 3)
self.assertEqual(label_counts[3], 1)
self.assertEqual(label_counts[33], 2)
self.assertEqual(label_counts[333], 4)
self.assertEqual(summary.shape[0], 3)
summary_a = summary[summary['attribute'] == 'a']
self.assertEqual(summary_a['min'].values[0], 11)
self.assertEqual(summary_a['max'].values[0], 1111111111)
self.assertEqual(summary_a['n_null'].values[0], 1)
self.assertEqual(summary_a['perc_null'].values[0], .10)
self.assertEqual(summary_a['n_uniq'].values[0], 10)
summary_b = summary[summary['attribute'] == 'b']
self.assertEqual(summary_b['min'].values[0], 2)
self.assertEqual(summary_b['max'].values[0], 222222222)
self.assertEqual(summary_b['n_null'].values[0], 2)
self.assertEqual(summary_b['perc_null'].values[0], .2)
self.assertEqual(summary_b['n_uniq'].values[0], 9)
summary_c = summary[summary['attribute'] == 'c']
self.assertEqual(summary_c['min'].values[0], 3)
self.assertEqual(summary_c['max'].values[0], 333)
self.assertEqual(summary_c['n_null'].values[0], 3)
self.assertEqual(summary_c['perc_null'].values[0], .3)
self.assertEqual(summary_c['n_uniq'].values[0], 4)
sample_file_csv = os.path.join(CURDIR, 'res', 'sample3.csv')
summary_pkl_file = os.path.join(CURDIR, 'res', 'foo.pkl')
df = pd.read_csv(sample_file_csv)
summary, n_rows, label_counts = eda.summarize_training_data(
df,
y_name='z',
summary_pkl=summary_pkl_file)
self.assertEqual(n_rows, 10)
self.assertEqual(len(label_counts), 4)
self.assertEqual(set(label_counts.values()), {1, 2, 3, 4})
self.assertEqual(label_counts[np.nan], 3)
self.assertEqual(label_counts['c'], 1)
self.assertEqual(label_counts['cc'], 2)
self.assertEqual(label_counts['ccc'], 4)
self.assertEqual(summary.shape[0], 3)
summary_x = summary[summary['attribute'] == 'x']
self.assertTrue(pd.isnull(summary_x['min'].values[0]))
self.assertTrue(pd.isnull(summary_x['max'].values[0]))
self.assertEqual(summary_x['n_null'].values[0], 1)
self.assertEqual(summary_x['perc_null'].values[0], .10)
self.assertEqual(summary_x['n_uniq'].values[0], 10)
summary_y = summary[summary['attribute'] == 'y']
self.assertTrue(pd.isnull(summary_y['min'].values[0]))
self.assertTrue(pd.isnull(summary_y['max'].values[0]))
self.assertEqual(summary_y['n_null'].values[0], 2)
self.assertEqual(summary_y['perc_null'].values[0], .2)
self.assertEqual(summary_y['n_uniq'].values[0], 9)
summary_z = summary[summary['attribute'] == 'z']
self.assertTrue(pd.isnull(summary_z['min'].values[0]))
self.assertTrue(pd.isnull(summary_z['max'].values[0]))
self.assertEqual(summary_z['n_null'].values[0], 3)
self.assertEqual(summary_z['perc_null'].values[0], .3)
self.assertEqual(summary_z['n_uniq'].values[0], 4)
# Check that summary pkl file exists
self.assertTrue(os.path.exists(summary_pkl_file))
# Check saved values can be loaded and is correct
summary2, n_rows2, label_counts2 = eda.load_summary_data(
summary_pkl_file)
self.assertTrue(eda.df_isclose(summary, summary2))
self.assertEqual(n_rows, n_rows2)
self.assertEqual(str(list(label_counts.items())),
str(list(label_counts2.items())))
# Delete file
os.remove(summary_pkl_file)
self.assertFalse(os.path.exists(summary_pkl_file))
# Run again with summary_pkl option set to None
summary, n_rows, label_counts = eda.summarize_training_data(
df,
y_name='z',
summary_pkl=None)
self.assertFalse(os.path.exists(summary_pkl_file))
def test_summarize_big_training_data(self):
for fname in ['sample1.csv', 'sample1.csv.zip', 'sample1.csv.gz',
'sample1.csv.tar.gz', 'sample1.csv.tar.bz2']:
sample_file_csv = os.path.join(CURDIR, 'res', fname)
(summary,
n_rows,
label_counts) = eda.summarize_big_training_data(sample_file_csv,
y_name='c',
summary_pkl=None)
self.assertEqual(n_rows, 8)
self.assertEqual(len(label_counts), 8)
self.assertEqual(set(label_counts.values()), {1})
self.assertEqual(summary.shape[0], 3)
summary_a = summary[summary['attribute'] == 'a']
self.assertEqual(summary_a['min'].values[0], 1)
self.assertEqual(summary_a['max'].values[0], 11111111)
self.assertEqual(summary_a['n_null'].values[0], 0)
self.assertEqual(summary_a['perc_null'].values[0], 0)
summary_b = summary[summary['attribute'] == 'b']
self.assertEqual(summary_b['min'].values[0], 2)
self.assertEqual(summary_b['max'].values[0], 22222222)
self.assertEqual(summary_b['n_null'].values[0], 0)
self.assertEqual(summary_b['perc_null'].values[0], 0)
summary_c = summary[summary['attribute'] == 'c']
self.assertEqual(summary_c['min'].values[0], 3)
self.assertEqual(summary_c['max'].values[0], 33333333)
self.assertEqual(summary_c['n_null'].values[0], 0)
self.assertEqual(summary_c['perc_null'].values[0], 0)
sample_file_csv = os.path.join(CURDIR, 'res', 'sample2.csv')
summary_pkl_file = os.path.join(CURDIR, 'res', 'foo.pkl')
summary, n_rows, label_counts = eda.summarize_big_training_data(
sample_file_csv,
y_name='c',
summary_pkl=summary_pkl_file)
self.assertEqual(n_rows, 10)
self.assertEqual(len(label_counts), 4)
self.assertEqual(set(label_counts.values()), {1, 2, 3, 4})
self.assertEqual(label_counts[''], 3)
self.assertEqual(label_counts['3'], 1)
self.assertEqual(label_counts['33'], 2)
self.assertEqual(label_counts['333'], 4)
self.assertEqual(summary.shape[0], 3)
summary_a = summary[summary['attribute'] == 'a']
self.assertEqual(summary_a['min'].values[0], 11)
self.assertEqual(summary_a['max'].values[0], 1111111111)
self.assertEqual(summary_a['n_null'].values[0], 1)
self.assertEqual(summary_a['perc_null'].values[0], .10)
self.assertEqual(summary_a['n_uniq'].values[0], 10)
summary_b = summary[summary['attribute'] == 'b']
self.assertEqual(summary_b['min'].values[0], 2)
self.assertEqual(summary_b['max'].values[0], 222222222)
self.assertEqual(summary_b['n_null'].values[0], 2)
self.assertEqual(summary_b['perc_null'].values[0], .2)
self.assertEqual(summary_b['n_uniq'].values[0], 9)
summary_c = summary[summary['attribute'] == 'c']
self.assertEqual(summary_c['min'].values[0], 3)
self.assertEqual(summary_c['max'].values[0], 333)
self.assertEqual(summary_c['n_null'].values[0], 3)
self.assertEqual(summary_c['perc_null'].values[0], .3)
self.assertEqual(summary_c['n_uniq'].values[0], 4)
# Check that summary pkl file exists
self.assertTrue(os.path.exists(summary_pkl_file))
# Check saved values can be loaded and is correct
summary2, n_rows2, label_counts2 = eda.load_summary_data(
summary_pkl_file)
self.assertTrue(eda.df_isclose(summary, summary2))
self.assertEqual(n_rows, n_rows2)
self.assertEqual(set(label_counts.items()),
set(label_counts2.items()))
# Delete file
os.remove(summary_pkl_file)
self.assertFalse(os.path.exists(summary_pkl_file))
# Run again with summary_pkl option set to None
summary, n_rows, label_counts = eda.summarize_big_training_data(
sample_file_csv,
y_name='c',
summary_pkl=None)
self.assertFalse(os.path.exists(summary_pkl_file))
def test_count_big_file_value_counts(self):
sample_file_csv = os.path.join(CURDIR, 'res', 'sample4.csv')
value_counts = eda.count_big_file_value_counts(sample_file_csv, 'a')
self.assertEqual(value_counts['1'], 4)
self.assertEqual(value_counts['0'], 2)
value_counts = eda.count_big_file_value_counts(sample_file_csv, 'b')
self.assertEqual(value_counts['x'], 3)
self.assertEqual(value_counts['y'], 3)
value_counts = eda.count_big_file_value_counts(sample_file_csv, 'c')
self.assertEqual(value_counts['0'], 6)
if __name__ == '__main__':
unittest.main()
|
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from hashlib import sha1
from pants.backend.android.targets.android_library import AndroidLibrary
from pants.backend.android.targets.android_resources import AndroidResources
from pants.backend.jvm.jar_dependency_utils import M2Coordinate
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.tasks.jar_import_products import JarImportProducts
from pants.base.build_environment import get_buildroot
from pants.base.fingerprint_strategy import DefaultFingerprintStrategy
from pants.build_graph.address import Address
from pants.fs.archive import ZIP
from pants.task.task import Task
class AndroidLibraryFingerprintStrategy(DefaultFingerprintStrategy):
def compute_fingerprint(self, target):
"""AndroidLibrary targets need to be re-unpacked if any of the imported jars have changed."""
# TODO(mateor) Create a utility function to add a block of fingerprints to a hasher with caller
# handing in list of items of the same type and a function to extract a fingerprint from each.
if isinstance(target, AndroidLibrary):
hasher = sha1()
for cache_key in sorted(jar.cache_key() for jar in target.imported_jars):
hasher.update(cache_key)
hasher.update(target.payload.fingerprint())
return hasher.hexdigest()
return None
class UnpackLibraries(Task):
"""Unpack AndroidDependency artifacts, including .jar and .aar libraries.
The UnpackLibraries task unpacks artifacts imported by AndroidLibraries, as .aar or .jar files,
through a 'libraries' attribute. The .aar files may contain components which require creation
of some synthetic targets, as well as a classes.jar. The classes.jar is packaged into a
  JarDependency target and sent to javac compilation. All jar files are then unpacked;
android_binaries repack the class files of all the android_libraries in their transitive
dependencies into a dex file.
All archives are unpacked only once, regardless of differing include/exclude patterns or how many
targets depend upon it. All targets that depend on a particular artifact will be passed the
unpack_libraries product, which is a directory containing the entire source of the unpacked jars.
These sources are filtered against the AndroidLibrary's include/exclude patterns during the
creation of the dex file.
"""
class MissingElementException(Exception):
"""Raised if an unpacked file or directory unexpectedly does not exist."""
class UnexpectedArchiveType(Exception):
"""Raised if an archive has an extension that is not explicitly handled by this class."""
@classmethod
def prepare(cls, options, round_manager):
super(UnpackLibraries, cls).prepare(options, round_manager)
round_manager.require_data(JarImportProducts)
@classmethod
def product_types(cls):
return ['unpacked_libraries']
@staticmethod
def is_library(target):
"""Return True for AndroidLibrary targets."""
# TODO(mateor) add AndroidBinary support. If include/exclude patterns aren't needed, an
# android_binary should be able to simply declare an android_dependency as a dep.
return isinstance(target, AndroidLibrary)
def __init__(self, *args, **kwargs):
super(UnpackLibraries, self).__init__(*args, **kwargs)
self._created_targets = {}
self._unpacked_archives = set()
def create_classes_jar_target(self, target, coordinate, jar_file):
"""Create a JarLibrary target containing the jar_file as a JarDependency.
:param target: The new JarLibrary will be derived from this AndroidLibrary.
:type target: :class:`pants.backend.android.targets.android_library.AndroidLibrary`
:param coordinate: Archive coordinate fetched by ivy, e.g. 'org.pantsbuild:example::1.0:aar'.
:type coordinate: :class:`pants.backend.jvm.jar_dependency_utils.M2Coordinate`
:param string jar_file: Full path of the classes.jar contained within unpacked aar files.
:returns: A new jar library target.
:rtype: :class:`pants.backend.jvm.targets.jar_library.JarLibrary`
"""
# TODO(mateor) add another JarDependency for every jar under 'libs'.
jar_url = 'file://{0}'.format(jar_file)
jar_dep = JarDependency(org=target.id, name=coordinate.artifact_filename, rev=coordinate.rev,
url=jar_url)
address = Address(self.workdir, '{}-classes.jar'.format(coordinate.artifact_filename))
new_target = self.context.add_new_target(address, JarLibrary, jars=[jar_dep],
derived_from=target)
return new_target
def create_resource_target(self, target, coordinate, manifest, resource_dir):
"""Create an AndroidResources target.
:param target: AndroidLibrary that the new AndroidResources target derives from.
:type target: :class:`pants.backend.android.targets.android_library.AndroidLibrary`
:param coordinate: Archive coordinate fetched by ivy, e.g. 'org.pantsbuild:example::1.0:aar'.
:type coordinate: :class:`pants.backend.jvm.jar_dependency_utils.M2Coordinate`
:param string manifest: The path of 'AndroidManifest.xml'
:param string resource_dir: Full path of the res directory contained within aar files.
:return: A new android resources target.
    :rtype: :class:`pants.backend.android.targets.AndroidResources`
"""
address = Address(self.workdir, '{}-resources'.format(coordinate.artifact_filename))
new_target = self.context.add_new_target(address, AndroidResources,
manifest=manifest, resource_dir=resource_dir,
derived_from=target)
return new_target
def create_android_library_target(self, target, coordinate, unpacked_aar_location):
"""Create an AndroidLibrary target.
The aar files are unpacked and the contents used to create a new AndroidLibrary target.
:param AndroidLibrary target: AndroidLibrary that the new AndroidLibrary target derives from.
:param coordinate: Archive coordinate fetched by ivy, e.g. 'org.pantsbuild:example::1.0:aar'.
:type coordinate: :class:`pants.backend.jvm.jar_dependency_utils.M2Coordinate`
:param string unpacked_aar_location: Full path of dir holding contents of an unpacked aar file.
:return: A new android library target.
    :rtype: :class:`pants.backend.android.targets.AndroidLibrary`
"""
# The following three elements of an aar file have names mandated by the aar spec:
# http://tools.android.com/tech-docs/new-build-system/aar-format
# They are said to be mandatory although in practice that assumption only holds for manifest.
manifest = os.path.join(unpacked_aar_location, 'AndroidManifest.xml')
jar_file = os.path.join(unpacked_aar_location, 'classes.jar')
resource_dir = os.path.join(unpacked_aar_location, 'res')
# Sanity check to make sure all .aar files we expect to be unpacked are actually unpacked.
if not os.path.isfile(manifest):
raise self.MissingElementException("An AndroidManifest.xml is expected in every unpacked "
".aar file but none was found in the {} archive "
"for the {} target".format(coordinate, target))
# Depending on the contents of the unpacked aar file, create the dependencies.
deps = []
if os.path.isdir(resource_dir):
deps.append(self.create_resource_target(target, coordinate, manifest, resource_dir))
if os.path.isfile(jar_file):
deps.append(self.create_classes_jar_target(target, coordinate, jar_file))
address = Address(self.workdir, '{}-android_library'.format(coordinate.artifact_filename))
new_target = self.context.add_new_target(address, AndroidLibrary,
manifest=manifest,
include_patterns=target.payload.include_patterns,
exclude_patterns=target.payload.exclude_patterns,
dependencies=deps,
derived_from=target)
return new_target
def _unpack_artifacts(self, jar_imports):
# Unpack the aar and jar library artifacts. If the aar files have a jar in the contents,
# unpack that jar as well.
for coordinate, aar_or_jar in jar_imports:
jar_outdir = self.unpacked_jar_location(coordinate)
if 'jar' == coordinate.ext:
jar_file = aar_or_jar
elif 'aar' == coordinate.ext:
unpacked_aar_destination = self.unpacked_aar_location(coordinate)
jar_file = os.path.join(unpacked_aar_destination, 'classes.jar')
# Unpack .aar files.
if coordinate not in self._unpacked_archives:
ZIP.extract(aar_or_jar, unpacked_aar_destination)
self._unpacked_archives.add(aar_or_jar)
# Create an .aar/classes.jar signature for self._unpacked_archives.
coordinate = M2Coordinate(org=coordinate.org,
name=coordinate.name,
rev=coordinate.rev,
classifier=coordinate.classifier,
ext='classes.jar')
else:
raise self.UnexpectedArchiveType('Android dependencies can be .aar or .jar archives '
'(was: {} at {})'.format(coordinate, aar_or_jar))
# Unpack the jar files.
if coordinate not in self._unpacked_archives and os.path.isfile(jar_file):
ZIP.extract(jar_file, jar_outdir)
self._unpacked_archives.add(aar_or_jar)
def _create_target(self, target, coordinates):
# Create a target for the components of an unpacked .aar file.
for coordinate in coordinates:
# The contents of the unpacked aar file must be made into an AndroidLibrary target.
if 'aar' == coordinate.ext:
if coordinate not in self._created_targets:
unpacked_location = self.unpacked_aar_location(coordinate)
if not os.path.isdir(unpacked_location):
raise self.MissingElementException('{}: Expected to unpack {} at {} but did not!'
.format(target, coordinate, unpacked_location))
new_target = self.create_android_library_target(target,
coordinate,
unpacked_location)
self._created_targets[coordinate] = new_target
target.inject_dependency(self._created_targets[coordinate].address)
# The unpacked_libraries product is a dir containing the full unpacked source. The files
# that match the include/exclude patterns are calculated during DxCompile.
unpacked_products = self.context.products.get('unpacked_libraries')
unpacked_products.add(target, get_buildroot()).append(self.unpacked_jar_location(coordinate))
def execute(self):
jar_import_products = self.context.products.get_data(JarImportProducts)
library_targets = self.context.targets(predicate=self.is_library)
with self.invalidated(library_targets,
fingerprint_strategy=AndroidLibraryFingerprintStrategy(),
invalidate_dependents=True) as invalidation_check:
for vt in invalidation_check.invalid_vts:
jar_imports = jar_import_products.imports(vt.target)
if jar_imports:
self._unpack_artifacts(jar_imports)
# Create the new targets from the contents of unpacked aar files.
for target in library_targets:
jar_imports = jar_import_products.imports(target)
if jar_imports:
self._create_target(target, (jar_import.coordinate for jar_import in jar_imports))
def unpacked_jar_location(self, coordinate):
"""Location for unpacked jar files, whether imported as-is or found inside an aar file."""
return os.path.join(self.workdir, 'explode-jars', coordinate.artifact_filename)
def unpacked_aar_location(self, coordinate):
"""Output location for unpacking .aar archives."""
return os.path.join(self.workdir, coordinate.artifact_filename)
|
|
"""
GDB extension that adds Cython support.
"""
from __future__ import print_function
try:
input = raw_input
except NameError:
pass
import sys
import textwrap
import traceback
import functools
import itertools
import collections
import gdb
try: # python 2
UNICODE = unicode
BYTES = str
except NameError: # python 3
UNICODE = str
BYTES = bytes
try:
from lxml import etree
have_lxml = True
except ImportError:
have_lxml = False
try:
# Python 2.5
from xml.etree import cElementTree as etree
except ImportError:
try:
# Python 2.5
from xml.etree import ElementTree as etree
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree
except ImportError:
# normal ElementTree install
import elementtree.ElementTree as etree
try:
import pygments.lexers
import pygments.formatters
except ImportError:
pygments = None
sys.stderr.write("Install pygments for colorized source code.\n")
if hasattr(gdb, 'string_to_argv'):
from gdb import string_to_argv
else:
from shlex import split as string_to_argv
from Cython.Debugger import libpython
# C or Python type
CObject = 'CObject'
PythonObject = 'PythonObject'
_data_types = dict(CObject=CObject, PythonObject=PythonObject)
_filesystemencoding = sys.getfilesystemencoding() or 'UTF-8'
# decorators
def dont_suppress_errors(function):
"*sigh*, readline"
@functools.wraps(function)
def wrapper(*args, **kwargs):
try:
return function(*args, **kwargs)
except Exception:
traceback.print_exc()
raise
return wrapper
def default_selected_gdb_frame(err=True):
def decorator(function):
@functools.wraps(function)
def wrapper(self, frame=None, *args, **kwargs):
try:
frame = frame or gdb.selected_frame()
except RuntimeError:
raise gdb.GdbError("No frame is currently selected.")
if err and frame.name() is None:
raise NoFunctionNameInFrameError()
return function(self, frame, *args, **kwargs)
return wrapper
return decorator
def require_cython_frame(function):
@functools.wraps(function)
@require_running_program
def wrapper(self, *args, **kwargs):
frame = kwargs.get('frame') or gdb.selected_frame()
if not self.is_cython_function(frame):
raise gdb.GdbError('Selected frame does not correspond with a '
'Cython function we know about.')
return function(self, *args, **kwargs)
return wrapper
def dispatch_on_frame(c_command, python_command=None):
def decorator(function):
@functools.wraps(function)
def wrapper(self, *args, **kwargs):
is_cy = self.is_cython_function()
is_py = self.is_python_function()
if is_cy or (is_py and not python_command):
function(self, *args, **kwargs)
elif is_py:
gdb.execute(python_command)
elif self.is_relevant_function():
gdb.execute(c_command)
else:
raise gdb.GdbError("Not a function cygdb knows about. "
"Use the normal GDB commands instead.")
return wrapper
return decorator
def require_running_program(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
try:
gdb.selected_frame()
except RuntimeError:
raise gdb.GdbError("No frame is currently selected.")
return function(*args, **kwargs)
return wrapper
def gdb_function_value_to_unicode(function):
@functools.wraps(function)
def wrapper(self, string, *args, **kwargs):
if isinstance(string, gdb.Value):
string = string.string()
return function(self, string, *args, **kwargs)
return wrapper
# Classes that represent the debug information
# Don't rename the parameters of these classes, they come directly from the XML
class CythonModule(object):
def __init__(self, module_name, filename, c_filename):
self.name = module_name
self.filename = filename
self.c_filename = c_filename
self.globals = {}
# {cython_lineno: min(c_linenos)}
self.lineno_cy2c = {}
# {c_lineno: cython_lineno}
self.lineno_c2cy = {}
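# Illustrative (hypothetical) example: if Cython line 12 was compiled into
# C lines 3400-3402, CyImport records lineno_cy2c[12] == 3400 (the smallest
# matching C line) and lineno_c2cy[3400] == lineno_c2cy[3401] ==
# lineno_c2cy[3402] == 12.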
self.functions = {}
class CythonVariable(object):
def __init__(self, name, cname, qualified_name, type, lineno):
self.name = name
self.cname = cname
self.qualified_name = qualified_name
self.type = type
self.lineno = int(lineno)
class CythonFunction(CythonVariable):
def __init__(self,
module,
name,
cname,
pf_cname,
qualified_name,
lineno,
type=CObject,
is_initmodule_function="False"):
super(CythonFunction, self).__init__(name,
cname,
qualified_name,
type,
lineno)
self.module = module
self.pf_cname = pf_cname
self.is_initmodule_function = is_initmodule_function == "True"
self.locals = {}
self.arguments = []
self.step_into_functions = set()
# General purpose classes
class CythonBase(object):
@default_selected_gdb_frame(err=False)
def is_cython_function(self, frame):
return frame.name() in self.cy.functions_by_cname
@default_selected_gdb_frame(err=False)
def is_python_function(self, frame):
"""
Tells if a frame is associated with a Python function.
If we can't read the Python frame information, don't regard it as such.
"""
if frame.name() == 'PyEval_EvalFrameEx':
pyframe = libpython.Frame(frame).get_pyop()
return pyframe and not pyframe.is_optimized_out()
return False
@default_selected_gdb_frame()
def get_c_function_name(self, frame):
return frame.name()
@default_selected_gdb_frame()
def get_c_lineno(self, frame):
return frame.find_sal().line
@default_selected_gdb_frame()
def get_cython_function(self, frame):
result = self.cy.functions_by_cname.get(frame.name())
if result is None:
raise NoCythonFunctionInFrameError()
return result
@default_selected_gdb_frame()
def get_cython_lineno(self, frame):
"""
Get the current Cython line number. Returns 0 if there is no
correspondence between the C and Cython code.
"""
cyfunc = self.get_cython_function(frame)
return cyfunc.module.lineno_c2cy.get(self.get_c_lineno(frame), 0)
@default_selected_gdb_frame()
def get_source_desc(self, frame):
filename = lineno = lexer = None
if self.is_cython_function(frame):
filename = self.get_cython_function(frame).module.filename
lineno = self.get_cython_lineno(frame)
if pygments:
lexer = pygments.lexers.CythonLexer(stripall=False)
elif self.is_python_function(frame):
pyframeobject = libpython.Frame(frame).get_pyop()
if not pyframeobject:
raise gdb.GdbError(
'Unable to read information on python frame')
filename = pyframeobject.filename()
lineno = pyframeobject.current_line_num()
if pygments:
lexer = pygments.lexers.PythonLexer(stripall=False)
else:
symbol_and_line_obj = frame.find_sal()
if not symbol_and_line_obj or not symbol_and_line_obj.symtab:
filename = None
lineno = 0
else:
filename = symbol_and_line_obj.symtab.fullname()
lineno = symbol_and_line_obj.line
if pygments:
lexer = pygments.lexers.CLexer(stripall=False)
return SourceFileDescriptor(filename, lexer), lineno
@default_selected_gdb_frame()
def get_source_line(self, frame):
source_desc, lineno = self.get_source_desc()
return source_desc.get_source(lineno)
@default_selected_gdb_frame()
def is_relevant_function(self, frame):
"""
Returns whether we care about a frame at the user level when debugging
Cython code.
"""
name = frame.name()
older_frame = frame.older()
if self.is_cython_function(frame) or self.is_python_function(frame):
return True
elif older_frame and self.is_cython_function(older_frame):
# check for direct C function call from a Cython function
cython_func = self.get_cython_function(older_frame)
return name in cython_func.step_into_functions
return False
@default_selected_gdb_frame(err=False)
def print_stackframe(self, frame, index, is_c=False):
"""
Print a C, Cython or Python stack frame and the line of source code
if available.
"""
# do this to prevent the require_cython_frame decorator from
# raising GdbError when calling self.cy.cy_cvalue.invoke()
selected_frame = gdb.selected_frame()
frame.select()
try:
source_desc, lineno = self.get_source_desc(frame)
except NoFunctionNameInFrameError:
print('#%-2d Unknown Frame (compile with -g)' % index)
return
if not is_c and self.is_python_function(frame):
pyframe = libpython.Frame(frame).get_pyop()
if pyframe is None or pyframe.is_optimized_out():
# print this python function as a C function
return self.print_stackframe(frame, index, is_c=True)
func_name = pyframe.co_name
func_cname = 'PyEval_EvalFrameEx'
func_args = []
elif self.is_cython_function(frame):
cyfunc = self.get_cython_function(frame)
f = lambda arg: self.cy.cy_cvalue.invoke(arg, frame=frame)
func_name = cyfunc.name
func_cname = cyfunc.cname
func_args = [] # [(arg, f(arg)) for arg in cyfunc.arguments]
else:
source_desc, lineno = self.get_source_desc(frame)
func_name = frame.name()
func_cname = func_name
func_args = []
try:
gdb_value = gdb.parse_and_eval(func_cname)
except RuntimeError:
func_address = 0
else:
func_address = gdb_value.address
if not isinstance(func_address, int):
# Seriously? Why is the address not an int?
if not isinstance(func_address, (str, bytes)):
func_address = str(func_address)
func_address = int(func_address.split()[0], 0)
a = ', '.join('%s=%s' % (name, val) for name, val in func_args)
sys.stdout.write('#%-2d 0x%016x in %s(%s)' % (index, func_address, func_name, a))
if source_desc.filename is not None:
sys.stdout.write(' at %s:%s' % (source_desc.filename, lineno))
sys.stdout.write('\n')
try:
sys.stdout.write(' ' + source_desc.get_source(lineno))
except gdb.GdbError:
pass
selected_frame.select()
def get_remote_cython_globals_dict(self):
m = gdb.parse_and_eval('__pyx_m')
try:
PyModuleObject = gdb.lookup_type('PyModuleObject')
except RuntimeError:
raise gdb.GdbError(textwrap.dedent("""\
Unable to lookup type PyModuleObject, did you compile python
with debugging support (-g)?"""))
m = m.cast(PyModuleObject.pointer())
return m['md_dict']
def get_cython_globals_dict(self):
"""
Get the Cython globals dict where the remote names are turned into
local strings.
"""
remote_dict = self.get_remote_cython_globals_dict()
pyobject_dict = libpython.PyObjectPtr.from_pyobject_ptr(remote_dict)
result = {}
seen = set()
for k, v in pyobject_dict.items():
result[k.proxyval(seen)] = v
return result
def print_gdb_value(self, name, value, max_name_length=None, prefix=''):
if libpython.pretty_printer_lookup(value):
typename = ''
else:
typename = '(%s) ' % (value.type,)
if max_name_length is None:
print('%s%s = %s%s' % (prefix, name, typename, value))
else:
print('%s%-*s = %s%s' % (prefix, max_name_length, name, typename, value))
def is_initialized(self, cython_func, local_name):
cyvar = cython_func.locals[local_name]
cur_lineno = self.get_cython_lineno()
if '->' in cyvar.cname:
# Closed over free variable
if cur_lineno > cython_func.lineno:
if cyvar.type == PythonObject:
return int(gdb.parse_and_eval(cyvar.cname))
return True
return False
return cur_lineno > cyvar.lineno
class SourceFileDescriptor(object):
def __init__(self, filename, lexer, formatter=None):
self.filename = filename
self.lexer = lexer
self.formatter = formatter
def valid(self):
return self.filename is not None
def lex(self, code):
if pygments and self.lexer and parameters.colorize_code:
bg = parameters.terminal_background.value
if self.formatter is None:
formatter = pygments.formatters.TerminalFormatter(bg=bg)
else:
formatter = self.formatter
return pygments.highlight(code, self.lexer, formatter)
return code
def _get_source(self, start, stop, lex_source, mark_line, lex_entire):
with open(self.filename) as f:
# to provide "correct" colouring, the entire code needs to be
# lexed. However, this makes a lot of things terribly slow, so
# we decide not to. Besides, it's unlikely to matter.
if lex_source and lex_entire:
f = self.lex(f.read()).splitlines()
slice = itertools.islice(f, start - 1, stop - 1)
for idx, line in enumerate(slice):
if start + idx == mark_line:
prefix = '>'
else:
prefix = ' '
if lex_source and not lex_entire:
line = self.lex(line)
yield '%s %4d %s' % (prefix, start + idx, line.rstrip())
def get_source(self, start, stop=None, lex_source=True, mark_line=0,
lex_entire=False):
exc = gdb.GdbError('Unable to retrieve source code')
if not self.filename:
raise exc
start = max(start, 1)
if stop is None:
stop = start + 1
try:
return '\n'.join(
self._get_source(start, stop, lex_source, mark_line, lex_entire))
except IOError:
raise exc
# Errors
class CyGDBError(gdb.GdbError):
"""
Base class for Cython-command related errors.
"""
def __init__(self, *args):
args = args or (self.msg,)
super(CyGDBError, self).__init__(*args)
class NoCythonFunctionInFrameError(CyGDBError):
"""
raised when the user requests the current cython function, which is
unavailable
"""
msg = "Current function is a function cygdb doesn't know about"
class NoFunctionNameInFrameError(NoCythonFunctionInFrameError):
"""
raised when the name of the C function could not be determined
in the current C stack frame
"""
msg = ('C function name could not be determined in the current C stack '
'frame')
# Parameters
class CythonParameter(gdb.Parameter):
"""
Base class for cython parameters
"""
def __init__(self, name, command_class, parameter_class, default=None):
self.show_doc = self.set_doc = self.__class__.__doc__
super(CythonParameter, self).__init__(name, command_class,
parameter_class)
if default is not None:
self.value = default
def __bool__(self):
return bool(self.value)
__nonzero__ = __bool__ # Python 2
class CompleteUnqualifiedFunctionNames(CythonParameter):
"""
Have 'cy break' complete unqualified function or method names.
"""
class ColorizeSourceCode(CythonParameter):
"""
Tell cygdb whether to colorize source code.
"""
class TerminalBackground(CythonParameter):
"""
Tell cygdb about the user's terminal background (light or dark).
"""
class CythonParameters(object):
"""
Simple container class that might get more functionality in the distant
future (mostly to remind us that we're dealing with parameters).
"""
def __init__(self):
self.complete_unqualified = CompleteUnqualifiedFunctionNames(
'cy_complete_unqualified',
gdb.COMMAND_BREAKPOINTS,
gdb.PARAM_BOOLEAN,
True)
self.colorize_code = ColorizeSourceCode(
'cy_colorize_code',
gdb.COMMAND_FILES,
gdb.PARAM_BOOLEAN,
True)
self.terminal_background = TerminalBackground(
'cy_terminal_background_color',
gdb.COMMAND_FILES,
gdb.PARAM_STRING,
"dark")
parameters = CythonParameters()
# Commands
class CythonCommand(gdb.Command, CythonBase):
"""
Base class for Cython commands
"""
command_class = gdb.COMMAND_NONE
@classmethod
def _register(cls, clsname, args, kwargs):
if not hasattr(cls, 'completer_class'):
return cls(clsname, cls.command_class, *args, **kwargs)
else:
return cls(clsname, cls.command_class, cls.completer_class,
*args, **kwargs)
@classmethod
def register(cls, *args, **kwargs):
alias = getattr(cls, 'alias', None)
if alias:
cls._register(cls.alias, args, kwargs)
return cls._register(cls.name, args, kwargs)
class CyCy(CythonCommand):
"""
Invoke a Cython command. Available commands are:
cy import
cy break
cy step
cy next
cy run
cy cont
cy finish
cy up
cy down
cy select
cy bt / cy backtrace
cy list
cy print
cy set
cy locals
cy globals
cy exec
"""
name = 'cy'
command_class = gdb.COMMAND_NONE
completer_class = gdb.COMPLETE_COMMAND
def __init__(self, name, command_class, completer_class):
# keep the signature 2.5 compatible (i.e. do not use f(*a, k=v))
super(CythonCommand, self).__init__(name, command_class,
completer_class, prefix=True)
commands = dict(
# GDB commands
import_ = CyImport.register(),
break_ = CyBreak.register(),
step = CyStep.register(),
next = CyNext.register(),
run = CyRun.register(),
cont = CyCont.register(),
finish = CyFinish.register(),
up = CyUp.register(),
down = CyDown.register(),
select = CySelect.register(),
bt = CyBacktrace.register(),
list = CyList.register(),
print_ = CyPrint.register(),
locals = CyLocals.register(),
globals = CyGlobals.register(),
exec_ = libpython.FixGdbCommand('cy exec', '-cy-exec'),
_exec = CyExec.register(),
set = CySet.register(),
# GDB functions
cy_cname = CyCName('cy_cname'),
cy_cvalue = CyCValue('cy_cvalue'),
cy_lineno = CyLine('cy_lineno'),
cy_eval = CyEval('cy_eval'),
)
for command_name, command in commands.items():
command.cy = self
setattr(self, command_name, command)
self.cy = self
# Cython module namespace
self.cython_namespace = {}
# maps (unique) qualified function names (e.g.
# cythonmodule.ClassName.method_name) to the CythonFunction object
self.functions_by_qualified_name = {}
# unique cnames of Cython functions
self.functions_by_cname = {}
# map function names like method_name to a list of all such
# CythonFunction objects
self.functions_by_name = collections.defaultdict(list)
class CyImport(CythonCommand):
"""
Import debug information output by the Cython compiler.
Example: cy import FILE...
"""
name = 'cy import'
command_class = gdb.COMMAND_STATUS
completer_class = gdb.COMPLETE_FILENAME
def invoke(self, args, from_tty):
if isinstance(args, BYTES):
args = args.decode(_filesystemencoding)
for arg in string_to_argv(args):
try:
f = open(arg)
except (IOError, OSError) as e:
raise gdb.GdbError('Unable to open file %r: %s' % (arg, e.args[1]))
t = etree.parse(f)
for module in t.getroot():
cython_module = CythonModule(**module.attrib)
self.cy.cython_namespace[cython_module.name] = cython_module
for variable in module.find('Globals'):
d = variable.attrib
cython_module.globals[d['name']] = CythonVariable(**d)
for function in module.find('Functions'):
cython_function = CythonFunction(module=cython_module,
**function.attrib)
# update the global function mappings
name = cython_function.name
qname = cython_function.qualified_name
self.cy.functions_by_name[name].append(cython_function)
self.cy.functions_by_qualified_name[
cython_function.qualified_name] = cython_function
self.cy.functions_by_cname[
cython_function.cname] = cython_function
d = cython_module.functions[qname] = cython_function
for local in function.find('Locals'):
d = local.attrib
cython_function.locals[d['name']] = CythonVariable(**d)
for step_into_func in function.find('StepIntoFunctions'):
d = step_into_func.attrib
cython_function.step_into_functions.add(d['name'])
cython_function.arguments.extend(
funcarg.tag for funcarg in function.find('Arguments'))
for marker in module.find('LineNumberMapping'):
cython_lineno = int(marker.attrib['cython_lineno'])
c_linenos = list(map(int, marker.attrib['c_linenos'].split()))
cython_module.lineno_cy2c[cython_lineno] = min(c_linenos)
for c_lineno in c_linenos:
cython_module.lineno_c2cy[c_lineno] = cython_lineno
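# A minimal sketch of the debug-information XML consumed above, inferred from
# the parsing code in CyImport.invoke; the leaf element tag names and all
# attribute values here are hypothetical:
#
#   <cython_debug>
#     <Module module_name="mymod" filename="mymod.pyx" c_filename="mymod.c">
#       <Globals>
#         <CythonVariable name="x" cname="__pyx_v_x" qualified_name="mymod.x"
#                         type="PythonObject" lineno="3"/>
#       </Globals>
#       <Functions>
#         <Function name="f" cname="__pyx_f_5mymod_f" pf_cname=""
#                   qualified_name="mymod.f" lineno="5">
#           <Locals>...</Locals>
#           <Arguments>...</Arguments>
#           <StepIntoFunctions>...</StepIntoFunctions>
#         </Function>
#       </Functions>
#       <LineNumberMapping>
#         <LineNumber cython_lineno="5" c_linenos="1200 1201"/>
#       </LineNumberMapping>
#     </Module>
#   </cython_debug>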
class CyBreak(CythonCommand):
"""
Set a breakpoint for Cython code using Cython qualified name notation, e.g.:
cy break cython_modulename.ClassName.method_name...
or normal notation:
cy break function_or_method_name...
or for a line number:
cy break cython_module:lineno...
Set a Python breakpoint:
Break on any function or method named 'func' in module 'modname'
cy break -p modname.func...
Break on any function or method named 'func'
cy break -p func...
"""
name = 'cy break'
command_class = gdb.COMMAND_BREAKPOINTS
def _break_pyx(self, name):
modulename, _, lineno = name.partition(':')
lineno = int(lineno)
if modulename:
cython_module = self.cy.cython_namespace[modulename]
else:
cython_module = self.get_cython_function().module
if lineno in cython_module.lineno_cy2c:
c_lineno = cython_module.lineno_cy2c[lineno]
breakpoint = '%s:%s' % (cython_module.c_filename, c_lineno)
gdb.execute('break ' + breakpoint)
else:
raise gdb.GdbError("Not a valid line number. "
"Does it contain actual code?")
def _break_funcname(self, funcname):
func = self.cy.functions_by_qualified_name.get(funcname)
if func and func.is_initmodule_function:
func = None
break_funcs = [func]
if not func:
funcs = self.cy.functions_by_name.get(funcname) or []
funcs = [f for f in funcs if not f.is_initmodule_function]
if not funcs:
gdb.execute('break ' + funcname)
return
if len(funcs) > 1:
# multiple functions, let the user pick one
print('There are multiple such functions:')
for idx, func in enumerate(funcs):
print('%3d) %s' % (idx, func.qualified_name))
while True:
try:
result = input(
"Select a function, press 'a' for all "
"functions or press 'q' or '^D' to quit: ")
except EOFError:
return
else:
if result.lower() == 'q':
return
elif result.lower() == 'a':
break_funcs = funcs
break
elif (result.isdigit() and
0 <= int(result) < len(funcs)):
break_funcs = [funcs[int(result)]]
break
else:
print('Not understood...')
else:
break_funcs = [funcs[0]]
for func in break_funcs:
gdb.execute('break %s' % func.cname)
if func.pf_cname:
gdb.execute('break %s' % func.pf_cname)
def invoke(self, function_names, from_tty):
if isinstance(function_names, BYTES):
function_names = function_names.decode(_filesystemencoding)
argv = string_to_argv(function_names)
if function_names.startswith('-p'):
argv = argv[1:]
python_breakpoints = True
else:
python_breakpoints = False
for funcname in argv:
if python_breakpoints:
gdb.execute('py-break %s' % funcname)
elif ':' in funcname:
self._break_pyx(funcname)
else:
self._break_funcname(funcname)
@dont_suppress_errors
def complete(self, text, word):
# Filter init-module functions (breakpoints can be set using
# modulename:linenumber).
names = [n for n, L in self.cy.functions_by_name.items()
if any(not f.is_initmodule_function for f in L)]
qnames = [n for n, f in self.cy.functions_by_qualified_name.items()
if not f.is_initmodule_function]
if parameters.complete_unqualified:
all_names = itertools.chain(qnames, names)
else:
all_names = qnames
words = text.strip().split()
if not words or '.' not in words[-1]:
# complete unqualified
seen = set(text[:-len(word)].split())
return [n for n in all_names
if n.startswith(word) and n not in seen]
# complete qualified name
lastword = words[-1]
compl = [n for n in qnames if n.startswith(lastword)]
if len(lastword) > len(word):
# readline sees something (e.g. a '.') as a word boundary, so don't
# "recomplete" this prefix
strip_prefix_length = len(lastword) - len(word)
compl = [n[strip_prefix_length:] for n in compl]
return compl
class CythonInfo(CythonBase, libpython.PythonInfo):
"""
Implementation of the interface dictated by libpython.LanguageInfo.
"""
def lineno(self, frame):
# Take care of the Python and Cython levels. We need to care for both
# as we can't simply dispatch to 'py-step', since that would work for
# stepping through Python code, but it would not step back into Cython-
# related code. The C level should be dispatched to the 'step' command.
if self.is_cython_function(frame):
return self.get_cython_lineno(frame)
return super(CythonInfo, self).lineno(frame)
def get_source_line(self, frame):
try:
line = super(CythonInfo, self).get_source_line(frame)
except gdb.GdbError:
return None
else:
return line.strip() or None
def exc_info(self, frame):
if self.is_python_function:
return super(CythonInfo, self).exc_info(frame)
def runtime_break_functions(self):
if self.is_cython_function():
return self.get_cython_function().step_into_functions
return ()
def static_break_functions(self):
result = ['PyEval_EvalFrameEx']
result.extend(self.cy.functions_by_cname)
return result
class CythonExecutionControlCommand(CythonCommand,
libpython.ExecutionControlCommandBase):
@classmethod
def register(cls):
return cls(cls.name, cython_info)
class CyStep(CythonExecutionControlCommand, libpython.PythonStepperMixin):
"Step through Cython, Python or C code."
name = 'cy -step'
stepinto = True
def invoke(self, args, from_tty):
if self.is_python_function():
self.python_step(self.stepinto)
elif not self.is_cython_function():
if self.stepinto:
command = 'step'
else:
command = 'next'
self.finish_executing(gdb.execute(command, to_string=True))
else:
self.step(stepinto=self.stepinto)
class CyNext(CyStep):
"Step-over Cython, Python or C code."
name = 'cy -next'
stepinto = False
class CyRun(CythonExecutionControlCommand):
"""
Run a Cython program. This is like the 'run' command, except that it
displays Cython or Python source lines as well.
"""
name = 'cy run'
invoke = CythonExecutionControlCommand.run
class CyCont(CythonExecutionControlCommand):
"""
Continue a Cython program. This is like the 'continue' command, except that it
displays Cython or Python source lines as well.
"""
name = 'cy cont'
invoke = CythonExecutionControlCommand.cont
class CyFinish(CythonExecutionControlCommand):
"""
Execute until the function returns.
"""
name = 'cy finish'
invoke = CythonExecutionControlCommand.finish
class CyUp(CythonCommand):
"""
Go up a Cython, Python or relevant C frame.
"""
name = 'cy up'
_command = 'up'
def invoke(self, *args):
try:
gdb.execute(self._command, to_string=True)
while not self.is_relevant_function(gdb.selected_frame()):
gdb.execute(self._command, to_string=True)
except RuntimeError as e:
raise gdb.GdbError(*e.args)
frame = gdb.selected_frame()
index = 0
while frame:
frame = frame.older()
index += 1
self.print_stackframe(index=index - 1)
class CyDown(CyUp):
"""
Go down a Cython, Python or relevant C frame.
"""
name = 'cy down'
_command = 'down'
class CySelect(CythonCommand):
"""
Select a frame. Use frame numbers as listed in `cy backtrace`.
This command is useful because `cy backtrace` prints a reversed backtrace.
"""
name = 'cy select'
def invoke(self, stackno, from_tty):
try:
stackno = int(stackno)
except ValueError:
raise gdb.GdbError("Not a valid number: %r" % (stackno,))
frame = gdb.selected_frame()
while frame.newer():
frame = frame.newer()
stackdepth = libpython.stackdepth(frame)
try:
gdb.execute('select %d' % (stackdepth - stackno - 1,))
except RuntimeError as e:
raise gdb.GdbError(*e.args)
class CyBacktrace(CythonCommand):
'Print the Cython stack'
name = 'cy bt'
alias = 'cy backtrace'
command_class = gdb.COMMAND_STACK
completer_class = gdb.COMPLETE_NONE
@require_running_program
def invoke(self, args, from_tty):
# get the first frame
frame = gdb.selected_frame()
while frame.older():
frame = frame.older()
print_all = args == '-a'
index = 0
while frame:
try:
is_relevant = self.is_relevant_function(frame)
except CyGDBError:
is_relevant = False
if print_all or is_relevant:
self.print_stackframe(frame, index)
index += 1
frame = frame.newer()
class CyList(CythonCommand):
"""
List Cython source code. To disable or customize colouring, see the cy_*
parameters.
"""
name = 'cy list'
command_class = gdb.COMMAND_FILES
completer_class = gdb.COMPLETE_NONE
# @dispatch_on_frame(c_command='list')
def invoke(self, _, from_tty):
sd, lineno = self.get_source_desc()
source = sd.get_source(lineno - 5, lineno + 5, mark_line=lineno,
lex_entire=True)
print(source)
class CyPrint(CythonCommand):
"""
Print a Cython variable using 'cy print x' or 'cy print module.function.x'.
"""
name = 'cy print'
command_class = gdb.COMMAND_DATA
def invoke(self, name, from_tty, max_name_length=None):
if self.is_python_function():
return gdb.execute('py-print ' + name)
elif self.is_cython_function():
value = self.cy.cy_cvalue.invoke(name.lstrip('*'))
for c in name:
if c == '*':
value = value.dereference()
else:
break
self.print_gdb_value(name, value, max_name_length)
else:
gdb.execute('print ' + name)
def complete(self):
if self.is_cython_function():
f = self.get_cython_function()
return list(itertools.chain(f.locals, f.globals))
else:
return []
sortkey = lambda item: item[0].lower()
class CyLocals(CythonCommand):
"""
List the locals from the current Cython frame.
"""
name = 'cy locals'
command_class = gdb.COMMAND_STACK
completer_class = gdb.COMPLETE_NONE
@dispatch_on_frame(c_command='info locals', python_command='py-locals')
def invoke(self, args, from_tty):
cython_function = self.get_cython_function()
if cython_function.is_initmodule_function:
self.cy.globals.invoke(args, from_tty)
return
local_cython_vars = cython_function.locals
max_name_length = len(max(local_cython_vars, key=len))
for name, cyvar in sorted(local_cython_vars.items(), key=sortkey):
if self.is_initialized(self.get_cython_function(), cyvar.name):
value = gdb.parse_and_eval(cyvar.cname)
if not value.is_optimized_out:
self.print_gdb_value(cyvar.name, value,
max_name_length, '')
class CyGlobals(CyLocals):
"""
List the globals from the current Cython module.
"""
name = 'cy globals'
command_class = gdb.COMMAND_STACK
completer_class = gdb.COMPLETE_NONE
@dispatch_on_frame(c_command='info variables', python_command='py-globals')
def invoke(self, args, from_tty):
global_python_dict = self.get_cython_globals_dict()
module_globals = self.get_cython_function().module.globals
max_globals_len = 0
max_globals_dict_len = 0
if module_globals:
max_globals_len = len(max(module_globals, key=len))
if global_python_dict:
max_globals_dict_len = len(max(global_python_dict, key=len))
max_name_length = max(max_globals_len, max_globals_dict_len)
seen = set()
print('Python globals:')
for k, v in sorted(global_python_dict.items(), key=sortkey):
v = v.get_truncated_repr(libpython.MAX_OUTPUT_LEN)
seen.add(k)
print(' %-*s = %s' % (max_name_length, k, v))
print('C globals:')
for name, cyvar in sorted(module_globals.items(), key=sortkey):
if name not in seen:
try:
value = gdb.parse_and_eval(cyvar.cname)
except RuntimeError:
pass
else:
if not value.is_optimized_out:
self.print_gdb_value(cyvar.name, value,
max_name_length, ' ')
class EvaluateOrExecuteCodeMixin(object):
"""
Evaluate or execute Python code in a Cython or Python frame. The 'evalcode'
method evaluates Python code, prints a traceback if an exception went
uncaught, and returns any return value as a gdb.Value (NULL on exception).
"""
def _fill_locals_dict(self, executor, local_dict_pointer):
"Fill a remotely allocated dict with values from the Cython C stack"
cython_func = self.get_cython_function()
for name, cyvar in cython_func.locals.items():
if cyvar.type == PythonObject and self.is_initialized(cython_func, name):
try:
val = gdb.parse_and_eval(cyvar.cname)
except RuntimeError:
continue
else:
if val.is_optimized_out:
continue
pystringp = executor.alloc_pystring(name)
code = '''
(PyObject *) PyDict_SetItem(
(PyObject *) %d,
(PyObject *) %d,
(PyObject *) %s)
''' % (local_dict_pointer, pystringp, cyvar.cname)
try:
if gdb.parse_and_eval(code) < 0:
gdb.parse_and_eval('PyErr_Print()')
raise gdb.GdbError("Unable to execute Python code.")
finally:
# PyDict_SetItem doesn't steal our reference
executor.xdecref(pystringp)
def _find_first_cython_or_python_frame(self):
frame = gdb.selected_frame()
while frame:
if (self.is_cython_function(frame) or
self.is_python_function(frame)):
frame.select()
return frame
frame = frame.older()
raise gdb.GdbError("There is no Cython or Python frame on the stack.")
def _evalcode_cython(self, executor, code, input_type):
with libpython.FetchAndRestoreError():
# get the dict of Cython globals and construct a dict in the
# inferior with Cython locals
global_dict = gdb.parse_and_eval(
'(PyObject *) PyModule_GetDict(__pyx_m)')
local_dict = gdb.parse_and_eval('(PyObject *) PyDict_New()')
try:
self._fill_locals_dict(executor,
libpython.pointervalue(local_dict))
result = executor.evalcode(code, input_type, global_dict,
local_dict)
finally:
executor.xdecref(libpython.pointervalue(local_dict))
return result
def evalcode(self, code, input_type):
"""
Evaluate `code` in a Python or Cython stack frame using the given
`input_type`.
"""
frame = self._find_first_cython_or_python_frame()
executor = libpython.PythonCodeExecutor()
if self.is_python_function(frame):
return libpython._evalcode_python(executor, code, input_type)
return self._evalcode_cython(executor, code, input_type)
class CyExec(CythonCommand, libpython.PyExec, EvaluateOrExecuteCodeMixin):
"""
Execute Python code in the nearest Python or Cython frame.
"""
name = '-cy-exec'
command_class = gdb.COMMAND_STACK
completer_class = gdb.COMPLETE_NONE
def invoke(self, expr, from_tty):
expr, input_type = self.readcode(expr)
executor = libpython.PythonCodeExecutor()
executor.xdecref(self.evalcode(expr, executor.Py_single_input))
class CySet(CythonCommand):
"""
Set a Cython variable to a certain value
cy set my_cython_c_variable = 10
cy set my_cython_py_variable = $cy_eval("{'doner': 'kebab'}")
This is equivalent to
set $cy_value("my_cython_variable") = 10
"""
name = 'cy set'
command_class = gdb.COMMAND_DATA
completer_class = gdb.COMPLETE_NONE
@require_cython_frame
def invoke(self, expr, from_tty):
name_and_expr = expr.split('=', 1)
if len(name_and_expr) != 2:
raise gdb.GdbError("Invalid expression. Use 'cy set var = expr'.")
varname, expr = name_and_expr
cname = self.cy.cy_cname.invoke(varname.strip())
gdb.execute("set %s = %s" % (cname, expr))
# Functions
class CyCName(gdb.Function, CythonBase):
"""
Get the C name of a Cython variable in the current context.
Examples:
print $cy_cname("function")
print $cy_cname("Class.method")
print $cy_cname("module.function")
"""
@require_cython_frame
@gdb_function_value_to_unicode
def invoke(self, cyname, frame=None):
frame = frame or gdb.selected_frame()
cname = None
if self.is_cython_function(frame):
cython_function = self.get_cython_function(frame)
if cyname in cython_function.locals:
cname = cython_function.locals[cyname].cname
elif cyname in cython_function.module.globals:
cname = cython_function.module.globals[cyname].cname
else:
qname = '%s.%s' % (cython_function.module.name, cyname)
if qname in cython_function.module.functions:
cname = cython_function.module.functions[qname].cname
if not cname:
cname = self.cy.functions_by_qualified_name.get(cyname)
if not cname:
raise gdb.GdbError('No such Cython variable: %s' % cyname)
return cname
class CyCValue(CyCName):
"""
Get the value of a Cython variable.
"""
@require_cython_frame
@gdb_function_value_to_unicode
def invoke(self, cyname, frame=None):
globals_dict = self.get_cython_globals_dict()
cython_function = self.get_cython_function(frame)
if self.is_initialized(cython_function, cyname):
cname = super(CyCValue, self).invoke(cyname, frame=frame)
return gdb.parse_and_eval(cname)
elif cyname in globals_dict:
return globals_dict[cyname]._gdbval
else:
raise gdb.GdbError("Variable %s is not initialized." % cyname)
class CyLine(gdb.Function, CythonBase):
"""
Get the current Cython line.
"""
@require_cython_frame
def invoke(self):
return self.get_cython_lineno()
class CyEval(gdb.Function, CythonBase, EvaluateOrExecuteCodeMixin):
"""
Evaluate Python code in the nearest Python or Cython frame and return the result.
"""
@gdb_function_value_to_unicode
def invoke(self, python_expression):
input_type = libpython.PythonCodeExecutor.Py_eval_input
return self.evalcode(python_expression, input_type)
cython_info = CythonInfo()
cy = CyCy.register()
cython_info.cy = cy
def register_defines():
libpython.source_gdb_script(textwrap.dedent("""\
define cy step
cy -step
end
define cy next
cy -next
end
document cy step
%s
end
document cy next
%s
end
""") % (CyStep.__doc__, CyNext.__doc__))
register_defines()
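# Illustrative cygdb session sketch (file and symbol names hypothetical),
# assuming the extension module was built with Cython's --gdb option so that
# debug-information files were generated:
#
#   (gdb) cy import cython_debug/cython_debug_info_mymod
#   (gdb) cy break mymod.my_function
#   (gdb) cy run
#   (gdb) cy bt
#   (gdb) cy print some_variable
#   (gdb) cy step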
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import math
import sys
import os
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.framework as framework
import paddle.fluid.layers as layers
import paddle.fluid.nets as nets
from paddle.fluid.executor import Executor
from paddle.fluid.optimizer import SGDOptimizer
IS_SPARSE = True
USE_GPU = False
BATCH_SIZE = 256
def get_usr_combined_features():
# FIXME(dzh): the old API integer_value(10) may have had a range check;
# currently we don't have a user-configurable check.
USR_DICT_SIZE = paddle.dataset.movielens.max_user_id() + 1
uid = layers.data(name='user_id', shape=[1], dtype='int64')
usr_emb = layers.embedding(
input=uid,
dtype='float32',
size=[USR_DICT_SIZE, 32],
param_attr='user_table',
is_sparse=IS_SPARSE)
usr_fc = layers.fc(input=usr_emb, size=32)
USR_GENDER_DICT_SIZE = 2
usr_gender_id = layers.data(name='gender_id', shape=[1], dtype='int64')
usr_gender_emb = layers.embedding(
input=usr_gender_id,
size=[USR_GENDER_DICT_SIZE, 16],
param_attr='gender_table',
is_sparse=IS_SPARSE)
usr_gender_fc = layers.fc(input=usr_gender_emb, size=16)
USR_AGE_DICT_SIZE = len(paddle.dataset.movielens.age_table)
usr_age_id = layers.data(name='age_id', shape=[1], dtype="int64")
usr_age_emb = layers.embedding(
input=usr_age_id,
size=[USR_AGE_DICT_SIZE, 16],
is_sparse=IS_SPARSE,
param_attr='age_table')
usr_age_fc = layers.fc(input=usr_age_emb, size=16)
USR_JOB_DICT_SIZE = paddle.dataset.movielens.max_job_id() + 1
usr_job_id = layers.data(name='job_id', shape=[1], dtype="int64")
usr_job_emb = layers.embedding(
input=usr_job_id,
size=[USR_JOB_DICT_SIZE, 16],
param_attr='job_table',
is_sparse=IS_SPARSE)
usr_job_fc = layers.fc(input=usr_job_emb, size=16)
concat_embed = layers.concat(
input=[usr_fc, usr_gender_fc, usr_age_fc, usr_job_fc], axis=1)
usr_combined_features = layers.fc(input=concat_embed, size=200, act="tanh")
return usr_combined_features
def get_mov_combined_features():
MOV_DICT_SIZE = paddle.dataset.movielens.max_movie_id() + 1
mov_id = layers.data(name='movie_id', shape=[1], dtype='int64')
mov_emb = layers.embedding(
input=mov_id,
dtype='float32',
size=[MOV_DICT_SIZE, 32],
param_attr='movie_table',
is_sparse=IS_SPARSE)
mov_fc = layers.fc(input=mov_emb, size=32)
CATEGORY_DICT_SIZE = len(paddle.dataset.movielens.movie_categories())
category_id = layers.data(
name='category_id', shape=[1], dtype='int64', lod_level=1)
mov_categories_emb = layers.embedding(
input=category_id, size=[CATEGORY_DICT_SIZE, 32], is_sparse=IS_SPARSE)
mov_categories_hidden = layers.sequence_pool(
input=mov_categories_emb, pool_type="sum")
MOV_TITLE_DICT_SIZE = len(paddle.dataset.movielens.get_movie_title_dict())
mov_title_id = layers.data(
name='movie_title', shape=[1], dtype='int64', lod_level=1)
mov_title_emb = layers.embedding(
input=mov_title_id, size=[MOV_TITLE_DICT_SIZE, 32], is_sparse=IS_SPARSE)
mov_title_conv = nets.sequence_conv_pool(
input=mov_title_emb,
num_filters=32,
filter_size=3,
act="tanh",
pool_type="sum")
concat_embed = layers.concat(
input=[mov_fc, mov_categories_hidden, mov_title_conv], axis=1)
# FIXME(dzh) : need tanh operator
mov_combined_features = layers.fc(input=concat_embed, size=200, act="tanh")
return mov_combined_features
def model():
usr_combined_features = get_usr_combined_features()
mov_combined_features = get_mov_combined_features()
# need cos sim
inference = layers.cos_sim(X=usr_combined_features, Y=mov_combined_features)
scale_infer = layers.scale(x=inference, scale=5.0)
label = layers.data(name='score', shape=[1], dtype='float32')
square_cost = layers.square_error_cost(input=scale_infer, label=label)
avg_cost = layers.mean(square_cost)
return scale_infer, avg_cost
def train(use_cuda, save_dirname, is_local=True):
scale_infer, avg_cost = model()
# test program
test_program = fluid.default_main_program().clone(for_test=True)
sgd_optimizer = SGDOptimizer(learning_rate=0.2)
sgd_optimizer.minimize(avg_cost)
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = Executor(place)
train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.movielens.train(), buf_size=8192),
batch_size=BATCH_SIZE)
test_reader = paddle.batch(
paddle.dataset.movielens.test(), batch_size=BATCH_SIZE)
feed_order = [
'user_id', 'gender_id', 'age_id', 'job_id', 'movie_id', 'category_id',
'movie_title', 'score'
]
def train_loop(main_program):
exe.run(framework.default_startup_program())
feed_list = [
main_program.global_block().var(var_name) for var_name in feed_order
]
feeder = fluid.DataFeeder(feed_list, place)
PASS_NUM = 100
for pass_id in range(PASS_NUM):
for batch_id, data in enumerate(train_reader()):
# train a mini-batch
outs = exe.run(program=main_program,
feed=feeder.feed(data),
fetch_list=[avg_cost])
out = np.array(outs[0])
if (batch_id + 1) % 10 == 0:
avg_cost_set = []
for test_data in test_reader():
avg_cost_np = exe.run(program=test_program,
feed=feeder.feed(test_data),
fetch_list=[avg_cost])
avg_cost_set.append(avg_cost_np[0])
break # test only 1 segment for speeding up CI
# get test avg_cost
test_avg_cost = np.array(avg_cost_set).mean()
if test_avg_cost < 6.0:
# if avg_cost is less than 6.0, we consider the model good enough.
if save_dirname is not None:
fluid.io.save_inference_model(save_dirname, [
"user_id", "gender_id", "age_id", "job_id",
"movie_id", "category_id", "movie_title"
], [scale_infer], exe)
return
if math.isnan(float(out[0])):
sys.exit("got NaN loss, training failed.")
if is_local:
train_loop(fluid.default_main_program())
else:
port = os.getenv("PADDLE_PSERVER_PORT", "6174")
pserver_ips = os.getenv("PADDLE_PSERVER_IPS") # ip,ip...
eplist = []
for ip in pserver_ips.split(","):
eplist.append(':'.join([ip, port]))
pserver_endpoints = ",".join(eplist) # ip:port,ip:port...
trainers = int(os.getenv("PADDLE_TRAINERS"))
current_endpoint = os.getenv("POD_IP") + ":" + port
trainer_id = int(os.getenv("PADDLE_TRAINER_ID"))
training_role = os.getenv("PADDLE_TRAINING_ROLE", "TRAINER")
t = fluid.DistributeTranspiler()
t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers)
if training_role == "PSERVER":
pserver_prog = t.get_pserver_program(current_endpoint)
pserver_startup = t.get_startup_program(current_endpoint,
pserver_prog)
exe.run(pserver_startup)
exe.run(pserver_prog)
elif training_role == "TRAINER":
train_loop(t.get_trainer_program())
def infer(use_cuda, save_dirname=None):
if save_dirname is None:
return
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
inference_scope = fluid.core.Scope()
with fluid.scope_guard(inference_scope):
# Use fluid.io.load_inference_model to obtain the inference program desc,
# the feed_target_names (the names of variables that will be fed
# data using feed operators), and the fetch_targets (variables that
# we want to obtain data from using fetch operators).
[inference_program, feed_target_names,
fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
# Use the first data from paddle.dataset.movielens.test() as input
assert feed_target_names[0] == "user_id"
# Use create_lod_tensor(data, recursive_sequence_lengths, place) API
# to generate LoD Tensor where `data` is a list of sequences of index
# numbers, `recursive_sequence_lengths` is the length-based level of detail
# (lod) info associated with `data`.
# For example, data = [[10, 2, 3], [2, 3]] means that it contains
# two sequences of indexes, of length 3 and 2, respectively.
# Correspondingly, recursive_sequence_lengths = [[3, 2]] contains one
# level of detail info, indicating that `data` consists of two sequences
# of length 3 and 2, respectively.
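# For instance (hypothetical ids), a batch holding the two category
# sequences [10, 8, 9] and [2, 3] could be built as:
#   fluid.create_lod_tensor([[10, 8, 9], [2, 3]], [[3, 2]], place)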
user_id = fluid.create_lod_tensor([[1]], [[1]], place)
assert feed_target_names[1] == "gender_id"
gender_id = fluid.create_lod_tensor([[1]], [[1]], place)
assert feed_target_names[2] == "age_id"
age_id = fluid.create_lod_tensor([[0]], [[1]], place)
assert feed_target_names[3] == "job_id"
job_id = fluid.create_lod_tensor([[10]], [[1]], place)
assert feed_target_names[4] == "movie_id"
movie_id = fluid.create_lod_tensor([[783]], [[1]], place)
assert feed_target_names[5] == "category_id"
category_id = fluid.create_lod_tensor([[10, 8, 9]], [[3]], place)
assert feed_target_names[6] == "movie_title"
movie_title = fluid.create_lod_tensor([[1069, 4140, 2923, 710, 988]],
[[5]], place)
# Construct feed as a dictionary of {feed_target_name: feed_target_data}
# and results will contain a list of data corresponding to fetch_targets.
results = exe.run(inference_program,
feed={
feed_target_names[0]: user_id,
feed_target_names[1]: gender_id,
feed_target_names[2]: age_id,
feed_target_names[3]: job_id,
feed_target_names[4]: movie_id,
feed_target_names[5]: category_id,
feed_target_names[6]: movie_title
},
fetch_list=fetch_targets,
return_numpy=False)
print("inferred score: ", np.array(results[0]))
def main(use_cuda):
if use_cuda and not fluid.core.is_compiled_with_cuda():
return
# Directory for saving the inference model
save_dirname = "recommender_system.inference.model"
train(use_cuda, save_dirname)
infer(use_cuda, save_dirname)
if __name__ == '__main__':
main(USE_GPU)
|
|
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Translates gRPC's server-side API into gRPC's server-side Beta API."""
import collections
import threading
import grpc
from grpc import _common
from grpc.beta import interfaces
from grpc.framework.common import cardinality
from grpc.framework.common import style
from grpc.framework.foundation import abandonment
from grpc.framework.foundation import logging_pool
from grpc.framework.foundation import stream
from grpc.framework.interfaces.face import face
# pylint: disable=too-many-return-statements
_DEFAULT_POOL_SIZE = 8
class _ServerProtocolContext(interfaces.GRPCServicerContext):
def __init__(self, servicer_context):
self._servicer_context = servicer_context
def peer(self):
return self._servicer_context.peer()
def disable_next_response_compression(self):
pass # TODO(https://github.com/grpc/grpc/issues/4078): design, implement.
class _FaceServicerContext(face.ServicerContext):
def __init__(self, servicer_context):
self._servicer_context = servicer_context
def is_active(self):
return self._servicer_context.is_active()
def time_remaining(self):
return self._servicer_context.time_remaining()
def add_abortion_callback(self, abortion_callback):
raise NotImplementedError(
'add_abortion_callback no longer supported server-side!')
def cancel(self):
self._servicer_context.cancel()
def protocol_context(self):
return _ServerProtocolContext(self._servicer_context)
def invocation_metadata(self):
return _common.to_cygrpc_metadata(
self._servicer_context.invocation_metadata())
def initial_metadata(self, initial_metadata):
self._servicer_context.send_initial_metadata(initial_metadata)
def terminal_metadata(self, terminal_metadata):
self._servicer_context.set_terminal_metadata(terminal_metadata)
def code(self, code):
self._servicer_context.set_code(code)
def details(self, details):
self._servicer_context.set_details(details)
def _adapt_unary_request_inline(unary_request_inline):
def adaptation(request, servicer_context):
return unary_request_inline(request,
_FaceServicerContext(servicer_context))
return adaptation
def _adapt_stream_request_inline(stream_request_inline):
def adaptation(request_iterator, servicer_context):
return stream_request_inline(request_iterator,
_FaceServicerContext(servicer_context))
return adaptation
class _Callback(stream.Consumer):
def __init__(self):
self._condition = threading.Condition()
self._values = []
self._terminated = False
self._cancelled = False
def consume(self, value):
with self._condition:
self._values.append(value)
self._condition.notify_all()
def terminate(self):
with self._condition:
self._terminated = True
self._condition.notify_all()
def consume_and_terminate(self, value):
with self._condition:
self._values.append(value)
self._terminated = True
self._condition.notify_all()
def cancel(self):
with self._condition:
self._cancelled = True
self._condition.notify_all()
def draw_one_value(self):
with self._condition:
while True:
if self._cancelled:
raise abandonment.Abandoned()
elif self._values:
return self._values.pop(0)
elif self._terminated:
return None
else:
self._condition.wait()
def draw_all_values(self):
with self._condition:
while True:
if self._cancelled:
raise abandonment.Abandoned()
elif self._terminated:
all_values = tuple(self._values)
self._values = None
return all_values
else:
self._condition.wait()
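# Illustrative use (hypothetical values) of the producer/consumer protocol
# above: an event-style servicer pushes responses into the callback while the
# adapted handler drains them.
#
#   callback = _Callback()
#   callback.consume('first')
#   callback.consume_and_terminate('second')
#   assert callback.draw_all_values() == ('first', 'second')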
def _run_request_pipe_thread(request_iterator, request_consumer,
servicer_context):
thread_joined = threading.Event()
def pipe_requests():
for request in request_iterator:
if not servicer_context.is_active() or thread_joined.is_set():
return
request_consumer.consume(request)
if not servicer_context.is_active() or thread_joined.is_set():
return
request_consumer.terminate()
def stop_request_pipe(timeout): # pylint: disable=unused-argument
thread_joined.set()
request_pipe_thread = _common.CleanupThread(
stop_request_pipe, target=pipe_requests)
request_pipe_thread.start()
def _adapt_unary_unary_event(unary_unary_event):
def adaptation(request, servicer_context):
callback = _Callback()
if not servicer_context.add_callback(callback.cancel):
raise abandonment.Abandoned()
unary_unary_event(request, callback.consume_and_terminate,
_FaceServicerContext(servicer_context))
return callback.draw_all_values()[0]
return adaptation
def _adapt_unary_stream_event(unary_stream_event):
def adaptation(request, servicer_context):
callback = _Callback()
if not servicer_context.add_callback(callback.cancel):
raise abandonment.Abandoned()
unary_stream_event(request, callback,
_FaceServicerContext(servicer_context))
while True:
response = callback.draw_one_value()
if response is None:
return
else:
yield response
return adaptation
def _adapt_stream_unary_event(stream_unary_event):
def adaptation(request_iterator, servicer_context):
callback = _Callback()
if not servicer_context.add_callback(callback.cancel):
raise abandonment.Abandoned()
request_consumer = stream_unary_event(
callback.consume_and_terminate,
_FaceServicerContext(servicer_context))
_run_request_pipe_thread(request_iterator, request_consumer,
servicer_context)
return callback.draw_all_values()[0]
return adaptation
def _adapt_stream_stream_event(stream_stream_event):
def adaptation(request_iterator, servicer_context):
callback = _Callback()
if not servicer_context.add_callback(callback.cancel):
raise abandonment.Abandoned()
request_consumer = stream_stream_event(
callback, _FaceServicerContext(servicer_context))
_run_request_pipe_thread(request_iterator, request_consumer,
servicer_context)
while True:
response = callback.draw_one_value()
if response is None:
return
else:
yield response
return adaptation
class _SimpleMethodHandler(
collections.namedtuple('_MethodHandler', (
'request_streaming', 'response_streaming', 'request_deserializer',
'response_serializer', 'unary_unary', 'unary_stream',
'stream_unary', 'stream_stream',)), grpc.RpcMethodHandler):
pass
def _simple_method_handler(implementation, request_deserializer,
response_serializer):
if implementation.style is style.Service.INLINE:
if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
return _SimpleMethodHandler(
False, False, request_deserializer, response_serializer,
_adapt_unary_request_inline(implementation.unary_unary_inline),
None, None, None)
elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
return _SimpleMethodHandler(
False, True, request_deserializer, response_serializer, None,
_adapt_unary_request_inline(implementation.unary_stream_inline),
None, None)
elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
return _SimpleMethodHandler(True, False, request_deserializer,
response_serializer, None, None,
_adapt_stream_request_inline(
implementation.stream_unary_inline),
None)
elif implementation.cardinality is cardinality.Cardinality.STREAM_STREAM:
return _SimpleMethodHandler(
True, True, request_deserializer, response_serializer, None,
None, None,
_adapt_stream_request_inline(
implementation.stream_stream_inline))
elif implementation.style is style.Service.EVENT:
if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
return _SimpleMethodHandler(
False, False, request_deserializer, response_serializer,
_adapt_unary_unary_event(implementation.unary_unary_event),
None, None, None)
elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
return _SimpleMethodHandler(
False, True, request_deserializer, response_serializer, None,
_adapt_unary_stream_event(implementation.unary_stream_event),
None, None)
elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
return _SimpleMethodHandler(
True, False, request_deserializer, response_serializer, None,
None,
_adapt_stream_unary_event(implementation.stream_unary_event),
None)
elif implementation.cardinality is cardinality.Cardinality.STREAM_STREAM:
return _SimpleMethodHandler(
True, True, request_deserializer, response_serializer, None,
None, None,
_adapt_stream_stream_event(implementation.stream_stream_event))
def _flatten_method_pair_map(method_pair_map):
method_pair_map = method_pair_map or {}
flat_map = {}
for method_pair in method_pair_map:
method = _common.fully_qualified_method(method_pair[0], method_pair[1])
flat_map[method] = method_pair_map[method_pair]
return flat_map
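# Illustrative example (hypothetical service and method names): given a
# beta-style map keyed by (group, method) pairs such as
#   {('package.Service', 'Method'): implementation}
# the flattened map is keyed by the fully qualified method name produced by
# _common.fully_qualified_method, e.g. '/package.Service/Method'.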
class _GenericRpcHandler(grpc.GenericRpcHandler):
def __init__(self, method_implementations, multi_method_implementation,
request_deserializers, response_serializers):
self._method_implementations = _flatten_method_pair_map(
method_implementations)
self._request_deserializers = _flatten_method_pair_map(
request_deserializers)
self._response_serializers = _flatten_method_pair_map(
response_serializers)
self._multi_method_implementation = multi_method_implementation
def service(self, handler_call_details):
method_implementation = self._method_implementations.get(
handler_call_details.method)
if method_implementation is not None:
return _simple_method_handler(
method_implementation,
self._request_deserializers.get(handler_call_details.method),
self._response_serializers.get(handler_call_details.method))
elif self._multi_method_implementation is None:
return None
else:
try:
return None #TODO(nathaniel): call the multimethod.
except face.NoSuchMethodError:
return None
class _Server(interfaces.Server):
def __init__(self, grpc_server):
self._grpc_server = grpc_server
def add_insecure_port(self, address):
return self._grpc_server.add_insecure_port(address)
def add_secure_port(self, address, server_credentials):
return self._grpc_server.add_secure_port(address, server_credentials)
def start(self):
self._grpc_server.start()
def stop(self, grace):
return self._grpc_server.stop(grace)
def __enter__(self):
self._grpc_server.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._grpc_server.stop(None)
return False
def server(service_implementations, multi_method_implementation,
request_deserializers, response_serializers, thread_pool,
thread_pool_size):
generic_rpc_handler = _GenericRpcHandler(
service_implementations, multi_method_implementation,
request_deserializers, response_serializers)
if thread_pool is None:
effective_thread_pool = logging_pool.pool(_DEFAULT_POOL_SIZE
if thread_pool_size is None
else thread_pool_size)
else:
effective_thread_pool = thread_pool
return _Server(
grpc.server(effective_thread_pool, handlers=(generic_rpc_handler,)))
|
|
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fnmatch
import os
from mock import call
from mock import MagicMock
from mock import patch
import shotgun.config
import shotgun.driver
import shotgun.settings
from shotgun.test import base
class RunOut(object):
return_code = None
stderr = None
stdout = None
def __str__(self):
return str(self.stdout)
class TestDriver(base.BaseTestCase):
def test_driver_factory(self):
types = {
"file": "File",
"dir": "Dir",
"subs": "Subs",
"postgres": "Postgres",
"command": "Command"
}
for t, n in types.iteritems():
with patch("shotgun.driver.%s" % n) as mocked:
shotgun.driver.Driver.getDriver({"type": t}, None)
mocked.assert_called_with({"type": t}, None)
@patch('shotgun.driver.execute')
@patch('shotgun.driver.fabric.api.settings')
@patch('shotgun.driver.fabric.api.run')
def test_driver_command(self, mfabrun, mfabset, mexecute):
out = shotgun.driver.CommandOut()
out.stdout = "STDOUT"
out.return_code = "RETURN_CODE"
out.stderr = "STDERR"
runout = RunOut()
runout.stdout = "STDOUT"
runout.return_code = "RETURN_CODE"
runout.stderr = "STDERR"
mfabrun.return_value = runout
mexecute.return_value = ("RETURN_CODE", "STDOUT", "STDERR")
command = "COMMAND"
driver = shotgun.driver.Driver(
{"host": {"address": "remote_host"}}, None)
result = driver.command(command)
shotgun.driver.fabric.api.run.assert_called_with(command, pty=True)
self.assertEqual(result, out)
shotgun.driver.fabric.api.settings.assert_called_with(
host_string="remote_host", timeout=2, command_timeout=10,
warn_only=True, key_filename=None)
driver = shotgun.driver.Driver({}, None)
result = driver.command(command)
shotgun.driver.execute.assert_called_with(command)
self.assertEqual(result, out)
@patch('shotgun.driver.execute')
@patch('shotgun.driver.fabric.api.settings')
@patch('shotgun.driver.fabric.api.get')
def test_driver_get(self, mfabget, mfabset, mexecute):
mexecute.return_value = ("RETURN_CODE", "STDOUT", "STDERR")
remote_path = "/remote_dir/remote_file"
target_path = "/target_dir"
driver = shotgun.driver.Driver({
"host": {
"address": "remote_host",
"ssh-key": "path_to_key",
}
}, None)
driver.get(remote_path, target_path)
mexecute.assert_called_with('mkdir -p "{0}"'.format(target_path))
mfabget.assert_called_with(remote_path, target_path)
mfabset.assert_called_with(
host_string="remote_host", key_filename="path_to_key",
timeout=2, warn_only=True)
mexecute.reset_mock()
driver = shotgun.driver.Driver({}, None)
driver.get(remote_path, target_path)
self.assertEqual(mexecute.mock_calls, [
call('mkdir -p "{0}"'.format(target_path)),
call('cp -r "{0}" "{1}"'.format(remote_path, target_path))])
class TestFile(base.BaseTestCase):
@patch('shotgun.driver.Driver.get')
def test_snapshot(self, mget):
data = {
"type": "file",
"path": "/remote_dir/remote_file",
"host": {
"address": "remote_host",
},
}
conf = MagicMock()
conf.target = "/target"
file_driver = shotgun.driver.File(data, conf)
target_path = "/target/remote_host/remote_dir"
file_driver.snapshot()
mget.assert_called_with(data["path"], target_path)
@patch('shotgun.driver.remove_matched_files')
@patch('shotgun.driver.Driver.get')
def test_dir_exclude_called(self, mget, mremove):
data = {
"type": "dir",
"path": "/remote_dir/",
"exclude": ["*test"],
"host": {
"address": "remote_host",
},
}
conf = MagicMock()
conf.target = "/target"
dir_driver = shotgun.driver.Dir(data, conf)
target_path = "/target/remote_host/remote_dir"
dir_driver.snapshot()
mget.assert_called_with(data["path"], target_path)
mremove.assert_called_with(target_path, data['exclude'])
class TestSubs(base.BaseTestCase):
def setUp(self):
self.data = {
"type": "subs",
"path": "/remote_dir/remote_file",
"host": {
"address": "remote_host",
},
"subs": {
"line0": "LINE0",
"line1": "LINE1"
}
}
self.conf = MagicMock()
self.conf.target = "/target"
self.sedscript = MagicMock()
self.sedscript.name = "SEDSCRIPT"
self.sedscript.write = MagicMock()
@patch('shotgun.driver.tempfile.NamedTemporaryFile')
@patch('shotgun.driver.Driver.get')
@patch('shotgun.driver.execute')
def test_sed(self, mexecute, mget, mntemp):
mexecute.return_value = ("RETURN_CODE", "STDOUT", "STDERR")
mntemp.return_value = self.sedscript
subs_driver = shotgun.driver.Subs(self.data, self.conf)
subs_driver.sed("from_file", "to_file")
self.assertEqual(self.sedscript.write.mock_calls, [
call("s/{0}/{1}/g\n".format(old, new))
for old, new in self.data["subs"].iteritems()])
shotgun.driver.execute.assert_called_with(
"cat from_file | sed -f SEDSCRIPT", to_filename="to_file")
subs_driver.sed("from_file.gz", "to_file.gz")
shotgun.driver.execute.assert_called_with(
"cat from_file.gz | gunzip -c | sed -f SEDSCRIPT | gzip -c",
to_filename="to_file.gz")
subs_driver.sed("from_file.bz2", "to_file.bz2")
shotgun.driver.execute.assert_called_with(
"cat from_file.bz2 | bunzip2 -c | sed -f SEDSCRIPT | bzip2 -c",
to_filename="to_file.bz2")
@patch('shotgun.driver.os.walk')
@patch('shotgun.driver.Subs.sed')
@patch('shotgun.driver.Driver.get')
@patch('shotgun.driver.execute')
def test_snapshot(self, mexecute, mdriverget, msed, mwalk):
        """1. Should get remote (or local) file (or directory)
        2. Should put it into /target/host.domain.tld
        3. Should walk through and check if files match given path pattern
        4. If matched, sed them
        """
        mexecute.return_value = ("RETURN_CODE", "STDOUT", "STDERR")
"""this return_value corresponds to the following structure
/target/remote_host/remote_dir/
/target/remote_host/remote_dir/remote_file
/target/remote_host/remote_dir/1
/target/remote_host/remote_dir/2
/target/remote_host/remote_dir/3/
/target/remote_host/remote_dir/3/4
/target/remote_host/remote_dir/3/5
/target/remote_host/remote_dir/3/6/
"""
mock_walk = [
(
'/target/remote_host/remote_dir',
['3'],
['1', '2', 'remote_file']
),
('/target/remote_host/remote_dir/3', ['6'], ['5', '4']),
('/target/remote_host/remote_dir/3/6', [], [])
]
mwalk.return_value = mock_walk
subs_driver = shotgun.driver.Subs(self.data, self.conf)
subs_driver.snapshot()
sed_calls = []
execute_calls = []
for root, _, files in mock_walk:
for filename in files:
fullfilename = os.path.join(root, filename)
# /target/remote_host
tgt_host = os.path.join(
self.conf.target, self.data["host"]["address"])
rel_tgt_host = os.path.relpath(fullfilename, tgt_host)
# /remote_dir/remote_file
match_orig_path = os.path.join("/", rel_tgt_host)
if not fnmatch.fnmatch(match_orig_path, self.data["path"]):
continue
tempfilename = "STDOUT"
execute_calls.append(call("mktemp"))
sed_calls.append(call(fullfilename, tempfilename))
execute_calls.append(
call('mv -f "{0}" "{1}"'.format(
tempfilename, fullfilename)))
self.assertEqual(msed.mock_calls, sed_calls)
self.assertEqual(mexecute.mock_calls, execute_calls)
|
|
from django.shortcuts import render
from django.template.response import TemplateResponse
from django.core.mail import EmailMessage
from django.contrib import messages
from django.conf import settings
from django.shortcuts import render_to_response, redirect
from django.http import Http404
from django.template import RequestContext, loader, Context
import os
from pygeocoder import Geocoder
from configurations import settings as conf_settings
from models import *
from healthcalc.models import *
from url_link.models import URLLinks
EMAIL_DIRS = settings.EMAIL_DIRS
atp_number = conf_settings["ATP_NUMBER"]
geo_data = settings.GEO_DATA
default_country = {'country_code': 'USA', 'country_lang': 'en'}
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
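# Illustrative behaviour of get_client_ip (header values below are assumptions):
#   HTTP_X_FORWARDED_FOR = "203.0.113.7, 10.0.0.1"  ->  "203.0.113.7"
#   no X-Forwarded-For header                       ->  REMOTE_ADDR (e.g. "192.0.2.5")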
def get_country_code(request):
import pygeoip
gi = pygeoip.GeoIP(geo_data)
country_ip = get_client_ip(request)
c_country = gi.country_name_by_addr(country_ip)
return c_country
def get_country(request):
country = get_country_code(request)
flpcountries = {'Australia':'AUS', 'Belgium':'BEL', 'Canada':'CAN',
'Czech':'CZE', 'Greece':'GRC', 'Ireland':'IRL', 'Luxembourg':'LUX',
'Netherlands':'NLD', 'New Zealand':'NZL', 'Northern Ireland':'ILN',
'Philippines':'PHL', 'Portugal':'PRT', 'Slovakia':'SVK', 'South Africa':'ZAF',
'Spain':'ESP', 'United Kingdom':'GBR', 'United States':'USA',
'Great Britain':'GBR', 'Malaysia':'MYS', 'Singapore':'SGP', 'Mexico':'MEX'}
flplanguage = {'Australia':'en', 'Belgium':'nl', 'Canada':'en',
'Czech':'cs', 'Greece':'el', 'Ireland':'en', 'Luxembourg':'de',
'Netherlands':'nl', 'New Zealand':'en', 'Northern Ireland':'en',
'Philippines':'en', 'Portugal':'pt', 'Slovakia':'sk', 'South Africa':'en',
'Spain':'es', 'United Kingdom':'en', 'United States':'en',
'Great Britain':'en', 'Malaysia':'en', 'Singapore':'en', 'Mexico':'es'}
try:
country_code = flpcountries[country]
country_lang = flplanguage[country]
current_country = "Your current country is " + country
    except KeyError:
        # Country not present in the lookup tables above; fall back to US defaults
        # so the return statement below never references an undefined name.
        country_code = 'USA'
        country_lang = 'en'
        current_country = "Your current country is United States"
    return {'country_code': country_code, 'country_lang': country_lang, 'current_country': current_country}
def getlatlong(request):
return str(request.GET.get('longitude')), str(request.GET.get('latitude'))
def home(request):
if request.method == 'POST':
f = open(EMAIL_DIRS, 'a')
f.write(request.POST['email'] + "\n")
f.close()
latlong = getlatlong(request)
print latlong
request.session["latlong"] = latlong
latitude=longitude=""
if latlong[0] != 'None':
longitude, latitude = getlatlong(request)
return render_to_response('ajax.html', dict(
country = (latitude + ',' + longitude),
atp_number = conf_settings["ATP_NUMBER"],
twitter = conf_settings["Twitter"],
email = conf_settings["Email"],
mobile = conf_settings["Mobile"],
linkedin = conf_settings["Linkedin"],
facebook = conf_settings["Facebook"],
youtube = conf_settings["Youtube"],
name = conf_settings["NAME"],
), context_instance=RequestContext(request))
else:
return render_to_response('home.html', dict(
country = (latitude + ',' + longitude),
home_id="current",
atp_number = conf_settings["ATP_NUMBER"],
twitter = conf_settings["Twitter"],
email = conf_settings["Email"],
mobile = conf_settings["Mobile"],
linkedin = conf_settings["Linkedin"],
facebook = conf_settings["Facebook"],
youtube = conf_settings["Youtube"],
name = conf_settings["NAME"],
), context_instance=RequestContext(request))
def about(request):
return render_to_response('about.html', dict(pages_id="current",), context_instance=RequestContext(request))
def contact(request):
return render_to_response('contact.html', dict(pages_id="current",), context_instance=RequestContext(request))
def faqs(request):
return render_to_response('faqs.html', dict(pages_id="current",), context_instance=RequestContext(request))
def opportunity(request):
return render_to_response('opportunity.html', dict(pages_id="current",), context_instance=RequestContext(request))
def benefits(request):
return render_to_response('benefits-of-aloe.html', dict(pages_id="current",), context_instance=RequestContext(request))
def products(request):
return render_to_response('forever-living-products.html', dict(pages_id="current",), context_instance=RequestContext(request))
def uses(request):
return render_to_response('aloe-vera-uses.html', dict(pages_id="current",), context_instance=RequestContext(request))
def stories(request):
return render_to_response('stories.html', dict(pages_id="current",), context_instance=RequestContext(request))
def weight_loss(request):
return render_to_response('weight-loss.html', dict(pages_id="current",), context_instance=RequestContext(request))
def drinks(request):
country_details = get_country(request)
country = country_details['country_code']
lang = country_details['country_lang']
return redirect('https://www.foreverliving.com/retail/entry/Shop.do?language='+lang+'&store='+country+'&distribID='
+ str(atp_number)+'&categoryName=DrinksR')
def nutrition(request):
country_details = get_country(request)
country = country_details['country_code']
lang = country_details['country_lang']
return redirect('https://www.foreverliving.com/retail/entry/Shop.do?language='+lang+'&store='+country+'&distribID='
+ str(atp_number)+'&categoryName=NutritionR')
def bee_products(request):
country_details = get_country(request)
country = country_details['country_code']
lang = country_details['country_lang']
return redirect('https://www.foreverliving.com/retail/entry/Shop.do?language='+lang+'&store='+country+'&distribID='
+ str(atp_number)+'&categoryName=Bee_ProductsR')
def weight_management(request):
country_details = get_country(request)
country = country_details['country_code']
lang = country_details['country_lang']
return redirect('https://www.foreverliving.com/retail/entry/Shop.do?language='+lang+'&store='+country+'&distribID='
+ str(atp_number)+'&categoryName=Weight_ManagementR')
def personal_care(request):
country_details = get_country(request)
country = country_details['country_code']
lang = country_details['country_lang']
return redirect('https://www.foreverliving.com/retail/entry/Shop.do?language='+lang+'&store='+country+'&distribID='
+ str(atp_number)+'&categoryName=Personal_CareR')
def skin_care(request):
country_details = get_country(request)
country = country_details['country_code']
lang = country_details['country_lang']
return redirect('https://www.foreverliving.com/retail/entry/Shop.do?language='+lang+'&store='+country+'&distribID='
+ str(atp_number)+'&categoryName=Skin_CareR')
def business_packs(request):
country_details = get_country(request)
country = country_details['country_code']
lang = country_details['country_lang']
return redirect('https://www.foreverliving.com/retail/entry/Shop.do?language='+lang+'&store='+country+'&distribID='
+ str(atp_number)+'&categoryName=Business_PacksR')
def combo_packs(request):
country_details = get_country(request)
country = country_details['country_code']
lang = country_details['country_lang']
return redirect('https://www.foreverliving.com/retail/entry/Shop.do?language='+lang+'&store='+country+'&distribID='
+ str(atp_number)+'&categoryName=Combo_PacksR')
def sonya_skin_care(request):
country_details = get_country(request)
country = country_details['country_code']
lang = country_details['country_lang']
return redirect('https://www.foreverliving.com/retail/entry/Shop.do?language='+lang+'&store='+country+'&distribID='
+ str(atp_number)+'&categoryName=Sonya_Skin_CareR')
def sonya_cosmetics(request):
country_details = get_country(request)
country = country_details['country_code']
lang = country_details['country_lang']
return redirect('https://www.foreverliving.com/retail/entry/Shop.do?language='+lang+'&store='+country+'&distribID='
+ str(atp_number)+'&categoryName=DrinksR')
def literature(request):
country_details = get_country(request)
country = country_details['country_code']
lang = country_details['country_lang']
return redirect('https://www.foreverliving.com/retail/entry/Shop.do?language='+lang+'&store='+country+'&distribID='
+ str(atp_number)+'&categoryName=LiteraturesR')
def shop(request):
country_details = get_country(request)
country = country_details['country_code']
lang = country_details['country_lang']
return redirect('https://www.foreverliving.com/retail/entry/Shop.do?language='+lang+'&store='+country+'&distribID='
+ str(atp_number))
def aloe_vera_gel(request):
country_details = get_country(request)
country = country_details['country_code']
lang = country_details['country_lang']
return redirect('https://www.foreverliving.com/marketing/Product.do?code=015&language='+lang+'&store='+country+'&distribID='
+ str(atp_number))
def forever_daily(request):
country_details = get_country(request)
country = country_details['country_code']
lang = country_details['country_lang']
return redirect('https://www.foreverliving.com/marketing/Product.do?code=439&language='+lang+'&store='+country+'&distribID='
+ str(atp_number))
def fabx(request):
country_details = get_country(request)
country = country_details['country_code']
lang = country_details['country_lang']
return redirect('https://www.foreverliving.com/marketing/Product.do?code=440&language='+lang+'&store='+country+'&distribID='
+ str(atp_number))
def vital5(request):
country_details = get_country(request)
country = country_details['country_code']
lang = country_details['country_lang']
return redirect('https://www.foreverliving.com/marketing/Product.do?code=456&language='+lang+'&store='+country+'&distribID='
+ str(atp_number))
def join(request):
country_details = get_country(request)
country= country_details['country_code']
lang = country_details['country_lang']
return redirect('https://www.foreverliving.com/marketing/joinnow/applicationForm.do?action=display'
'&store='+country+'&langID='+lang+'&distribID='+ str(atp_number))
def get_products_url(request, url=None):
return render_to_response('urls.html', dict(pages_id="current", urls=ProductsURLs.objects.all()),
context_instance=RequestContext(request))
def urls_links(request, urls=None):
"""This is a dynamic views for a dynamic url """
country_details = get_country(request)
country = country_details['country_code']
lang = country_details['country_lang']
link = {}
urls_and_links = URLLinks.objects.filter(url=urls)
for url_link in urls_and_links:
item = str(url_link).split(' ')
link['url'] = str(item[0])
link['link'] = str(item[1])
if urls == link['url']:
            return redirect(link['link'] + '&language='+lang+'&store='+country+'&distribID='+ str(atp_number))
    # No matching URL was found; raise a 404 instead of implicitly returning None.
    raise Http404("No link configured for '%s'" % urls)
|
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import collections
import re
import os
import ambari_simplejson as json  # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
from resource_management.libraries.script import Script
from resource_management.libraries.functions import default
from resource_management.libraries.functions import format
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import format_jvm_option
from resource_management.libraries.functions.is_empty import is_empty
from resource_management.libraries.functions.version import format_stack_version
from resource_management.libraries.functions.version import compare_versions
from resource_management.libraries.functions.expect import expect
from ambari_commons.os_check import OSCheck
from ambari_commons.constants import AMBARI_SUDO_BINARY
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
dfs_type = default("/commandParams/dfs_type", "")
artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
jdk_name = default("/hostLevelParams/jdk_name", None)
java_home = config['hostLevelParams']['java_home']
java_version = expect("/hostLevelParams/java_version", int)
jdk_location = config['hostLevelParams']['jdk_location']
sudo = AMBARI_SUDO_BINARY
ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
stack_version_unformatted = config['hostLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)
restart_type = default("/commandParams/restart_type", "")
version = default("/commandParams/version", None)
# Handle upgrade and downgrade
if (restart_type.lower() == "rolling_upgrade" or restart_type.lower() == "nonrolling_upgrade") and version:
stack_version_formatted = format_stack_version(version)
security_enabled = config['configurations']['cluster-env']['security_enabled']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
# Some datanode settings
dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)
dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None)
dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
secure_dn_ports_are_in_use = False
def get_port(address):
"""
Extracts port from the address like 0.0.0.0:1019
"""
if address is None:
return None
m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
if m is not None:
return int(m.group(2))
else:
return None
def is_secure_port(port):
"""
    Returns True if the port is in the privileged (root-owned) range on *nix systems
"""
if port is not None:
return port < 1024
else:
return False
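# Illustrative examples for the two helpers above (addresses are assumptions):
#   get_port("0.0.0.0:1019")                  -> 1019
#   get_port("https://dn.example.com:50475")  -> 50475
#   is_secure_port(1019)                      -> True   (below 1024, root-owned)
#   is_secure_port(50075)                     -> False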
# hadoop default params
mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
# upgrades would cause these directories to have a version instead of "current"
# which would cause a lot of problems when writing out hadoop-env.sh; instead
# force the use of "current" in the hook
hdfs_user_nofile_limit = default("/configurations/hadoop-env/hdfs_user_nofile_limit", "128000")
hadoop_home = stack_select.get_hadoop_dir("home", force_latest_on_upgrade=True)
hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec", force_latest_on_upgrade=True)
hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
hadoop_secure_dn_user = hdfs_user
hadoop_dir = "/etc/hadoop"
versioned_stack_root = '/usr/hdp/current'
hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
datanode_max_locked_memory = config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory']
is_datanode_max_locked_memory_set = not is_empty(config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory'])
# HDP 2.2+ params
if Script.is_stack_greater_or_equal("2.2"):
mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
# not supported in HDP 2.2+
hadoop_conf_empty_dir = None
if not security_enabled:
hadoop_secure_dn_user = '""'
else:
dfs_dn_port = get_port(dfs_dn_addr)
dfs_dn_http_port = get_port(dfs_dn_http_addr)
dfs_dn_https_port = get_port(dfs_dn_https_addr)
# We try to avoid inability to start datanode as a plain user due to usage of root-owned ports
if dfs_http_policy == "HTTPS_ONLY":
secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_https_port)
elif dfs_http_policy == "HTTP_AND_HTTPS":
secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port) or is_secure_port(dfs_dn_https_port)
else: # params.dfs_http_policy == "HTTP_ONLY" or not defined:
secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port)
if secure_dn_ports_are_in_use:
hadoop_secure_dn_user = hdfs_user
else:
hadoop_secure_dn_user = '""'
#hadoop params
hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
jsvc_path = "/usr/lib/bigtop-utils"
hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
jtnode_opt_newsize = "200m"
jtnode_opt_maxnewsize = "200m"
jtnode_heapsize = "1024m"
ttnode_heapsize = "1024m"
dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
nfsgateway_heapsize = config['configurations']['hadoop-env']['nfsgateway_heapsize']
mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
#users and groups
hbase_user = config['configurations']['hbase-env']['hbase_user']
smoke_user = config['configurations']['cluster-env']['smokeuser']
gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
gmond_user = config['configurations']['ganglia-env']["gmond_user"]
tez_user = config['configurations']['tez-env']["tez_user"]
oozie_user = config['configurations']['oozie-env']["oozie_user"]
falcon_user = config['configurations']['falcon-env']["falcon_user"]
ranger_user = config['configurations']['ranger-env']["ranger_user"]
user_group = config['configurations']['cluster-env']['user_group']
ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
namenode_host = default("/clusterHostInfo/namenode_host", [])
hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
oozie_servers = default("/clusterHostInfo/oozie_server", [])
falcon_server_hosts = default("/clusterHostInfo/falcon_server_hosts", [])
ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
has_namenode = not len(namenode_host) == 0
has_ganglia_server = not len(ganglia_server_hosts) == 0
has_tez = 'tez-site' in config['configurations']
has_hbase_masters = not len(hbase_master_hosts) == 0
has_oozie_server = not len(oozie_servers) == 0
has_falcon_server_hosts = not len(falcon_server_hosts) == 0
has_ranger_admin = not len(ranger_admin_hosts) == 0
if has_namenode or dfs_type == 'HCFS':
hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
hbase_tmp_dir = "/tmp/hbase-hbase"
proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
ranger_group = config['configurations']['ranger-env']['ranger_group']
dfs_cluster_administrators_group = config['configurations']['hdfs-site']["dfs.cluster.administrators"]
ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
fetch_nonlocal_groups = config['configurations']['cluster-env']["fetch_nonlocal_groups"]
smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
if has_hbase_masters:
hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
#repo params
repo_info = config['hostLevelParams']['repo_info']
service_repo_info = default("/hostLevelParams/service_repo_info",None)
user_to_groups_dict = collections.defaultdict(lambda:[user_group])
user_to_groups_dict[smoke_user] = [proxyuser_group]
if has_ganglia_server:
user_to_groups_dict[gmond_user] = [gmond_user]
user_to_groups_dict[gmetad_user] = [gmetad_user]
if has_tez:
user_to_groups_dict[tez_user] = [proxyuser_group]
if has_oozie_server:
user_to_groups_dict[oozie_user] = [proxyuser_group]
if has_falcon_server_hosts:
user_to_groups_dict[falcon_user] = [proxyuser_group]
if has_ranger_admin:
user_to_groups_dict[ranger_user] = [ranger_group]
user_to_gid_dict = collections.defaultdict(lambda:user_group)
user_list = json.loads(config['hostLevelParams']['user_list'])
group_list = json.loads(config['hostLevelParams']['group_list'])
host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
tez_am_view_acls = config['configurations']['tez-site']["tez.am.view-acls"]
override_uid = str(default("/configurations/cluster-env/override_uid", "true")).lower()
|
|
import os
import unittest
import ray
from ray.rllib import _register_all
from ray.tune.result import TIMESTEPS_TOTAL
from ray.tune import Trainable, TuneError
from ray.tune import register_trainable, run_experiments
from ray.tune.logger import Logger
from ray.tune.experiment import Experiment
from ray.tune.trial import Trial, ExportFormat
class RunExperimentTest(unittest.TestCase):
def tearDown(self):
ray.shutdown()
_register_all() # re-register the evicted objects
def testDict(self):
def train(config, reporter):
for i in range(100):
reporter(timesteps_total=i)
register_trainable("f1", train)
trials = run_experiments({
"foo": {
"run": "f1",
},
"bar": {
"run": "f1",
}
})
for trial in trials:
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 99)
def testExperiment(self):
def train(config, reporter):
for i in range(100):
reporter(timesteps_total=i)
register_trainable("f1", train)
exp1 = Experiment(**{
"name": "foo",
"run": "f1",
})
[trial] = run_experiments(exp1)
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 99)
def testExperimentList(self):
def train(config, reporter):
for i in range(100):
reporter(timesteps_total=i)
register_trainable("f1", train)
exp1 = Experiment(**{
"name": "foo",
"run": "f1",
})
exp2 = Experiment(**{
"name": "bar",
"run": "f1",
})
trials = run_experiments([exp1, exp2])
for trial in trials:
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 99)
def testAutoregisterTrainable(self):
def train(config, reporter):
for i in range(100):
reporter(timesteps_total=i)
class B(Trainable):
def _train(self):
return {"timesteps_this_iter": 1, "done": True}
register_trainable("f1", train)
trials = run_experiments({
"foo": {
"run": train,
},
"bar": {
"run": B
}
})
for trial in trials:
self.assertEqual(trial.status, Trial.TERMINATED)
def testCheckpointAtEnd(self):
class train(Trainable):
def _train(self):
return {"timesteps_this_iter": 1, "done": True}
def _save(self, path):
checkpoint = path + "/checkpoint"
with open(checkpoint, "w") as f:
f.write("OK")
return checkpoint
trials = run_experiments({
"foo": {
"run": train,
"checkpoint_at_end": True
}
})
for trial in trials:
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertTrue(trial.has_checkpoint())
def testExportFormats(self):
class train(Trainable):
def _train(self):
return {"timesteps_this_iter": 1, "done": True}
def _export_model(self, export_formats, export_dir):
path = export_dir + "/exported"
with open(path, "w") as f:
f.write("OK")
return {export_formats[0]: path}
trials = run_experiments({
"foo": {
"run": train,
"export_formats": ["format"]
}
})
for trial in trials:
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertTrue(
os.path.exists(os.path.join(trial.logdir, "exported")))
def testInvalidExportFormats(self):
class train(Trainable):
def _train(self):
return {"timesteps_this_iter": 1, "done": True}
def _export_model(self, export_formats, export_dir):
ExportFormat.validate(export_formats)
return {}
def fail_trial():
run_experiments({
"foo": {
"run": train,
"export_formats": ["format"]
}
})
self.assertRaises(TuneError, fail_trial)
def testCustomResources(self):
ray.shutdown()
ray.init(resources={"hi": 3})
class train(Trainable):
def _train(self):
return {"timesteps_this_iter": 1, "done": True}
trials = run_experiments({
"foo": {
"run": train,
"resources_per_trial": {
"cpu": 1,
"custom_resources": {
"hi": 2
}
}
}
})
for trial in trials:
self.assertEqual(trial.status, Trial.TERMINATED)
def testCustomLogger(self):
class CustomLogger(Logger):
def on_result(self, result):
with open(os.path.join(self.logdir, "test.log"), "w") as f:
f.write("hi")
[trial] = run_experiments({
"foo": {
"run": "__fake",
"stop": {
"training_iteration": 1
},
"loggers": [CustomLogger]
}
})
self.assertTrue(os.path.exists(os.path.join(trial.logdir, "test.log")))
self.assertFalse(
os.path.exists(os.path.join(trial.logdir, "params.json")))
[trial] = run_experiments({
"foo": {
"run": "__fake",
"stop": {
"training_iteration": 1
}
}
})
self.assertTrue(
os.path.exists(os.path.join(trial.logdir, "params.json")))
[trial] = run_experiments({
"foo": {
"run": "__fake",
"stop": {
"training_iteration": 1
},
"loggers": []
}
})
self.assertFalse(
os.path.exists(os.path.join(trial.logdir, "params.json")))
def testCustomTrialString(self):
[trial] = run_experiments({
"foo": {
"run": "__fake",
"stop": {
"training_iteration": 1
},
"trial_name_creator":
lambda t: "{}_{}_321".format(t.trainable_name, t.trial_id)
}
})
        self.assertEqual(
str(trial), "{}_{}_321".format(trial.trainable_name,
trial.trial_id))
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
|
|
"""Support the ISY-994 controllers."""
from __future__ import annotations
import asyncio
from urllib.parse import urlparse
from aiohttp import CookieJar
import async_timeout
from pyisy import ISY, ISYConnectionError, ISYInvalidAuthError, ISYResponseParseError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client, config_validation as cv
import homeassistant.helpers.device_registry as dr
from homeassistant.helpers.typing import ConfigType
from .const import (
_LOGGER,
CONF_IGNORE_STRING,
CONF_RESTORE_LIGHT_STATE,
CONF_SENSOR_STRING,
CONF_TLS_VER,
CONF_VAR_SENSOR_STRING,
DEFAULT_IGNORE_STRING,
DEFAULT_RESTORE_LIGHT_STATE,
DEFAULT_SENSOR_STRING,
DEFAULT_VAR_SENSOR_STRING,
DOMAIN,
ISY994_ISY,
ISY994_NODES,
ISY994_PROGRAMS,
ISY994_VARIABLES,
MANUFACTURER,
PLATFORMS,
PROGRAM_PLATFORMS,
)
from .helpers import _categorize_nodes, _categorize_programs, _categorize_variables
from .services import async_setup_services, async_unload_services
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOST): cv.url,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_TLS_VER): vol.Coerce(float),
vol.Optional(
CONF_IGNORE_STRING, default=DEFAULT_IGNORE_STRING
): cv.string,
vol.Optional(
CONF_SENSOR_STRING, default=DEFAULT_SENSOR_STRING
): cv.string,
vol.Optional(
CONF_VAR_SENSOR_STRING, default=DEFAULT_VAR_SENSOR_STRING
): cv.string,
vol.Required(
CONF_RESTORE_LIGHT_STATE, default=DEFAULT_RESTORE_LIGHT_STATE
): bool,
}
)
},
extra=vol.ALLOW_EXTRA,
)
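# Illustrative YAML accepted by CONFIG_SCHEMA above (values are assumptions;
# "isy994" is the domain referenced in async_setup's docstring, and the optional
# keys follow the CONF_* constants imported from .const):
#
#   isy994:
#     host: http://192.168.1.10:80
#     username: admin
#     password: !secret isy_password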
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the isy994 integration from YAML."""
isy_config: ConfigType | None = config.get(DOMAIN)
hass.data.setdefault(DOMAIN, {})
if not isy_config:
return True
# Only import if we haven't before.
config_entry = _async_find_matching_config_entry(hass)
if not config_entry:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=dict(isy_config),
)
)
return True
# Update the entry based on the YAML configuration, in case it changed.
hass.config_entries.async_update_entry(config_entry, data=dict(isy_config))
return True
@callback
def _async_find_matching_config_entry(hass):
for entry in hass.config_entries.async_entries(DOMAIN):
if entry.source == config_entries.SOURCE_IMPORT:
return entry
async def async_setup_entry(
hass: HomeAssistant, entry: config_entries.ConfigEntry
) -> bool:
"""Set up the ISY 994 integration."""
# As there currently is no way to import options from yaml
    # when setting up a config entry, we fall back to adding
# the options to the config entry and pull them out here if
# they are missing from the options
_async_import_options_from_data_if_missing(hass, entry)
hass.data[DOMAIN][entry.entry_id] = {}
hass_isy_data = hass.data[DOMAIN][entry.entry_id]
hass_isy_data[ISY994_NODES] = {}
for platform in PLATFORMS:
hass_isy_data[ISY994_NODES][platform] = []
hass_isy_data[ISY994_PROGRAMS] = {}
for platform in PROGRAM_PLATFORMS:
hass_isy_data[ISY994_PROGRAMS][platform] = []
hass_isy_data[ISY994_VARIABLES] = []
isy_config = entry.data
isy_options = entry.options
# Required
user = isy_config[CONF_USERNAME]
password = isy_config[CONF_PASSWORD]
host = urlparse(isy_config[CONF_HOST])
# Optional
tls_version = isy_config.get(CONF_TLS_VER)
ignore_identifier = isy_options.get(CONF_IGNORE_STRING, DEFAULT_IGNORE_STRING)
sensor_identifier = isy_options.get(CONF_SENSOR_STRING, DEFAULT_SENSOR_STRING)
variable_identifier = isy_options.get(
CONF_VAR_SENSOR_STRING, DEFAULT_VAR_SENSOR_STRING
)
if host.scheme == "http":
https = False
port = host.port or 80
session = aiohttp_client.async_create_clientsession(
hass, verify_ssl=None, cookie_jar=CookieJar(unsafe=True)
)
elif host.scheme == "https":
https = True
port = host.port or 443
session = aiohttp_client.async_get_clientsession(hass)
else:
_LOGGER.error("The isy994 host value in configuration is invalid")
return False
# Connect to ISY controller.
isy = ISY(
host.hostname,
port,
username=user,
password=password,
use_https=https,
tls_ver=tls_version,
webroot=host.path,
websession=session,
use_websocket=True,
)
try:
async with async_timeout.timeout(60):
await isy.initialize()
except asyncio.TimeoutError as err:
raise ConfigEntryNotReady(
f"Timed out initializing the ISY; device may be busy, trying again later: {err}"
) from err
except ISYInvalidAuthError as err:
_LOGGER.error(
"Invalid credentials for the ISY, please adjust settings and try again: %s",
err,
)
return False
except ISYConnectionError as err:
raise ConfigEntryNotReady(
f"Failed to connect to the ISY, please adjust settings and try again: {err}"
) from err
except ISYResponseParseError as err:
raise ConfigEntryNotReady(
f"Invalid XML response from ISY; Ensure the ISY is running the latest firmware: {err}"
) from err
_categorize_nodes(hass_isy_data, isy.nodes, ignore_identifier, sensor_identifier)
_categorize_programs(hass_isy_data, isy.programs)
_categorize_variables(hass_isy_data, isy.variables, variable_identifier)
# Dump ISY Clock Information. Future: Add ISY as sensor to Hass with attrs
_LOGGER.info(repr(isy.clock))
hass_isy_data[ISY994_ISY] = isy
await _async_get_or_create_isy_device_in_registry(hass, entry, isy)
# Load platforms for the devices in the ISY controller that we support.
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
@callback
def _async_stop_auto_update(event) -> None:
"""Stop the isy auto update on Home Assistant Shutdown."""
_LOGGER.debug("ISY Stopping Event Stream and automatic updates")
isy.websocket.stop()
_LOGGER.debug("ISY Starting Event Stream and automatic updates")
isy.websocket.start()
entry.async_on_unload(entry.add_update_listener(_async_update_listener))
entry.async_on_unload(
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _async_stop_auto_update)
)
# Register Integration-wide Services:
async_setup_services(hass)
return True
async def _async_update_listener(
hass: HomeAssistant, entry: config_entries.ConfigEntry
):
"""Handle options update."""
await hass.config_entries.async_reload(entry.entry_id)
@callback
def _async_import_options_from_data_if_missing(
hass: HomeAssistant, entry: config_entries.ConfigEntry
):
options = dict(entry.options)
modified = False
for importable_option in (
CONF_IGNORE_STRING,
CONF_SENSOR_STRING,
CONF_RESTORE_LIGHT_STATE,
):
if importable_option not in entry.options and importable_option in entry.data:
options[importable_option] = entry.data[importable_option]
modified = True
if modified:
hass.config_entries.async_update_entry(entry, options=options)
@callback
def _async_isy_to_configuration_url(isy: ISY) -> str:
"""Extract the configuration url from the isy."""
connection_info = isy.conn.connection_info
proto = "https" if "tls" in connection_info else "http"
return f"{proto}://{connection_info['addr']}:{connection_info['port']}"
async def _async_get_or_create_isy_device_in_registry(
hass: HomeAssistant, entry: config_entries.ConfigEntry, isy
) -> None:
device_registry = await dr.async_get_registry(hass)
url = _async_isy_to_configuration_url(isy)
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, isy.configuration["uuid"])},
identifiers={(DOMAIN, isy.configuration["uuid"])},
manufacturer=MANUFACTURER,
name=isy.configuration["name"],
model=isy.configuration["model"],
sw_version=isy.configuration["firmware"],
configuration_url=url,
)
async def async_unload_entry(
hass: HomeAssistant, entry: config_entries.ConfigEntry
) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
hass_isy_data = hass.data[DOMAIN][entry.entry_id]
isy = hass_isy_data[ISY994_ISY]
def _stop_auto_update() -> None:
"""Stop the isy auto update."""
_LOGGER.debug("ISY Stopping Event Stream and automatic updates")
isy.websocket.stop()
await hass.async_add_executor_job(_stop_auto_update)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
async_unload_services(hass)
return unload_ok
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import sys
import traceback
from oslo.config import cfg
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import local
from nova.openstack.common import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
'''RPC Envelope Version.
This version number applies to the top level structure of messages sent out.
It does *not* apply to the message payload, which must be versioned
independently. For example, when using rpc APIs, a version number is applied
for changes to the API being exposed over rpc. This version number is handled
in the rpc proxy and dispatcher modules.
This version number applies to the message envelope that is used in the
serialization done inside the rpc layer. See serialize_msg() and
deserialize_msg().
The current message format (version 2.0) is very simple. It is:
{
'oslo.version': <RPC Envelope Version as a String>,
'oslo.message': <Application Message Payload, JSON encoded>
}
Message format version '1.0' is just considered to be the messages we sent
without a message envelope.
So, the current message envelope just includes the envelope version. It may
eventually contain additional information, such as a signature for the message
payload.
We will JSON encode the application message payload. The message envelope,
which includes the JSON encoded application message body, will be passed down
to the messaging libraries as a dict.
'''
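# Illustrative version 2.0 envelope, as produced by serialize_msg() below
# (the payload contents are an assumption):
#
#   {'oslo.version': '2.0',
#    'oslo.message': '{"method": "ping", "args": {}}'}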
_RPC_ENVELOPE_VERSION = '2.0'
_VERSION_KEY = 'oslo.version'
_MESSAGE_KEY = 'oslo.message'
class RPCException(Exception):
message = _("An unknown RPC related exception occurred.")
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if not message:
try:
message = self.message % kwargs
except Exception:
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_('Exception in string format operation'))
for name, value in kwargs.iteritems():
LOG.error("%s: %s" % (name, value))
# at least get the core message out if something happened
message = self.message
super(RPCException, self).__init__(message)
class RemoteError(RPCException):
"""Signifies that a remote class has raised an exception.
Contains a string representation of the type of the original exception,
the value of the original exception, and the traceback. These are
sent to the parent as a joined string so printing the exception
contains all of the relevant info.
"""
message = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.")
def __init__(self, exc_type=None, value=None, traceback=None):
self.exc_type = exc_type
self.value = value
self.traceback = traceback
super(RemoteError, self).__init__(exc_type=exc_type,
value=value,
traceback=traceback)
class Timeout(RPCException):
"""Signifies that a timeout has occurred.
This exception is raised if the rpc_response_timeout is reached while
waiting for a response from the remote side.
"""
message = _('Timeout while waiting on RPC response - '
'topic: "%(topic)s", RPC method: "%(method)s" '
'info: "%(info)s"')
def __init__(self, info=None, topic=None, method=None):
"""
:param info: Extra info to convey to the user
:param topic: The topic that the rpc call was sent to
        :param method: The name of the rpc method being called
"""
self.info = info
self.topic = topic
self.method = method
super(Timeout, self).__init__(
None,
info=info or _('<unknown>'),
topic=topic or _('<unknown>'),
method=method or _('<unknown>'))
class DuplicateMessageError(RPCException):
message = _("Found duplicate message(%(msg_id)s). Skipping it.")
class InvalidRPCConnectionReuse(RPCException):
message = _("Invalid reuse of an RPC connection.")
class UnsupportedRpcVersion(RPCException):
message = _("Specified RPC version, %(version)s, not supported by "
"this endpoint.")
class UnsupportedRpcEnvelopeVersion(RPCException):
message = _("Specified RPC envelope version, %(version)s, "
"not supported by this endpoint.")
class Connection(object):
"""A connection, returned by rpc.create_connection().
This class represents a connection to the message bus used for rpc.
An instance of this class should never be created by users of the rpc API.
Use rpc.create_connection() instead.
"""
def close(self):
"""Close the connection.
This method must be called when the connection will no longer be used.
It will ensure that any resources associated with the connection, such
        as a network connection, are cleaned up.
"""
raise NotImplementedError()
def create_consumer(self, topic, proxy, fanout=False):
"""Create a consumer on this connection.
A consumer is associated with a message queue on the backend message
bus. The consumer will read messages from the queue, unpack them, and
dispatch them to the proxy object. The contents of the message pulled
off of the queue will determine which method gets called on the proxy
object.
:param topic: This is a name associated with what to consume from.
Multiple instances of a service may consume from the same
topic. For example, all instances of nova-compute consume
from a queue called "compute". In that case, the
messages will get distributed amongst the consumers in a
round-robin fashion if fanout=False. If fanout=True,
every consumer associated with this topic will get a
copy of every message.
:param proxy: The object that will handle all incoming messages.
:param fanout: Whether or not this is a fanout topic. See the
documentation for the topic parameter for some
additional comments on this.
"""
raise NotImplementedError()
def create_worker(self, topic, proxy, pool_name):
"""Create a worker on this connection.
A worker is like a regular consumer of messages directed to a
topic, except that it is part of a set of such consumers (the
"pool") which may run in parallel. Every pool of workers will
receive a given message, but only one worker in the pool will
be asked to process it. Load is distributed across the members
of the pool in round-robin fashion.
:param topic: This is a name associated with what to consume from.
Multiple instances of a service may consume from the same
topic.
:param proxy: The object that will handle all incoming messages.
:param pool_name: String containing the name of the pool of workers
"""
raise NotImplementedError()
def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
"""Register as a member of a group of consumers for a given topic from
the specified exchange.
Exactly one member of a given pool will receive each message.
A message will be delivered to multiple pools, if more than
one is created.
:param callback: Callable to be invoked for each message.
:type callback: callable accepting one argument
:param pool_name: The name of the consumer pool.
:type pool_name: str
:param topic: The routing topic for desired messages.
:type topic: str
:param exchange_name: The name of the message exchange where
the client should attach. Defaults to
the configured exchange.
:type exchange_name: str
"""
raise NotImplementedError()
def consume_in_thread(self):
"""Spawn a thread to handle incoming messages.
Spawn a thread that will be responsible for handling all incoming
messages for consumers that were set up on this connection.
Message dispatching inside of this is expected to be implemented in a
non-blocking manner. An example implementation would be having this
thread pull messages in for all of the consumers, but utilize a thread
pool for dispatching the messages to the proxy objects.
"""
raise NotImplementedError()
def _safe_log(log_func, msg, msg_data):
"""Sanitizes the msg_data field before logging."""
SANITIZE = {'set_admin_password': [('args', 'new_pass')],
'run_instance': [('args', 'admin_password')],
'route_message': [('args', 'message', 'args', 'method_info',
'method_kwargs', 'password'),
('args', 'message', 'args', 'method_info',
'method_kwargs', 'admin_password')]}
has_method = 'method' in msg_data and msg_data['method'] in SANITIZE
has_context_token = '_context_auth_token' in msg_data
has_token = 'auth_token' in msg_data
if not any([has_method, has_context_token, has_token]):
return log_func(msg, msg_data)
msg_data = copy.deepcopy(msg_data)
if has_method:
for arg in SANITIZE.get(msg_data['method'], []):
try:
d = msg_data
for elem in arg[:-1]:
d = d[elem]
d[arg[-1]] = '<SANITIZED>'
except KeyError, e:
LOG.info(_('Failed to sanitize %(item)s. Key error %(err)s'),
{'item': arg,
'err': e})
if has_context_token:
msg_data['_context_auth_token'] = '<SANITIZED>'
if has_token:
msg_data['auth_token'] = '<SANITIZED>'
return log_func(msg, msg_data)
def serialize_remote_exception(failure_info, log_failure=True):
"""Prepares exception data to be sent over rpc.
Failure_info should be a sys.exc_info() tuple.
"""
tb = traceback.format_exception(*failure_info)
failure = failure_info[1]
if log_failure:
LOG.error(_("Returning exception %s to caller"), unicode(failure))
LOG.error(tb)
kwargs = {}
if hasattr(failure, 'kwargs'):
kwargs = failure.kwargs
data = {
'class': str(failure.__class__.__name__),
'module': str(failure.__class__.__module__),
'message': unicode(failure),
'tb': tb,
'args': failure.args,
'kwargs': kwargs
}
json_data = jsonutils.dumps(data)
return json_data
def deserialize_remote_exception(conf, data):
failure = jsonutils.loads(str(data))
trace = failure.get('tb', [])
message = failure.get('message', "") + "\n" + "\n".join(trace)
name = failure.get('class')
module = failure.get('module')
# NOTE(ameade): We DO NOT want to allow just any module to be imported, in
# order to prevent arbitrary code execution.
if module not in conf.allowed_rpc_exception_modules:
return RemoteError(name, failure.get('message'), trace)
try:
mod = importutils.import_module(module)
klass = getattr(mod, name)
if not issubclass(klass, Exception):
raise TypeError("Can only deserialize Exceptions")
failure = klass(*failure.get('args', []), **failure.get('kwargs', {}))
except (AttributeError, TypeError, ImportError):
return RemoteError(name, failure.get('message'), trace)
ex_type = type(failure)
str_override = lambda self: message
new_ex_type = type(ex_type.__name__ + "_Remote", (ex_type,),
{'__str__': str_override, '__unicode__': str_override})
try:
# NOTE(ameade): Dynamically create a new exception type and swap it in
# as the new type for the exception. This only works on user defined
# Exceptions and not core python exceptions. This is important because
# we cannot necessarily change an exception message so we must override
# the __str__ method.
failure.__class__ = new_ex_type
except TypeError:
# NOTE(ameade): If a core exception then just add the traceback to the
# first exception argument.
failure.args = (message,) + failure.args[1:]
return failure
class CommonRpcContext(object):
def __init__(self, **kwargs):
self.values = kwargs
def __getattr__(self, key):
try:
return self.values[key]
except KeyError:
raise AttributeError(key)
def to_dict(self):
return copy.deepcopy(self.values)
@classmethod
def from_dict(cls, values):
return cls(**values)
def deepcopy(self):
return self.from_dict(self.to_dict())
def update_store(self):
local.store.context = self
def elevated(self, read_deleted=None, overwrite=False):
"""Return a version of this context with admin flag set."""
# TODO(russellb) This method is a bit of a nova-ism. It makes
# some assumptions about the data in the request context sent
# across rpc, while the rest of this class does not. We could get
# rid of this if we changed the nova code that uses this to
# convert the RpcContext back to its native RequestContext doing
# something like nova.context.RequestContext.from_dict(ctxt.to_dict())
context = self.deepcopy()
context.values['is_admin'] = True
context.values.setdefault('roles', [])
if 'admin' not in context.values['roles']:
context.values['roles'].append('admin')
if read_deleted is not None:
context.values['read_deleted'] = read_deleted
return context
class ClientException(Exception):
"""This encapsulates some actual exception that is expected to be
hit by an RPC proxy object. Merely instantiating it records the
current exception information, which will be passed back to the
RPC client without exceptional logging."""
def __init__(self):
self._exc_info = sys.exc_info()
def catch_client_exception(exceptions, func, *args, **kwargs):
try:
return func(*args, **kwargs)
except Exception, e:
if type(e) in exceptions:
raise ClientException()
else:
raise
def client_exceptions(*exceptions):
"""Decorator for manager methods that raise expected exceptions.
Marking a Manager method with this decorator allows the declaration
of expected exceptions that the RPC layer should not consider fatal,
and not log as if they were generated in a real error scenario. Note
that this will cause listed exceptions to be wrapped in a
ClientException, which is used internally by the RPC layer."""
def outer(func):
def inner(*args, **kwargs):
return catch_client_exception(exceptions, func, *args, **kwargs)
return inner
return outer
def version_is_compatible(imp_version, version):
"""Determine whether versions are compatible.
:param imp_version: The version implemented
:param version: The version requested by an incoming message.
"""
version_parts = version.split('.')
imp_version_parts = imp_version.split('.')
if int(version_parts[0]) != int(imp_version_parts[0]): # Major
return False
if int(version_parts[1]) > int(imp_version_parts[1]): # Minor
return False
return True
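# Illustrative checks (version strings are assumptions):
#   version_is_compatible('2.3', '2.1') -> True   (same major, implemented minor >= requested)
#   version_is_compatible('2.1', '2.3') -> False  (requested minor is newer than implemented)
#   version_is_compatible('2.3', '1.3') -> False  (major version mismatch)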
def serialize_msg(raw_msg):
# NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more
# information about this format.
msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION,
_MESSAGE_KEY: jsonutils.dumps(raw_msg)}
return msg
def deserialize_msg(msg):
# NOTE(russellb): Hang on to your hats, this road is about to
# get a little bumpy.
#
# Robustness Principle:
# "Be strict in what you send, liberal in what you accept."
#
# At this point we have to do a bit of guessing about what it
# is we just received. Here is the set of possibilities:
#
# 1) We received a dict. This could be 2 things:
#
# a) Inspect it to see if it looks like a standard message envelope.
# If so, great!
#
# b) If it doesn't look like a standard message envelope, it could either
# be a notification, or a message from before we added a message
# envelope (referred to as version 1.0).
# Just return the message as-is.
#
# 2) It's any other non-dict type. Just return it and hope for the best.
# This case covers return values from rpc.call() from before message
# envelopes were used. (messages to call a method were always a dict)
if not isinstance(msg, dict):
# See #2 above.
return msg
base_envelope_keys = (_VERSION_KEY, _MESSAGE_KEY)
if not all(map(lambda key: key in msg, base_envelope_keys)):
# See #1.b above.
return msg
# At this point we think we have the message envelope
# format we were expecting. (#1.a above)
if not version_is_compatible(_RPC_ENVELOPE_VERSION, msg[_VERSION_KEY]):
raise UnsupportedRpcEnvelopeVersion(version=msg[_VERSION_KEY])
raw_msg = jsonutils.loads(msg[_MESSAGE_KEY])
return raw_msg
|
|
from __future__ import division
import os
import shutil
from subprocess import call
import itertools
import fnmatch
from calendar import monthrange
def generate_bulktraj(basename, hysplit_working, output_dir, meteo_dir, years,
months, hours, altitudes, coordinates, run,
meteoyr_2digits=True, outputyr_2digits=False,
monthslice=slice(0, 32, 1), meteo_bookends=([4, 5], [1]),
get_reverse=False, get_clipped=False,
hysplit="C:\\hysplit4\\exec\\hyts_std"):
"""
Generate sequence of trajectories within given time frame(s).
Run bulk sequence of HYSPLIT simulations over a given time and at different
altitudes (likely in meters above ground level). Uses either weekly or
semi-monthly data with the filename format of *mon*YY*# or *mon*YYYY*#.
Results are written to ``output_dir``.
    This does not set along-trajectory meteorological output; edit SETUP.CFG
in the HYSPLIT working directory or in the HYSPLIT4 GUI to reflect
desired output variables.
Absolute paths strongly recommended over relative paths.
Parameters
----------
basename : string
Base for all files output in this run
hysplit_working : string
Absolute or relative path to the HYSPLIT working directory.
output_dir : string
Absolute or relative path to the desired output directory.
meteo_dir : string
Absolute or relative path to the location of the meteorology files.
years : list of ints
The year(s) to run simulations
months : list of ints
The month(s) to run simulations
hours : list of ints
Parcel launching times in UTC.
altitudes : list of ints
The altitudes (usually meters above ground level) from which
parcels will be launched. Must be less than model top (10000 m)
coordinates : tuple of floats
The parcel (latitude, longitude) launch location in decimal degrees.
run : int
Length in hours of simulation. To calculate back trajectories,
``run`` must be negative.
meteoyr_2digits : Boolean
Default True. Indicates whether to search for meteorology files using
        the last 2 or all 4 digits of the years. Must be set to False if there
        are multiple decades of meteorology files in meteo_dir.
outputyr_2digits : Boolean
        Default False. The old behavior corresponds to True. The number of
        digits (2 or 4) used to identify the year in the trajectory filename.
        Must be kept False if you wish PySPLIT to correctly identify
        non-21st century trajectories later.
monthslice : slice object
Default slice(0, 32, 1). Slice to apply to range of days in month.
Use to target particular day or range of days, every x number of days,
etc. NOTE: slice is 0 indexed, days start with 1. For example,
slice(0, 32, 2) will yield every odd day.
meteo_bookends : tuple of lists of ints
        Default ([4, 5], [1]). To calculate a month of trajectories, files
        from the previous and next months must be included. The default is optimized
for weekly meteorology and indicates that weeks 4 and 5 from the
previous month and the first week of the next month must be included
to run the entire current month of trajectories. The user is
responsible for making sure the correct bookends for their trajectory
length and meteorology file periods are provided.
get_reverse : Boolean
Default ``False``. If ``True``, then from the last point of each
trajectory a new parcel will be launched in the opposite direction.
These reverse trajectories are stored in a subfolder in ``output_dir``
get_clipped : Boolean
Default ``False``. If ``True``, takes a trajectory file and
outputs a version of the file containing only path information.
Provided to support clustering of trajectories with multiline data,
        which was produced in HYSPLIT versions prior to January 2017 (854)
when more than 7 along-trajectory output variables were selected.
hysplit : string
Default "C:\\hysplit4\\exec\\hyts_std". The location of the "hyts_std"
executable that generates trajectories. This is the default location
        for a typical PC installation of HYSPLIT.
"""
# Set year formatting in 3 places
yr_is2digits = {True : _year2string,
False : str}
controlyearfunc = yr_is2digits[True]
meteoyearfunc = yr_is2digits[meteoyr_2digits]
fnameyearfunc = yr_is2digits[outputyr_2digits]
if outputyr_2digits is False or meteoyr_2digits is False:
for year in years:
if len(str(year)) != 4:
raise ValueError("%d is not a valid year for given" \
" meteoyr_2digits, outputyr_2digits" %year)
controlfname = 'CONTROL'
# Get directory information, make directories if necessary
cwd = os.getcwd()
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
output_rdir = os.path.join(output_dir, 'reversetraj')
output_cdir = os.path.join(output_dir, 'clippedtraj')
meteo_dir = meteo_dir.replace('\\', '/')
if get_reverse and not os.path.isdir(output_rdir):
os.mkdir(os.path.join(output_rdir))
if get_clipped and not os.path.isdir(output_cdir):
os.mkdir(os.path.join(output_cdir))
# Initialize dictionary of months, seasons
n_hemisphere = True
if coordinates[0] < 0:
n_hemisphere = False
mon_dict = _mondict(n_hem=n_hemisphere)
try:
os.chdir(hysplit_working)
# Iterate over years and months
for y, m in itertools.product(years, months):
season = mon_dict[m][0]
m_str = mon_dict[m][1]
m_len = monthrange(y, m)[1]
days = range(1, m_len + 1)[monthslice]
# Assemble list of meteorology files
meteofiles = _meteofinder(meteo_dir, meteo_bookends, m, y,
mon_dict, meteoyearfunc)
controlyr = controlyearfunc(y)
fnameyr = fnameyearfunc(y)
# Iterate over days, hours, altitudes
for d, h, a in itertools.product(days, hours, altitudes):
# Add timing and altitude to basename to create unique name
trajname = (basename + m_str + '{:04}'.format(a) + season +
fnameyr + "{0:02}{1:02}{2:02}".format(m, d, h))
final_trajpath = os.path.join(output_dir, trajname)
# Remove any existing CONTROL or temp files
_try_to_remove(controlfname)
_try_to_remove(trajname)
_try_to_remove(final_trajpath)
# Populate CONTROL file with trajectory initialization data
_populate_control(coordinates, controlyr, m, d, h, a, meteo_dir,
meteofiles, run, controlfname, trajname)
# Call executable to calculate trajectory
call(hysplit)
# Generate reverse and/or clipped trajectories, if indicated
if get_reverse:
_reversetraj_whilegen(trajname, run, hysplit, output_rdir,
meteo_dir, meteofiles, controlfname)
if get_clipped:
_cliptraj(output_cdir, trajname)
# Move the trajectory file to output directory
shutil.move(trajname, final_trajpath)
# Revert current working directory
finally:
os.chdir(cwd)
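# Illustrative sketch (not part of the original module): a minimal call to
# generate_bulktraj() for two winter months of daily 120-hour back
# trajectories. Every path below is a hypothetical placeholder; point them at
# a real HYSPLIT installation and meteorology archive before running.
def _example_generate_bulktraj():
    generate_bulktraj('colgate',                    # basename for output files
                      r'C:/hysplit4/working',       # HYSPLIT working directory
                      r'C:/trajectories/colgate',   # output directory
                      r'C:/gdas',                   # weekly meteorology files
                      years=[2013], months=[1, 2], hours=[12],
                      altitudes=[500, 1500],
                      coordinates=(42.82, -75.54),  # (latitude, longitude)
                      run=-120)                     # negative run = back trajectory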
def _reversetraj_whilegen(trajname, run, hysplit, output_rdir, meteo_dir,
meteofiles, controlfname):
"""
Calculate reverse trajectory during main trajectory generation.
    Calculates a new trajectory ('reverse trajectory') starting from the
    endpoint of the trajectory just calculated in the main generation
    sequence and running in the opposite direction.
Parameters
----------
trajname : string
The file name of the just-calculated trajectory. New backwards
trajectory will be named ``trajname`` + 'REVERSE'
run : int
The length in hours of the trajectory simulation
hysplit : string
The location of the executable that calculates trajectories
output_rdir : string
The subdirectory for reverse trajectories.
meteo_dir : string
The location of the meteorology files
meteofiles : string
The list of meteorology files required to calculate the
reverse trajectory.
controlfname : string
The name of the control file, which should be 'CONTROL'
"""
# Initialize name, path
reversetrajname = trajname + 'REVERSE'
final_rtrajpath = os.path.join(output_rdir, reversetrajname)
with open(trajname) as traj:
contents = traj.readlines()
last_timepoint = -1
# for multiline files, critical information is in contents[-2]
if len(contents[-1]) < len(contents[-2]):
last_timepoint = -2
data = contents[last_timepoint].split()
# Get reverse trajectory start information
year = int(data[2])
mon = int(data[3])
day = int(data[4])
hour = int(data[5])
lat = float(data[9])
lon = float(data[10])
alt = float(data[11])
run = run * -1
# Sometimes start height is greater than 10000 m model top
if alt >= 10000:
alt = 9999
# Always 2 digit year for CONTROL
yr = '{:02}'.format(year)
# Remove (if present) any existing CONTROL or temp files
_try_to_remove(controlfname)
_try_to_remove(reversetrajname)
_try_to_remove(final_rtrajpath)
# Populate control text
_populate_control((lat, lon), yr, mon, day, hour, alt, meteo_dir,
meteofiles, run, controlfname, reversetrajname)
# Call executable
call(hysplit)
# Move the trajectory file to the desired output directory
shutil.move(reversetrajname, final_rtrajpath)
def _cliptraj(output_cdir, trajname):
"""
Create clipped trajectory file from original file.
Creates a new trajectory file containing only header and path information
from a newly generated trajectory. Only necessary if files are multiline.
Parameters
----------
output_cdir : string
Full or relative path to clipped trajectory output directory
trajname : string
Name of trajectory file to clip. New file will be named ``trajname`` +
'CLIPPED'
"""
# Initialize name, path, data list
clippedtrajname = trajname + 'CLIPPED'
final_ctrajpath = os.path.join(output_cdir, clippedtrajname)
clipdata = []
with open(trajname) as original:
# Read in all lines of file
contents = original.readlines()
# Initialize markers
skip = False
atdata = False
multiline = False
# Iterate through lines
for ind, line in enumerate(contents):
# Skip line only triggered if multiline, after at data
if skip:
skip = False
continue
# Once at data, only need first 92 char of line(s), append data
if atdata:
clipdata.append(line[:92] + '\n')
if multiline:
skip = True
continue
# PRESSURE marker tripped first
if 'PRESSURE' in line:
if len(contents[ind + 1]) > len(contents[ind + 2]):
multiline = True
# Append last line of header, now at data
clipdata.append(line[:15] + '\n')
atdata = True
continue
# Append header data as is
clipdata.append(line)
# Get rid of temporary files and files with the same path
_try_to_remove(clippedtrajname)
_try_to_remove(final_ctrajpath)
# Write data to file and move
with open(clippedtrajname, 'w') as ctraj:
ctraj.writelines(clipdata)
shutil.move(clippedtrajname, final_ctrajpath)
def _meteofinder(meteo_dir, meteo_bookends, mon, year, mon_dict,
meteoyearfunc):
"""
Get list of meteorology files.
Creates list of files in storage location ``meteo_dir`` that belong
    to the given month and year, plus the necessary files from the previous
    and next months (``meteo_bookends``).
For successful meteofinding, separate different meteorology types into
different folders and name weekly or semi-monthly files according to the
following convention:
*mon*YY*#
where the * represents a Bash wildcard.
Parameters
----------
meteo_dir : string
Full or relative path to the location of the meteorology files.
meteo_bookends : tuple of lists of ints
To calculate a month of trajectories, files from the previous and next
month must be included. This indicates which file numbers from the
previous month and which from the next month are necessary.
The user is responsible for making sure the correct bookends for their
trajectory length and meteorology file periods are provided.
mon : int
The integer representation of the current month. Converted to a
3-letter string to find meteorology files.
year : int
The integer representation of the current year. Converted to a length
2 string to find meteorology files.
mon_dict : dictionary
Dictionary keyed by month integer, with lists of [season, mon]
meteoyearfunc : function
Function that formats the year string to length 2 or 4 to identify
appropriate meteorology files
Returns
-------
meteofiles : list of strings
List of strings representing the names of the required
meteorology files
"""
# Current working directory set in generate_bulktraj() environment
orig_dir = os.getcwd()
# Initialize lists, count
meteofiles = []
file_number = -1
# Get the strings that will match files for the previous, next,
# and current months
prv, nxt, now = _monyearstrings(mon, year, mon_dict, meteoyearfunc)
# Change directory and walk through files
try:
os.chdir(meteo_dir)
_, _, files = next(os.walk('.'))
# Order of files to CONTROL doesn't matter
for each_file in files:
if fnmatch.fnmatch(each_file, now):
meteofiles.append(each_file)
elif fnmatch.fnmatch(each_file, prv):
if int(each_file[file_number]) in meteo_bookends[0]:
meteofiles.append(each_file)
elif fnmatch.fnmatch(each_file, nxt):
if int(each_file[file_number]) in meteo_bookends[1]:
meteofiles.append(each_file)
finally:
os.chdir(orig_dir)
num_files = len(meteofiles)
if num_files == 0:
raise OSError('0 files found for month/year %(mon)d / %(year)d'
%{'mon': mon, 'year': year})
if num_files > 12:
print(meteofiles)
raise OSError('%(f)d files found for month/year %(mon)d / %(year)d.'\
' Maximum 12 allowed. If wrong years are included, '\
                      'identify files by 4 digit years (meteoyr_2digits=False).'\
' May require renaming meteorology files.'
%{'f': num_files, 'mon': mon, 'year': year})
return meteofiles
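# Illustrative note (not in the original module): under the naming convention
# described above, a hypothetical weekly file named 'gdas1.jan13.w1' matches
# the current-month signature '*jan*13*', and its final character ('1') is the
# file number that gets checked against ``meteo_bookends``.
def _example_meteofinder_match():
    assert fnmatch.fnmatch('gdas1.jan13.w1', '*jan*13*')
    assert int('gdas1.jan13.w1'[-1]) == 1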
def _populate_control(coords, year, month, day, hour, alt,
meteo_dir, meteofiles, run, controlfname, trajname):
"""
Initialize and write CONTROL text to file (called CONTROL).
Parameters
----------
    coords : tuple of floats
        The parcel (latitude, longitude) launch location in decimal degrees.
    year : string
        The 2-digit string representation of the simulation year
    month : int
        The month of the simulation
    day : int
        The day of the simulation
    hour : int
        Parcel launching time in UTC.
alt : int
The altitude (usually meters above ground level) from which
parcel will be launched. Must be less than model top (10000 m)
meteo_dir : string
Full or relative path to the location of the meteorology files.
meteofiles : list of strings
List of strings representing the names of the required
meteorology files
run : int
Length in hours of simulation.
controlfname : string
The name of the control file, which should be 'CONTROL'
trajname : string
The intended name of the trajectory file
"""
controltext = [year + " {0:02} {1:02} {2:02}\n".format(month, day, hour),
"1\n",
"{0!s} {1!s} {2!s}\n".format(coords[0], coords[1], alt),
"{0!s}\n".format(run),
"0\n",
"10000.0\n",
"{0!s}\n".format(len(meteofiles))]
for fname in meteofiles:
controltext.append("{0}/\n".format(meteo_dir))
controltext.append("{0}\n".format(fname))
controltext.append("./\n")
controltext.append("{0}\n".format(trajname))
with open(controlfname, 'w') as control:
control.writelines(controltext)
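# Illustrative note (not in the original module): for hypothetical inputs
# coords=(42.82, -75.54), year='13', month=1, day=5, hour=12, alt=500,
# meteo_dir='C:/gdas', meteofiles=['gdas1.jan13.w1'], run=-120 and
# trajname='example', the CONTROL file written above would contain:
#     13 01 05 12
#     1
#     42.82 -75.54 500
#     -120
#     0
#     10000.0
#     1
#     C:/gdas/
#     gdas1.jan13.w1
#     ./
#     example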
def _year2string(year):
"""
Helper function, takes a four digit integer year, makes a length-2 string.
Parameters
----------
year : int
The year.
Returns
-------
Length-2 string representation of ``year``
"""
return '{0:02}'.format(year % 100)
def _monyearstrings(mon, year, mon_dict, meteoyearfunc):
"""
Increment the months and potentially the years.
Assemble the strings that will allow ``_meteofinder`` to get correct files.
Parameters
----------
mon : int
Integer representation of the month
year : int
Integer representation of the year
mon_dict : dictionary
Dictionary keyed by month integer, with lists of [season, mon]
meteoyearfunc : function
Function that formats the year string to length 2 or 4 to identify
appropriate meteorology files
Returns
-------
prv : string
Signature for gathering the meteorology files for the previous month
nxt : string
Signature for gathering the meteorology files for the next month
now : string
Signature for gathering the meteorology files for the current month
"""
next_year = year
prev_year = year
next_mon = mon + 1
prev_mon = mon - 1
if prev_mon == 0:
prev_mon = 12
prev_year = year - 1
if next_mon == 13:
next_mon = 1
next_year = year + 1
w = '*'
prv = w + mon_dict[prev_mon][1] + w + meteoyearfunc(prev_year) + w
nxt = w + mon_dict[next_mon][1] + w + meteoyearfunc(next_year) + w
now = w + mon_dict[mon][1] + w + meteoyearfunc(year) + w
return prv, nxt, now
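# Illustrative sketch (not part of the original module): the wildcard
# signatures produced for January 2013 with 2-digit meteorology years.
def _example_monyearstrings():
    prv, nxt, now = _monyearstrings(1, 2013, _mondict(), _year2string)
    assert (prv, nxt, now) == ('*dec*12*', '*feb*13*', '*jan*13*')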
def _mondict(n_hem=True):
"""
Get a dictionary of season and month string.
Parameters
----------
n_hem : Boolean
Default True. Indicates hemisphere of parcel launch and thus
actual season.
Returns
-------
season_month_dict : dictionary
Dictionary keyed by month integer, with lists of [season, mon]
"""
if n_hem:
season_month_dict = {12: ['winter', 'dec'],
1 : ['winter', 'jan'],
2 : ['winter', 'feb'],
3 : ['spring', 'mar'],
4 : ['spring', 'apr'],
5 : ['spring', 'may'],
6 : ['summer', 'jun'],
7 : ['summer', 'jul'],
8 : ['summer', 'aug'],
9 : ['autumn', 'sep'],
10: ['autumn', 'oct'],
11: ['autumn', 'nov']}
else:
season_month_dict = {12: ['summer', 'dec'],
1 : ['summer', 'jan'],
2 : ['summer', 'feb'],
3 : ['autumn', 'mar'],
4 : ['autumn', 'apr'],
5 : ['autumn', 'may'],
6 : ['winter', 'jun'],
7 : ['winter', 'jul'],
8 : ['winter', 'aug'],
9 : ['spring', 'sep'],
10: ['spring', 'oct'],
11: ['spring', 'nov']}
return season_month_dict
def _try_to_remove(string):
"""
Check if file exists, and either remove it or pass.
Parameters
----------
string : string
Name of file to attempt to remove
"""
try:
os.remove(string)
except OSError:
pass
def _day2filenum(interval, day):
"""
Convert a date to corresponding file number.
    Results depend on the file interval: daily, weekly, semi-monthly, or monthly.
Parameters
----------
interval : string
        The file interval. Daily, weekly, semi-monthly, or monthly accepted,
        represented by the lower case first letter.
day : string
A number indicating the date.
Returns
-------
filenum : string
The number of the file within the month of meteorology.
"""
if interval == 'w':
filenum = str(((int(day) - 1) // 7) + 1)
elif interval == 's':
filenum = str(((int(day) - 1) // 15) + 1)
elif interval == 'd' or interval == 'm':
filenum = day
else:
raise ValueError('Meteorology interval not recognized')
return filenum
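# Illustrative sketch (not part of the original module): file numbers for a
# few hypothetical dates under each supported interval code.
def _example_day2filenum():
    assert _day2filenum('w', '15') == '3'   # day 15 falls in week 3
    assert _day2filenum('s', '20') == '2'   # day 20 falls in the second half-month
    assert _day2filenum('d', '07') == '07'  # daily (and monthly) files pass through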
|
|
from contextlib import contextmanager
from pathlib import Path
from typing import Any, Dict, Iterator, List, Union, Optional, Callable, ContextManager
import os
import tempfile
from test_driver.logger import rootlog
from test_driver.machine import Machine, NixStartScript, retry
from test_driver.vlan import VLan
from test_driver.polling_condition import PollingCondition
def get_tmp_dir() -> Path:
"""Returns a temporary directory that is defined by TMPDIR, TEMP, TMP or CWD
Raises an exception in case the retrieved temporary directory is not writeable
See https://docs.python.org/3/library/tempfile.html#tempfile.gettempdir
"""
tmp_dir = Path(tempfile.gettempdir())
tmp_dir.mkdir(mode=0o700, exist_ok=True)
if not tmp_dir.is_dir():
raise NotADirectoryError(
"The directory defined by TMPDIR, TEMP, TMP or CWD: {0} is not a directory".format(
tmp_dir
)
)
if not os.access(tmp_dir, os.W_OK):
raise PermissionError(
"The directory defined by TMPDIR, TEMP, TMP, or CWD: {0} is not writeable".format(
tmp_dir
)
)
return tmp_dir
class Driver:
"""A handle to the driver that sets up the environment
and runs the tests"""
tests: str
vlans: List[VLan]
machines: List[Machine]
polling_conditions: List[PollingCondition]
def __init__(
self,
start_scripts: List[str],
vlans: List[int],
tests: str,
out_dir: Path,
keep_vm_state: bool = False,
):
self.tests = tests
self.out_dir = out_dir
tmp_dir = get_tmp_dir()
with rootlog.nested("start all VLans"):
self.vlans = [VLan(nr, tmp_dir) for nr in vlans]
def cmd(scripts: List[str]) -> Iterator[NixStartScript]:
for s in scripts:
yield NixStartScript(s)
self.polling_conditions = []
self.machines = [
Machine(
start_command=cmd,
keep_vm_state=keep_vm_state,
name=cmd.machine_name,
tmp_dir=tmp_dir,
callbacks=[self.check_polling_conditions],
out_dir=self.out_dir,
)
for cmd in cmd(start_scripts)
]
def __enter__(self) -> "Driver":
return self
def __exit__(self, *_: Any) -> None:
with rootlog.nested("cleanup"):
for machine in self.machines:
machine.release()
def subtest(self, name: str) -> Iterator[None]:
"""Group logs under a given test name"""
with rootlog.nested(name):
try:
yield
return True
except Exception as e:
rootlog.error(f'Test "{name}" failed with error: "{e}"')
raise e
def test_symbols(self) -> Dict[str, Any]:
@contextmanager
def subtest(name: str) -> Iterator[None]:
return self.subtest(name)
general_symbols = dict(
start_all=self.start_all,
test_script=self.test_script,
machines=self.machines,
vlans=self.vlans,
driver=self,
log=rootlog,
os=os,
create_machine=self.create_machine,
subtest=subtest,
run_tests=self.run_tests,
join_all=self.join_all,
retry=retry,
serial_stdout_off=self.serial_stdout_off,
serial_stdout_on=self.serial_stdout_on,
polling_condition=self.polling_condition,
Machine=Machine, # for typing
)
machine_symbols = {m.name: m for m in self.machines}
# If there's exactly one machine, make it available under the name
# "machine", even if it's not called that.
if len(self.machines) == 1:
(machine_symbols["machine"],) = self.machines
vlan_symbols = {
f"vlan{v.nr}": self.vlans[idx] for idx, v in enumerate(self.vlans)
}
print(
"additionally exposed symbols:\n "
+ ", ".join(map(lambda m: m.name, self.machines))
+ ",\n "
+ ", ".join(map(lambda v: f"vlan{v.nr}", self.vlans))
+ ",\n "
+ ", ".join(list(general_symbols.keys()))
)
return {**general_symbols, **machine_symbols, **vlan_symbols}
def test_script(self) -> None:
"""Run the test script"""
with rootlog.nested("run the VM test script"):
symbols = self.test_symbols() # call eagerly
exec(self.tests, symbols, None)
def run_tests(self) -> None:
"""Run the test script (for non-interactive test runs)"""
self.test_script()
# TODO: Collect coverage data
for machine in self.machines:
if machine.is_up():
machine.execute("sync")
def start_all(self) -> None:
"""Start all machines"""
with rootlog.nested("start all VMs"):
for machine in self.machines:
machine.start()
def join_all(self) -> None:
"""Wait for all machines to shut down"""
with rootlog.nested("wait for all VMs to finish"):
for machine in self.machines:
machine.wait_for_shutdown()
def create_machine(self, args: Dict[str, Any]) -> Machine:
rootlog.warning(
"Using legacy create_machine(), please instantiate the"
"Machine class directly, instead"
)
tmp_dir = get_tmp_dir()
if args.get("startCommand"):
start_command: str = args.get("startCommand", "")
cmd = NixStartScript(start_command)
name = args.get("name", cmd.machine_name)
else:
cmd = Machine.create_startcommand(args) # type: ignore
name = args.get("name", "machine")
return Machine(
tmp_dir=tmp_dir,
out_dir=self.out_dir,
start_command=cmd,
name=name,
keep_vm_state=args.get("keep_vm_state", False),
allow_reboot=args.get("allow_reboot", False),
)
def serial_stdout_on(self) -> None:
rootlog._print_serial_logs = True
def serial_stdout_off(self) -> None:
rootlog._print_serial_logs = False
def check_polling_conditions(self) -> None:
for condition in self.polling_conditions:
condition.maybe_raise()
def polling_condition(
self,
fun_: Optional[Callable] = None,
*,
seconds_interval: float = 2.0,
description: Optional[str] = None,
) -> Union[Callable[[Callable], ContextManager], ContextManager]:
driver = self
class Poll:
def __init__(self, fun: Callable):
self.condition = PollingCondition(
fun,
seconds_interval,
description,
)
def __enter__(self) -> None:
driver.polling_conditions.append(self.condition)
def __exit__(self, a, b, c) -> None: # type: ignore
res = driver.polling_conditions.pop()
assert res is self.condition
if fun_ is None:
return Poll
else:
return Poll(fun_)
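# Illustrative note (not part of the original driver): polling_condition() can
# be used from a test script either bare or with keyword arguments. A sketch,
# assuming ``driver`` is a Driver instance and ``machine`` one of its Machines
# (the systemd unit name below is hypothetical):
#
#     @driver.polling_condition
#     def server_is_up():
#         machine.succeed("systemctl is-active my-service")
#
#     with server_is_up:
#         ...  # the condition is checked while this block runs
#
# Calling polling_condition(seconds_interval=10) with only keyword arguments
# returns the decorator itself, so the same pattern works with a custom
# polling interval.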
|
|
# Copyright (c) 2018 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron_lib import constants as n_const
from networking_arista.tests.unit.ml2.security_groups import sg_test_base
class SecurityGroupCallbacksTestCase(sg_test_base.SecurityGroupTestBase):
def test_create_security_group(self):
sec_group = {'security_group':
{'name': 'sg1',
'tenant_id': 't1',
'description': ''}}
grp = self.plugin.create_security_group(self.context, sec_group,
default_sg=True)
expected_eapi_commands = [
'enable',
'configure',
'ip access-list SG-INGRESS-%s dynamic' % grp['id'],
'exit',
'ip access-list SG-EGRESS-%s dynamic' % grp['id'],
'exit',
'exit']
for switch in self.switches.values():
self.assertEqual(expected_eapi_commands,
switch.received_commands)
def test_delete_security_group(self):
sec_group = {'security_group':
{'name': 'sg1',
'tenant_id': 't1',
'description': ''}}
grp = self.plugin.create_security_group(self.context, sec_group,
default_sg=True)
for switch in self.switches.values():
switch.clear_received_commands()
self.plugin.delete_security_group(self.context, grp['id'])
expected_eapi_commands = [
'enable',
'configure',
'no ip access-list SG-INGRESS-%s' % grp['id'],
'no ip access-list SG-EGRESS-%s' % grp['id'],
'exit']
for switch in self.switches.values():
self.assertEqual(expected_eapi_commands,
switch.received_commands)
def test_create_security_group_rule_ingress(self):
direction = 'ingress'
proto = 'tcp'
cidr = '10.0.0.0/24'
grp_id, _ = self.create_sg_rule(direction, proto, cidr)
expected_eapi_commands = [
'enable',
'configure',
'ip access-list SG-INGRESS-%s dynamic' % grp_id,
'permit %s %s any' % (proto, cidr),
'exit',
'exit']
for switch in self.switches.values():
self.assertEqual(expected_eapi_commands,
switch.received_commands)
def test_create_security_group_rule_egress(self):
direction = 'egress'
proto = 'tcp'
cidr = '10.0.0.0/24'
grp_id, _ = self.create_sg_rule(direction, proto, cidr)
expected_eapi_commands = [
'enable',
'configure',
'ip access-list SG-EGRESS-%s dynamic' % grp_id,
'permit %s any %s' % (proto, cidr),
'exit',
'exit']
for switch in self.switches.values():
self.assertEqual(expected_eapi_commands,
switch.received_commands)
def test_create_security_group_rule_tcp(self):
direction = 'egress'
proto = 'tcp'
cidr = '10.0.0.0/24'
grp_id, _ = self.create_sg_rule(direction, proto, cidr)
expected_eapi_commands = [
'enable',
'configure',
'ip access-list SG-EGRESS-%s dynamic' % grp_id,
'permit %s any %s' % (proto, cidr),
'exit',
'exit']
for switch in self.switches.values():
self.assertEqual(expected_eapi_commands,
switch.received_commands)
def test_create_security_group_rule_udp(self):
direction = 'egress'
proto = 'udp'
cidr = '10.0.0.0/24'
grp_id, _ = self.create_sg_rule(direction, proto, cidr)
expected_eapi_commands = [
'enable',
'configure',
'ip access-list SG-EGRESS-%s dynamic' % grp_id,
'permit %s any %s' % (proto, cidr),
'exit',
'exit']
for switch in self.switches.values():
self.assertEqual(expected_eapi_commands,
switch.received_commands)
def test_create_security_group_rule_port_range(self):
direction = 'egress'
proto = 'tcp'
cidr = '10.0.0.0/24'
range_min = 100
range_max = 200
grp_id, _ = self.create_sg_rule(direction, proto, cidr,
range_min, range_max)
expected_eapi_commands = [
'enable',
'configure',
'ip access-list SG-EGRESS-%s dynamic' % grp_id,
'permit %s any %s range %s %s' % (proto, cidr,
range_min, range_max),
'exit',
'exit']
for switch in self.switches.values():
self.assertEqual(expected_eapi_commands,
switch.received_commands)
def test_create_security_group_rule_basic_icmp(self):
direction = 'egress'
proto = 'icmp'
cidr = '10.0.0.0/24'
grp_id, _ = self.create_sg_rule(direction, proto, cidr)
expected_eapi_commands = [
'enable',
'configure',
'ip access-list SG-EGRESS-%s dynamic' % grp_id,
'permit %s any %s' % (proto, cidr),
'exit',
'exit']
for switch in self.switches.values():
self.assertEqual(expected_eapi_commands,
switch.received_commands)
def test_create_security_group_rule_icmp_type(self):
direction = 'egress'
proto = 'icmp'
cidr = '10.0.0.0/24'
message_type = 10
grp_id, _ = self.create_sg_rule(direction, proto, cidr, message_type)
expected_eapi_commands = [
'enable',
'configure',
'ip access-list SG-EGRESS-%s dynamic' % grp_id,
'permit %s any %s %s' % (proto, cidr, message_type),
'exit',
'exit']
for switch in self.switches.values():
self.assertEqual(expected_eapi_commands,
switch.received_commands)
def test_create_security_group_rule_icmp_code(self):
direction = 'egress'
proto = 'icmp'
cidr = '10.0.0.0/24'
message_type = 10
message_code = 100
grp_id, _ = self.create_sg_rule(direction, proto, cidr, message_type,
message_code)
expected_eapi_commands = [
'enable',
'configure',
'ip access-list SG-EGRESS-%s dynamic' % grp_id,
'permit %s any %s %s %s' % (proto, cidr, message_type,
message_code),
'exit',
'exit']
for switch in self.switches.values():
self.assertEqual(expected_eapi_commands,
switch.received_commands)
def test_create_security_group_no_ip(self):
direction = 'egress'
proto = 'tcp'
cidr = None
grp_id, _ = self.create_sg_rule(direction, proto, cidr)
expected_eapi_commands = [
'enable',
'configure',
'ip access-list SG-EGRESS-%s dynamic' % grp_id,
'permit %s any any' % proto,
'exit',
'exit']
for switch in self.switches.values():
self.assertEqual(expected_eapi_commands,
switch.received_commands)
def test_create_security_group_ipv6(self):
direction = 'egress'
proto = 'tcp'
cidr = None
ethertype = 'IPv6'
grp_id, _ = self.create_sg_rule(direction, proto, cidr,
ethertype=ethertype)
for switch in self.switches.values():
self.assertEqual([], switch.received_commands)
def test_delete_security_group_rule(self):
direction = 'egress'
proto = 'tcp'
cidr = '10.0.0.0/24'
grp_id, rule = self.create_sg_rule(direction, proto, cidr)
for switch in self.switches.values():
switch.clear_received_commands()
self.plugin.delete_security_group_rule(self.context, rule['id'])
expected_eapi_commands = [
'enable',
'configure',
'ip access-list SG-EGRESS-%s dynamic' % grp_id,
'no permit %s any %s' % (proto, cidr),
'exit',
'exit']
for switch in self.switches.values():
self.assertEqual(expected_eapi_commands,
switch.received_commands)
def test_apply_security_group(self):
switch_port = 'Ethernet1'
switch_id = '11:22:33:44:55'
switch_info = 'TOR1'
grp_id, rule = self.create_sg_rule('egress', 'tcp', '10.0.0.0/24')
for switch in self.switches.values():
switch.clear_received_commands()
net_dict = {'network': {'name': 'net',
'tenant_id': 't1',
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
port_dict = {'name': 'port1',
'tenant_id': 't1',
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': 'bm1',
'device_owner': n_const.DEVICE_OWNER_BAREMETAL_PREFIX,
'binding:host_id': 'bm-host',
'binding:profile': {'local_link_information': [
{'switch_id': switch_id,
'port_id': switch_port,
'switch_info': switch_info}]},
'binding:vnic_type': 'baremetal',
'security_groups': [grp_id]}
port, _ = self.create_port(port_dict)
expected_eapi_commands = [
'show interfaces',
'enable',
'configure',
'interface %s' % switch_port,
'ip access-group SG-INGRESS-%s out' % grp_id,
'ip access-group SG-EGRESS-%s in' % grp_id,
'exit',
'exit']
# SGs are applied on binding and on status DOWN->UP,
# so expect the commands twice
expected_eapi_commands.extend(expected_eapi_commands)
self.assertEqual(expected_eapi_commands,
self.switch1.received_commands)
self.assertEqual([], self.switch2.received_commands)
def test_apply_security_group_lag(self):
switch_port = 'Ethernet1'
port_channel = 'Port-Channel100'
switch_id = '11:22:33:44:55'
switch_info = 'TOR1'
self.create_port_channel(switch_info, switch_port, port_channel)
grp_id, rule = self.create_sg_rule('egress', 'tcp', '10.0.0.0/24')
for switch in self.switches.values():
switch.clear_received_commands()
net_dict = {'network': {'name': 'net',
'tenant_id': 't1',
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
port_dict = {'name': 'port1',
'tenant_id': 't1',
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': 'bm1',
'device_owner': n_const.DEVICE_OWNER_BAREMETAL_PREFIX,
'binding:host_id': 'bm-host',
'binding:profile': {'local_link_information': [
{'switch_id': switch_id,
'port_id': switch_port,
'switch_info': switch_info}]},
'binding:vnic_type': 'baremetal',
'security_groups': [grp_id]}
port, _ = self.create_port(port_dict)
expected_eapi_commands = [
'show interfaces',
'enable',
'configure',
'interface %s' % port_channel,
'ip access-group SG-INGRESS-%s out' % grp_id,
'ip access-group SG-EGRESS-%s in' % grp_id,
'exit',
'exit']
# SGs are applied on binding and on status DOWN->UP,
# so expect the commands twice
expected_eapi_commands.extend(expected_eapi_commands)
self.assertEqual(expected_eapi_commands,
self.switch1.received_commands)
self.assertEqual([], self.switch2.received_commands)
def test_apply_security_group_mlag(self):
switch_port = 'Ethernet1'
port_channel = 'Port-Channel100'
switch_id = '11:22:33:44:55'
switch1_info = 'TOR1'
switch2_info = 'TOR2'
self.create_port_channel(switch1_info, switch_port, port_channel)
self.create_port_channel(switch2_info, switch_port, port_channel)
grp_id, rule = self.create_sg_rule('egress', 'tcp', '10.0.0.0/24')
for switch in self.switches.values():
switch.clear_received_commands()
net_dict = {'network': {'name': 'net',
'tenant_id': 't1',
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
port_dict = {'name': 'port1',
'tenant_id': 't1',
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': 'bm1',
'device_owner': n_const.DEVICE_OWNER_BAREMETAL_PREFIX,
'binding:host_id': 'bm-host',
'binding:profile': {'local_link_information': [
{'switch_id': switch_id,
'port_id': switch_port,
'switch_info': switch1_info},
{'switch_id': switch_id,
'port_id': switch_port,
'switch_info': switch2_info}]},
'binding:vnic_type': 'baremetal',
'security_groups': [grp_id]}
port, _ = self.create_port(port_dict)
expected_eapi_commands = [
'show interfaces',
'enable',
'configure',
'interface %s' % port_channel,
'ip access-group SG-INGRESS-%s out' % grp_id,
'ip access-group SG-EGRESS-%s in' % grp_id,
'exit',
'exit']
# SGs are applied on binding and on status DOWN->UP,
# so expect the commands twice
expected_eapi_commands.extend(expected_eapi_commands)
self.assertEqual(expected_eapi_commands,
self.switch1.received_commands)
self.assertEqual(expected_eapi_commands,
self.switch2.received_commands)
def test_remove_security_group(self):
switch_port = 'Ethernet1'
switch_id = '11:22:33:44:55'
switch_info = 'TOR1'
grp_id, rule = self.create_sg_rule('egress', 'tcp', '10.0.0.0/24')
net_dict = {'network': {'name': 'net',
'tenant_id': 't1',
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
port_dict = {'name': 'port1',
'tenant_id': 't1',
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': 'bm1',
'device_owner': n_const.DEVICE_OWNER_BAREMETAL_PREFIX,
'binding:host_id': 'bm-host',
'binding:profile': {'local_link_information': [
{'switch_id': switch_id,
'port_id': switch_port,
'switch_info': switch_info}]},
'binding:vnic_type': 'baremetal',
'security_groups': [grp_id]}
port, _ = self.create_port(port_dict)
for switch in self.switches.values():
switch.clear_received_commands()
self.delete_port(port['id'])
expected_eapi_commands = [
'show interfaces',
'enable',
'configure',
'interface %s' % switch_port,
'no ip access-group SG-INGRESS-%s out' % grp_id,
'no ip access-group SG-EGRESS-%s in' % grp_id,
'exit',
'exit']
self.assertEqual(expected_eapi_commands,
self.switch1.received_commands)
self.assertEqual([], self.switch2.received_commands)
def test_remove_security_group_lag(self):
switch_port = 'Ethernet1'
port_channel = 'Port-Channel100'
switch_id = '11:22:33:44:55'
switch_info = 'TOR1'
self.create_port_channel(switch_info, switch_port, port_channel)
grp_id, rule = self.create_sg_rule('egress', 'tcp', '10.0.0.0/24')
net_dict = {'network': {'name': 'net',
'tenant_id': 't1',
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
port_dict = {'name': 'port1',
'tenant_id': 't1',
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': 'bm1',
'device_owner': n_const.DEVICE_OWNER_BAREMETAL_PREFIX,
'binding:host_id': 'bm-host',
'binding:profile': {'local_link_information': [
{'switch_id': switch_id,
'port_id': switch_port,
'switch_info': switch_info}]},
'binding:vnic_type': 'baremetal',
'security_groups': [grp_id]}
port, _ = self.create_port(port_dict)
for switch in self.switches.values():
switch.clear_received_commands()
self.delete_port(port['id'])
expected_eapi_commands = [
'show interfaces',
'enable',
'configure',
'interface %s' % port_channel,
'no ip access-group SG-INGRESS-%s out' % grp_id,
'no ip access-group SG-EGRESS-%s in' % grp_id,
'exit',
'exit']
self.assertEqual(expected_eapi_commands,
self.switch1.received_commands)
self.assertEqual([], self.switch2.received_commands)
def test_remove_security_group_mlag(self):
switch_port = 'Ethernet1'
port_channel = 'Port-Channel100'
switch_id = '11:22:33:44:55'
switch1_info = 'TOR1'
switch2_info = 'TOR2'
self.create_port_channel(switch1_info, switch_port, port_channel)
self.create_port_channel(switch2_info, switch_port, port_channel)
grp_id, rule = self.create_sg_rule('egress', 'tcp', '10.0.0.0/24')
net_dict = {'network': {'name': 'net',
'tenant_id': 't1',
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
port_dict = {'name': 'port1',
'tenant_id': 't1',
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': 'bm1',
'device_owner': n_const.DEVICE_OWNER_BAREMETAL_PREFIX,
'binding:host_id': 'bm-host',
'binding:profile': {'local_link_information': [
{'switch_id': switch_id,
'port_id': switch_port,
'switch_info': switch1_info},
{'switch_id': switch_id,
'port_id': switch_port,
'switch_info': switch2_info}]},
'binding:vnic_type': 'baremetal',
'security_groups': [grp_id]}
port, _ = self.create_port(port_dict)
for switch in self.switches.values():
switch.clear_received_commands()
self.delete_port(port['id'])
expected_eapi_commands = [
'show interfaces',
'enable',
'configure',
'interface %s' % port_channel,
'no ip access-group SG-INGRESS-%s out' % grp_id,
'no ip access-group SG-EGRESS-%s in' % grp_id,
'exit',
'exit']
self.assertEqual(expected_eapi_commands,
self.switch1.received_commands)
self.assertEqual(expected_eapi_commands,
self.switch2.received_commands)
def test_apply_security_group_vm(self):
grp_id, rule = self.create_sg_rule('egress', 'tcp', '10.0.0.0/24')
net_dict = {'network': {'name': 'net',
'tenant_id': 't1',
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
port_dict = {'name': 'port1',
'tenant_id': 't1',
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': 'vm1',
'device_owner': n_const.DEVICE_OWNER_COMPUTE_PREFIX,
'binding:host_id': self.host1,
'binding:vnic_type': 'normal',
'security_groups': [grp_id]}
for switch in self.switches.values():
switch.clear_received_commands()
self.create_port(port_dict)
self.assertEqual([], self.switch1.received_commands)
self.assertEqual([], self.switch2.received_commands)
def test_apply_multiple_security_groups(self):
switch_id = '00:11:22:33:44:55'
switch_info = 'TOR1'
switch_port = 'Ethernet1'
proto = 'tcp'
cidr = '10.0.0.0/24'
grp1_id, _ = self.create_sg_rule('egress', proto, cidr)
grp2_id, _ = self.create_sg_rule('ingress', proto, cidr,
default=False)
net_dict = {'network': {'name': 'net',
'tenant_id': 't1',
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
port_dict = {'name': 'port1',
'tenant_id': 't1',
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': 'bm1',
'device_owner': n_const.DEVICE_OWNER_BAREMETAL_PREFIX,
'binding:host_id': 'bm-host',
'binding:profile': {'local_link_information': [
{'switch_id': switch_id,
'port_id': switch_port,
'switch_info': switch_info}]},
'binding:vnic_type': 'baremetal',
'security_groups': [grp1_id, grp2_id]}
for switch in self.switches.values():
switch.clear_received_commands()
port, _ = self.create_port(port_dict)
self.assertEqual([], self.switch1.received_commands)
self.assertEqual([], self.switch2.received_commands)
|
|
import base64
import copy
import socket
import types
from collections import defaultdict
from itertools import count
from queue import Empty
from queue import Queue as _Queue
from unittest.mock import ANY, Mock, call, patch
import pytest
from kombu import Connection, Consumer, Exchange, Producer, Queue
from kombu.exceptions import VersionMismatch
from kombu.transport import virtual
from kombu.utils import eventio # patch poll
from kombu.utils.json import dumps
def _redis_modules():
class ConnectionError(Exception):
pass
class AuthenticationError(Exception):
pass
class InvalidData(Exception):
pass
class InvalidResponse(Exception):
pass
class ResponseError(Exception):
pass
exceptions = types.ModuleType('redis.exceptions')
exceptions.ConnectionError = ConnectionError
exceptions.AuthenticationError = AuthenticationError
exceptions.InvalidData = InvalidData
exceptions.InvalidResponse = InvalidResponse
exceptions.ResponseError = ResponseError
class Redis:
pass
myredis = types.ModuleType('redis')
myredis.exceptions = exceptions
myredis.Redis = Redis
return myredis, exceptions
class _poll(eventio._select):
def register(self, fd, flags):
if flags & eventio.READ:
self._rfd.add(fd)
def poll(self, timeout):
events = []
for fd in self._rfd:
if fd.data:
events.append((fd.fileno(), eventio.READ))
return events
eventio.poll = _poll
pytest.importorskip('redis')
# must import after poller patch, pep8 complains
from kombu.transport import redis # noqa
class ResponseError(Exception):
pass
class Client:
queues = {}
sets = defaultdict(set)
hashes = defaultdict(dict)
shard_hint = None
def __init__(self, db=None, port=None, connection_pool=None, **kwargs):
self._called = []
self._connection = None
self.bgsave_raises_ResponseError = False
self.connection = self._sconnection(self)
def bgsave(self):
self._called.append('BGSAVE')
if self.bgsave_raises_ResponseError:
raise ResponseError()
def delete(self, key):
self.queues.pop(key, None)
def exists(self, key):
return key in self.queues or key in self.sets
def hset(self, key, k, v):
self.hashes[key][k] = v
def hget(self, key, k):
return self.hashes[key].get(k)
def hdel(self, key, k):
self.hashes[key].pop(k, None)
def sadd(self, key, member, *args):
self.sets[key].add(member)
def zadd(self, key, *args):
if redis.redis.VERSION[0] >= 3:
(mapping,) = args
for item in mapping:
self.sets[key].add(item)
else:
# TODO: remove me when we drop support for Redis-py v2
(score1, member1) = args
self.sets[key].add(member1)
def smembers(self, key):
return self.sets.get(key, set())
def ping(self, *args, **kwargs):
return True
def srem(self, key, *args):
self.sets.pop(key, None)
zrem = srem
def llen(self, key):
try:
return self.queues[key].qsize()
except KeyError:
return 0
def lpush(self, key, value):
self.queues[key].put_nowait(value)
def parse_response(self, connection, type, **options):
cmd, queues = self.connection._sock.data.pop()
queues = list(queues)
assert cmd == type
self.connection._sock.data = []
if type == 'BRPOP':
timeout = queues.pop()
item = self.brpop(queues, timeout)
if item:
return item
raise Empty()
def brpop(self, keys, timeout=None):
for key in keys:
try:
item = self.queues[key].get_nowait()
except Empty:
pass
else:
return key, item
def rpop(self, key):
try:
return self.queues[key].get_nowait()
except (KeyError, Empty):
pass
def __contains__(self, k):
return k in self._called
def pipeline(self):
return Pipeline(self)
def encode(self, value):
return str(value)
def _new_queue(self, key):
self.queues[key] = _Queue()
class _sconnection:
disconnected = False
class _socket:
blocking = True
filenos = count(30)
def __init__(self, *args):
self._fileno = next(self.filenos)
self.data = []
def fileno(self):
return self._fileno
def setblocking(self, blocking):
self.blocking = blocking
def __init__(self, client):
self.client = client
self._sock = self._socket()
def disconnect(self):
self.disconnected = True
def send_command(self, cmd, *args):
self._sock.data.append((cmd, args))
def info(self):
return {'foo': 1}
def pubsub(self, *args, **kwargs):
connection = self.connection
class ConnectionPool:
def get_connection(self, *args, **kwargs):
return connection
self.connection_pool = ConnectionPool()
return self
class Pipeline:
def __init__(self, client):
self.client = client
self.stack = []
def __enter__(self):
return self
def __exit__(self, *exc_info):
pass
def __getattr__(self, key):
if key not in self.__dict__:
def _add(*args, **kwargs):
self.stack.append((getattr(self.client, key), args, kwargs))
return self
return _add
return self.__dict__[key]
def execute(self):
stack = list(self.stack)
self.stack[:] = []
return [fun(*args, **kwargs) for fun, args, kwargs in stack]
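# Illustrative note (not in the original tests): this Pipeline mock defers every
# attribute call onto a stack and only replays it against the fake Client when
# execute() is called, e.g.
#     pipe = Pipeline(Client())
#     pipe.sadd('queues', 'q1').llen('q1')
#     pipe.execute()   # -> [None, 0] with the Client mock defined above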
class Channel(redis.Channel):
def _get_client(self):
return Client
def _get_pool(self, asynchronous=False):
return Mock()
def _get_response_error(self):
return ResponseError
def _new_queue(self, queue, **kwargs):
for pri in self.priority_steps:
self.client._new_queue(self._q_for_pri(queue, pri))
def pipeline(self):
return Pipeline(Client())
class Transport(redis.Transport):
Channel = Channel
connection_errors = (KeyError,)
channel_errors = (IndexError,)
class test_Channel:
def setup(self):
self.connection = self.create_connection()
self.channel = self.connection.default_channel
def create_connection(self, **kwargs):
kwargs.setdefault('transport_options', {'fanout_patterns': True})
return Connection(transport=Transport, **kwargs)
def _get_one_delivery_tag(self, n='test_uniq_tag'):
with self.create_connection() as conn1:
chan = conn1.default_channel
chan.exchange_declare(n)
chan.queue_declare(n)
chan.queue_bind(n, n, n)
msg = chan.prepare_message('quick brown fox')
chan.basic_publish(msg, n, n)
payload = chan._get(n)
assert payload
pymsg = chan.message_to_python(payload)
return pymsg.delivery_tag
def test_delivery_tag_is_uuid(self):
seen = set()
for i in range(100):
tag = self._get_one_delivery_tag()
assert tag not in seen
seen.add(tag)
with pytest.raises(ValueError):
int(tag)
assert len(tag) == 36
def test_disable_ack_emulation(self):
conn = Connection(transport=Transport, transport_options={
'ack_emulation': False,
})
chan = conn.channel()
assert not chan.ack_emulation
assert chan.QoS == virtual.QoS
def test_redis_ping_raises(self):
pool = Mock(name='pool')
pool_at_init = [pool]
client = Mock(name='client')
class XChannel(Channel):
def __init__(self, *args, **kwargs):
self._pool = pool_at_init[0]
super().__init__(*args, **kwargs)
def _get_client(self):
return lambda *_, **__: client
class XTransport(Transport):
Channel = XChannel
conn = Connection(transport=XTransport)
client.ping.side_effect = RuntimeError()
with pytest.raises(RuntimeError):
conn.channel()
pool.disconnect.assert_called_with()
pool.disconnect.reset_mock()
pool_at_init = [None]
with pytest.raises(RuntimeError):
conn.channel()
pool.disconnect.assert_not_called()
def test_get_redis_ConnectionError(self):
from redis.exceptions import ConnectionError
from kombu.transport.redis import get_redis_ConnectionError
connection_error = get_redis_ConnectionError()
assert connection_error == ConnectionError
def test_after_fork_cleanup_channel(self):
from kombu.transport.redis import _after_fork_cleanup_channel
channel = Mock()
_after_fork_cleanup_channel(channel)
channel._after_fork.assert_called_once()
def test_after_fork(self):
self.channel._pool = None
self.channel._after_fork()
pool = self.channel._pool = Mock(name='pool')
self.channel._after_fork()
pool.disconnect.assert_called_with()
def test_next_delivery_tag(self):
assert (self.channel._next_delivery_tag() !=
self.channel._next_delivery_tag())
def test_do_restore_message(self):
client = Mock(name='client')
pl1 = {'body': 'BODY'}
spl1 = dumps(pl1)
lookup = self.channel._lookup = Mock(name='_lookup')
lookup.return_value = {'george', 'elaine'}
self.channel._do_restore_message(
pl1, 'ex', 'rkey', client,
)
client.rpush.assert_has_calls([
call('george', spl1), call('elaine', spl1),
], any_order=True)
client = Mock(name='client')
pl2 = {'body': 'BODY2', 'headers': {'x-funny': 1}}
headers_after = dict(pl2['headers'], redelivered=True)
spl2 = dumps(dict(pl2, headers=headers_after))
self.channel._do_restore_message(
pl2, 'ex', 'rkey', client,
)
client.rpush.assert_any_call('george', spl2)
client.rpush.assert_any_call('elaine', spl2)
client.rpush.side_effect = KeyError()
with patch('kombu.transport.redis.crit') as crit:
self.channel._do_restore_message(
pl2, 'ex', 'rkey', client,
)
crit.assert_called()
def test_do_restore_message_celery(self):
# Payload value from real Celery project
payload = {
"body": base64.b64encode(dumps([
[],
{},
{
"callbacks": None,
"errbacks": None,
"chain": None,
"chord": None,
},
]).encode()).decode(),
"content-encoding": "utf-8",
"content-type": "application/json",
"headers": {
"lang": "py",
"task": "common.tasks.test_task",
"id": "980ad2bf-104c-4ce0-8643-67d1947173f6",
"shadow": None,
"eta": None,
"expires": None,
"group": None,
"group_index": None,
"retries": 0,
"timelimit": [None, None],
"root_id": "980ad2bf-104c-4ce0-8643-67d1947173f6",
"parent_id": None,
"argsrepr": "()",
"kwargsrepr": "{}",
"origin": "gen3437@Desktop",
"ignore_result": False,
},
"properties": {
"correlation_id": "980ad2bf-104c-4ce0-8643-67d1947173f6",
"reply_to": "512f2489-ca40-3585-bc10-9b801a981782",
"delivery_mode": 2,
"delivery_info": {
"exchange": "",
"routing_key": "celery",
},
"priority": 0,
"body_encoding": "base64",
"delivery_tag": "badb725e-9c3e-45be-b0a4-07e44630519f",
},
}
result_payload = copy.deepcopy(payload)
result_payload['headers']['redelivered'] = True
result_payload['properties']['delivery_info']['redelivered'] = True
queue = 'celery'
client = Mock(name='client')
lookup = self.channel._lookup = Mock(name='_lookup')
lookup.return_value = [queue]
self.channel._do_restore_message(
payload, 'exchange', 'routing_key', client,
)
client.rpush.assert_called_with(queue, dumps(result_payload))
def test_restore_no_messages(self):
message = Mock(name='message')
with patch('kombu.transport.redis.loads') as loads:
def transaction_handler(restore_transaction, unacked_key):
assert unacked_key == self.channel.unacked_key
pipe = Mock(name='pipe')
pipe.hget.return_value = None
restore_transaction(pipe)
pipe.multi.assert_called_once_with()
pipe.hdel.assert_called_once_with(
unacked_key, message.delivery_tag)
loads.assert_not_called()
client = self.channel._create_client = Mock(name='client')
client = client()
client.transaction.side_effect = transaction_handler
self.channel._restore(message)
client.transaction.assert_called()
def test_restore_messages(self):
message = Mock(name='message')
with patch('kombu.transport.redis.loads') as loads:
def transaction_handler(restore_transaction, unacked_key):
assert unacked_key == self.channel.unacked_key
restore = self.channel._do_restore_message = Mock(
name='_do_restore_message',
)
result = Mock(name='result')
loads.return_value = 'M', 'EX', 'RK'
pipe = Mock(name='pipe')
pipe.hget.return_value = result
restore_transaction(pipe)
loads.assert_called_with(result)
pipe.multi.assert_called_once_with()
pipe.hdel.assert_called_once_with(
unacked_key, message.delivery_tag)
loads.assert_called()
restore.assert_called_with('M', 'EX', 'RK', pipe, False)
client = self.channel._create_client = Mock(name='client')
client = client()
client.transaction.side_effect = transaction_handler
self.channel._restore(message)
def test_qos_restore_visible(self):
client = self.channel._create_client = Mock(name='client')
client = client()
def pipe(*args, **kwargs):
return Pipeline(client)
client.pipeline = pipe
client.zrevrangebyscore.return_value = [
(1, 10),
(2, 20),
(3, 30),
]
qos = redis.QoS(self.channel)
restore = qos.restore_by_tag = Mock(name='restore_by_tag')
qos._vrestore_count = 1
qos.restore_visible()
client.zrevrangebyscore.assert_not_called()
assert qos._vrestore_count == 2
qos._vrestore_count = 0
qos.restore_visible()
restore.assert_has_calls([
call(1, client), call(2, client), call(3, client),
])
assert qos._vrestore_count == 1
qos._vrestore_count = 0
restore.reset_mock()
client.zrevrangebyscore.return_value = []
qos.restore_visible()
restore.assert_not_called()
assert qos._vrestore_count == 1
qos._vrestore_count = 0
client.setnx.side_effect = redis.MutexHeld()
qos.restore_visible()
def test_basic_consume_when_fanout_queue(self):
self.channel.exchange_declare(exchange='txconfan', type='fanout')
self.channel.queue_declare(queue='txconfanq')
self.channel.queue_bind(queue='txconfanq', exchange='txconfan')
assert 'txconfanq' in self.channel._fanout_queues
self.channel.basic_consume('txconfanq', False, None, 1)
assert 'txconfanq' in self.channel.active_fanout_queues
assert self.channel._fanout_to_queue.get('txconfan') == 'txconfanq'
def test_basic_cancel_unknown_delivery_tag(self):
assert self.channel.basic_cancel('txaseqwewq') is None
def test_subscribe_no_queues(self):
self.channel.subclient = Mock()
self.channel.active_fanout_queues.clear()
self.channel._subscribe()
self.channel.subclient.subscribe.assert_not_called()
def test_subscribe(self):
self.channel.subclient = Mock()
self.channel.active_fanout_queues.add('a')
self.channel.active_fanout_queues.add('b')
self.channel._fanout_queues.update(a=('a', ''), b=('b', ''))
self.channel._subscribe()
self.channel.subclient.psubscribe.assert_called()
s_args, _ = self.channel.subclient.psubscribe.call_args
assert sorted(s_args[0]) == ['/{db}.a', '/{db}.b']
self.channel.subclient.connection._sock = None
self.channel._subscribe()
self.channel.subclient.connection.connect.assert_called_with()
def test_handle_unsubscribe_message(self):
s = self.channel.subclient
s.subscribed = True
self.channel._handle_message(s, ['unsubscribe', 'a', 0])
assert not s.subscribed
def test_handle_pmessage_message(self):
res = self.channel._handle_message(
self.channel.subclient,
['pmessage', 'pattern', 'channel', 'data'],
)
assert res == {
'type': 'pmessage',
'pattern': 'pattern',
'channel': 'channel',
'data': 'data',
}
def test_handle_message(self):
res = self.channel._handle_message(
self.channel.subclient,
['type', 'channel', 'data'],
)
assert res == {
'type': 'type',
'pattern': None,
'channel': 'channel',
'data': 'data',
}
def test_brpop_start_but_no_queues(self):
assert self.channel._brpop_start() is None
def test_receive(self):
s = self.channel.subclient = Mock()
self.channel._fanout_to_queue['a'] = 'b'
self.channel.connection._deliver = Mock(name='_deliver')
message = {
'body': 'hello',
'properties': {
'delivery_tag': 1,
'delivery_info': {'exchange': 'E', 'routing_key': 'R'},
},
}
s.parse_response.return_value = ['message', 'a', dumps(message)]
self.channel._receive_one(self.channel.subclient)
self.channel.connection._deliver.assert_called_once_with(
message, 'b',
)
def test_receive_raises_for_connection_error(self):
self.channel._in_listen = True
s = self.channel.subclient = Mock()
s.parse_response.side_effect = KeyError('foo')
with pytest.raises(KeyError):
self.channel._receive_one(self.channel.subclient)
assert not self.channel._in_listen
def test_receive_empty(self):
s = self.channel.subclient = Mock()
s.parse_response.return_value = None
assert self.channel._receive_one(self.channel.subclient) is None
def test_receive_different_message_Type(self):
s = self.channel.subclient = Mock()
s.parse_response.return_value = ['message', '/foo/', 0, 'data']
assert self.channel._receive_one(self.channel.subclient) is None
def test_receive_invalid_response_type(self):
s = self.channel.subclient = Mock()
for resp in ['foo', None]:
s.parse_response.return_value = resp
assert self.channel._receive_one(self.channel.subclient) is None
def test_receive_connection_has_gone(self):
def _receive_one(c):
c.connection = None
_receive_one.called = True
return True
_receive_one.called = False
self.channel._receive_one = _receive_one
assert self.channel._receive()
assert _receive_one.called
def test_brpop_read_raises(self):
c = self.channel.client = Mock()
c.parse_response.side_effect = KeyError('foo')
with pytest.raises(KeyError):
self.channel._brpop_read()
c.connection.disconnect.assert_called_with()
def test_brpop_read_gives_None(self):
c = self.channel.client = Mock()
c.parse_response.return_value = None
with pytest.raises(redis.Empty):
self.channel._brpop_read()
def test_poll_error(self):
c = self.channel.client = Mock()
c.parse_response = Mock()
self.channel._poll_error('BRPOP')
c.parse_response.assert_called_with(c.connection, 'BRPOP')
c.parse_response.side_effect = KeyError('foo')
with pytest.raises(KeyError):
self.channel._poll_error('BRPOP')
def test_poll_error_on_type_LISTEN(self):
c = self.channel.subclient = Mock()
c.parse_response = Mock()
self.channel._poll_error('LISTEN')
c.parse_response.assert_called_with()
c.parse_response.side_effect = KeyError('foo')
with pytest.raises(KeyError):
self.channel._poll_error('LISTEN')
def test_put_fanout(self):
self.channel._in_poll = False
c = self.channel._create_client = Mock()
body = {'hello': 'world'}
self.channel._put_fanout('exchange', body, '')
c().publish.assert_called_with('/{db}.exchange', dumps(body))
def test_put_priority(self):
client = self.channel._create_client = Mock(name='client')
msg1 = {'properties': {'priority': 3}}
self.channel._put('george', msg1)
client().lpush.assert_called_with(
self.channel._q_for_pri('george', 3), dumps(msg1),
)
msg2 = {'properties': {'priority': 313}}
self.channel._put('george', msg2)
client().lpush.assert_called_with(
self.channel._q_for_pri('george', 9), dumps(msg2),
)
msg3 = {'properties': {}}
self.channel._put('george', msg3)
client().lpush.assert_called_with(
self.channel._q_for_pri('george', 0), dumps(msg3),
)
def test_delete(self):
x = self.channel
x._create_client = Mock()
x._create_client.return_value = x.client
delete = x.client.delete = Mock()
srem = x.client.srem = Mock()
x._delete('queue', 'exchange', 'routing_key', None)
delete.assert_has_calls([
call(x._q_for_pri('queue', pri)) for pri in redis.PRIORITY_STEPS
])
srem.assert_called_with(x.keyprefix_queue % ('exchange',),
x.sep.join(['routing_key', '', 'queue']))
def test_has_queue(self):
self.channel._create_client = Mock()
self.channel._create_client.return_value = self.channel.client
exists = self.channel.client.exists = Mock()
exists.return_value = True
assert self.channel._has_queue('foo')
exists.assert_has_calls([
call(self.channel._q_for_pri('foo', pri))
for pri in redis.PRIORITY_STEPS
])
exists.return_value = False
assert not self.channel._has_queue('foo')
def test_close_when_closed(self):
self.channel.closed = True
self.channel.close()
def test_close_deletes_autodelete_fanout_queues(self):
self.channel._fanout_queues = {'foo': ('foo', ''), 'bar': ('bar', '')}
self.channel.auto_delete_queues = ['foo']
self.channel.queue_delete = Mock(name='queue_delete')
client = self.channel.client
self.channel.close()
self.channel.queue_delete.assert_has_calls([
call('foo', client=client),
])
def test_close_client_close_raises(self):
c = self.channel.client = Mock()
connection = c.connection
connection.disconnect.side_effect = self.channel.ResponseError()
self.channel.close()
connection.disconnect.assert_called_with()
def test_invalid_database_raises_ValueError(self):
with pytest.raises(ValueError):
self.channel.connection.client.virtual_host = 'dwqeq'
self.channel._connparams()
def test_connparams_allows_slash_in_db(self):
self.channel.connection.client.virtual_host = '/123'
assert self.channel._connparams()['db'] == 123
def test_connparams_db_can_be_int(self):
self.channel.connection.client.virtual_host = 124
assert self.channel._connparams()['db'] == 124
def test_new_queue_with_auto_delete(self):
redis.Channel._new_queue(self.channel, 'george', auto_delete=False)
assert 'george' not in self.channel.auto_delete_queues
redis.Channel._new_queue(self.channel, 'elaine', auto_delete=True)
assert 'elaine' in self.channel.auto_delete_queues
def test_connparams_regular_hostname(self):
self.channel.connection.client.hostname = 'george.vandelay.com'
assert self.channel._connparams()['host'] == 'george.vandelay.com'
def test_connparams_username(self):
self.channel.connection.client.userid = 'kombu'
assert self.channel._connparams()['username'] == 'kombu'
def test_connparams_client_credentials(self):
self.channel.connection.client.hostname = \
'redis://foo:[email protected]:6379/0'
connection_parameters = self.channel._connparams()
assert connection_parameters['username'] == 'foo'
assert connection_parameters['password'] == 'bar'
def test_connparams_password_for_unix_socket(self):
self.channel.connection.client.hostname = \
'socket://:foo@/var/run/redis.sock'
connection_parameters = self.channel._connparams()
password = connection_parameters['password']
path = connection_parameters['path']
assert (password, path) == ('foo', '/var/run/redis.sock')
self.channel.connection.client.hostname = \
'socket://@/var/run/redis.sock'
connection_parameters = self.channel._connparams()
password = connection_parameters['password']
path = connection_parameters['path']
assert (password, path) == (None, '/var/run/redis.sock')
def test_connparams_health_check_interval_not_supported(self):
with patch('kombu.transport.redis.Channel._create_client'):
with Connection('redis+socket:///tmp/redis.sock') as conn:
conn.default_channel.connection_class = \
Mock(name='connection_class')
connparams = conn.default_channel._connparams()
assert 'health_check_interval' not in connparams
def test_connparams_health_check_interval_supported(self):
with patch('kombu.transport.redis.Channel._create_client'):
with Connection('redis+socket:///tmp/redis.sock') as conn:
connparams = conn.default_channel._connparams()
assert connparams['health_check_interval'] == 25
def test_rotate_cycle_ValueError(self):
cycle = self.channel._queue_cycle
cycle.update(['kramer', 'jerry'])
cycle.rotate('kramer')
assert cycle.items == ['jerry', 'kramer']
cycle.rotate('elaine')
def test_get_client(self):
import redis as R
KombuRedis = redis.Channel._get_client(self.channel)
assert isinstance(KombuRedis(), R.StrictRedis)
Rv = getattr(R, 'VERSION', None)
try:
R.VERSION = (2, 4, 0)
with pytest.raises(VersionMismatch):
redis.Channel._get_client(self.channel)
finally:
if Rv is not None:
R.VERSION = Rv
def test_get_prefixed_client(self):
from kombu.transport.redis import PrefixedStrictRedis
self.channel.global_keyprefix = "test_"
PrefixedRedis = redis.Channel._get_client(self.channel)
assert isinstance(PrefixedRedis(), PrefixedStrictRedis)
def test_get_response_error(self):
from redis.exceptions import ResponseError
assert redis.Channel._get_response_error(self.channel) is ResponseError
def test_avail_client(self):
self.channel._pool = Mock()
cc = self.channel._create_client = Mock()
with self.channel.conn_or_acquire():
pass
cc.assert_called_with()
def test_register_with_event_loop(self):
transport = self.connection.transport
transport.cycle = Mock(name='cycle')
transport.cycle.fds = {12: 'LISTEN', 13: 'BRPOP'}
conn = Mock(name='conn')
conn.client = Mock(name='client', transport_options={})
loop = Mock(name='loop')
redis.Transport.register_with_event_loop(transport, conn, loop)
transport.cycle.on_poll_init.assert_called_with(loop.poller)
loop.call_repeatedly.assert_has_calls([
call(10, transport.cycle.maybe_restore_messages),
call(25, transport.cycle.maybe_check_subclient_health),
])
loop.on_tick.add.assert_called()
on_poll_start = loop.on_tick.add.call_args[0][0]
on_poll_start()
transport.cycle.on_poll_start.assert_called_with()
loop.add_reader.assert_has_calls([
call(12, transport.on_readable, 12),
call(13, transport.on_readable, 13),
])
@pytest.mark.parametrize('fds', [{12: 'LISTEN', 13: 'BRPOP'}, {}])
def test_register_with_event_loop__on_disconnect__loop_cleanup(self, fds):
"""Ensure event loop polling stops on disconnect (if started)."""
transport = self.connection.transport
self.connection._sock = None
transport.cycle = Mock(name='cycle')
transport.cycle.fds = fds
conn = Mock(name='conn')
conn.client = Mock(name='client', transport_options={})
loop = Mock(name='loop')
loop.on_tick = set()
redis.Transport.register_with_event_loop(transport, conn, loop)
assert len(loop.on_tick) == 1
transport.cycle._on_connection_disconnect(self.connection)
if fds:
assert len(loop.on_tick) == 0
else:
# on_tick shouldn't be cleared when polling hasn't started
assert len(loop.on_tick) == 1
def test_configurable_health_check(self):
transport = self.connection.transport
transport.cycle = Mock(name='cycle')
transport.cycle.fds = {12: 'LISTEN', 13: 'BRPOP'}
conn = Mock(name='conn')
conn.client = Mock(name='client', transport_options={
'health_check_interval': 15,
})
loop = Mock(name='loop')
redis.Transport.register_with_event_loop(transport, conn, loop)
transport.cycle.on_poll_init.assert_called_with(loop.poller)
loop.call_repeatedly.assert_has_calls([
call(10, transport.cycle.maybe_restore_messages),
call(15, transport.cycle.maybe_check_subclient_health),
])
loop.on_tick.add.assert_called()
on_poll_start = loop.on_tick.add.call_args[0][0]
on_poll_start()
transport.cycle.on_poll_start.assert_called_with()
loop.add_reader.assert_has_calls([
call(12, transport.on_readable, 12),
call(13, transport.on_readable, 13),
])
def test_transport_on_readable(self):
transport = self.connection.transport
cycle = transport.cycle = Mock(name='cycle')
cycle.on_readable.return_value = None
redis.Transport.on_readable(transport, 13)
cycle.on_readable.assert_called_with(13)
def test_transport_connection_errors(self):
"""Ensure connection_errors are populated."""
assert redis.Transport.connection_errors
def test_transport_channel_errors(self):
"""Ensure connection_errors are populated."""
assert redis.Transport.channel_errors
def test_transport_driver_version(self):
assert redis.Transport.driver_version(self.connection.transport)
def test_transport_errors_when_InvalidData_used(self):
from redis import exceptions
from kombu.transport.redis import get_redis_error_classes
class ID(Exception):
pass
DataError = getattr(exceptions, 'DataError', None)
InvalidData = getattr(exceptions, 'InvalidData', None)
exceptions.InvalidData = ID
exceptions.DataError = None
try:
errors = get_redis_error_classes()
assert errors
assert ID in errors[1]
finally:
if DataError is not None:
exceptions.DataError = DataError
if InvalidData is not None:
exceptions.InvalidData = InvalidData
def test_empty_queues_key(self):
channel = self.channel
channel._in_poll = False
key = channel.keyprefix_queue % 'celery'
# Everything is fine, there is a list of queues.
channel.client.sadd(key, 'celery\x06\x16\x06\x16celery')
assert channel.get_table('celery') == [
('celery', '', 'celery'),
]
# Remove the last queue from the exchange. After this call no queue
# is bound to the exchange.
channel.client.srem(key)
# get_table() should now return an empty list of queues
assert self.channel.get_table('celery') == []
def test_socket_connection(self):
with patch('kombu.transport.redis.Channel._create_client'):
with Connection('redis+socket:///tmp/redis.sock') as conn:
connparams = conn.default_channel._connparams()
assert issubclass(
connparams['connection_class'],
redis.redis.UnixDomainSocketConnection,
)
assert connparams['path'] == '/tmp/redis.sock'
def test_ssl_argument__dict(self):
with patch('kombu.transport.redis.Channel._create_client'):
# Expected format for redis-py's SSLConnection class
ssl_params = {
'ssl_cert_reqs': 2,
'ssl_ca_certs': '/foo/ca.pem',
'ssl_certfile': '/foo/cert.crt',
'ssl_keyfile': '/foo/pkey.key'
}
with Connection('redis://', ssl=ssl_params) as conn:
params = conn.default_channel._connparams()
assert params['ssl_cert_reqs'] == ssl_params['ssl_cert_reqs']
assert params['ssl_ca_certs'] == ssl_params['ssl_ca_certs']
assert params['ssl_certfile'] == ssl_params['ssl_certfile']
assert params['ssl_keyfile'] == ssl_params['ssl_keyfile']
assert params.get('ssl') is None
def test_ssl_connection(self):
with patch('kombu.transport.redis.Channel._create_client'):
with Connection('redis://', ssl={'ssl_cert_reqs': 2}) as conn:
connparams = conn.default_channel._connparams()
assert issubclass(
connparams['connection_class'],
redis.redis.SSLConnection,
)
def test_rediss_connection(self):
with patch('kombu.transport.redis.Channel._create_client'):
with Connection('rediss://') as conn:
connparams = conn.default_channel._connparams()
assert issubclass(
connparams['connection_class'],
redis.redis.SSLConnection,
)
def test_sep_transport_option(self):
with Connection(transport=Transport, transport_options={
'sep': ':',
}) as conn:
key = conn.default_channel.keyprefix_queue % 'celery'
conn.default_channel.client.sadd(key, 'celery::celery')
assert conn.default_channel.sep == ':'
assert conn.default_channel.get_table('celery') == [
('celery', '', 'celery'),
]
@patch("redis.StrictRedis.execute_command")
def test_global_keyprefix(self, mock_execute_command):
from kombu.transport.redis import PrefixedStrictRedis
with Connection(transport=Transport) as conn:
client = PrefixedStrictRedis(global_keyprefix='foo_')
channel = conn.channel()
channel._create_client = Mock()
channel._create_client.return_value = client
body = {'hello': 'world'}
channel._put_fanout('exchange', body, '')
mock_execute_command.assert_called_with(
'PUBLISH',
'foo_/{db}.exchange',
dumps(body)
)
@patch("redis.StrictRedis.execute_command")
def test_global_keyprefix_queue_bind(self, mock_execute_command):
from kombu.transport.redis import PrefixedStrictRedis
with Connection(transport=Transport) as conn:
client = PrefixedStrictRedis(global_keyprefix='foo_')
channel = conn.channel()
channel._create_client = Mock()
channel._create_client.return_value = client
channel._queue_bind('default', '', None, 'queue')
mock_execute_command.assert_called_with(
'SADD',
'foo__kombu.binding.default',
'\x06\x16\x06\x16queue'
)
@patch("redis.client.PubSub.execute_command")
def test_global_keyprefix_pubsub(self, mock_execute_command):
from kombu.transport.redis import PrefixedStrictRedis
with Connection(transport=Transport) as conn:
client = PrefixedStrictRedis(global_keyprefix='foo_')
channel = conn.channel()
channel.global_keyprefix = 'foo_'
channel._create_client = Mock()
channel._create_client.return_value = client
channel.subclient.connection = Mock()
channel.active_fanout_queues.add('a')
channel._subscribe()
mock_execute_command.assert_called_with(
'PSUBSCRIBE',
'foo_/{db}.a',
)
class test_Redis:
def setup(self):
self.connection = Connection(transport=Transport)
self.exchange = Exchange('test_Redis', type='direct')
self.queue = Queue('test_Redis', self.exchange, 'test_Redis')
def teardown(self):
self.connection.close()
@pytest.mark.replace_module_value(redis.redis, 'VERSION', [3, 0, 0])
def test_publish__get_redispyv3(self, replace_module_value):
channel = self.connection.channel()
producer = Producer(channel, self.exchange, routing_key='test_Redis')
self.queue(channel).declare()
producer.publish({'hello': 'world'})
assert self.queue(channel).get().payload == {'hello': 'world'}
assert self.queue(channel).get() is None
assert self.queue(channel).get() is None
assert self.queue(channel).get() is None
@pytest.mark.replace_module_value(redis.redis, 'VERSION', [2, 5, 10])
def test_publish__get_redispyv2(self, replace_module_value):
channel = self.connection.channel()
producer = Producer(channel, self.exchange, routing_key='test_Redis')
self.queue(channel).declare()
producer.publish({'hello': 'world'})
assert self.queue(channel).get().payload == {'hello': 'world'}
assert self.queue(channel).get() is None
assert self.queue(channel).get() is None
assert self.queue(channel).get() is None
def test_publish__consume(self):
connection = Connection(transport=Transport)
channel = connection.channel()
producer = Producer(channel, self.exchange, routing_key='test_Redis')
consumer = Consumer(channel, queues=[self.queue])
producer.publish({'hello2': 'world2'})
_received = []
def callback(message_data, message):
_received.append(message_data)
message.ack()
consumer.register_callback(callback)
consumer.consume()
assert channel in channel.connection.cycle._channels
try:
connection.drain_events(timeout=1)
assert _received
with pytest.raises(socket.timeout):
connection.drain_events(timeout=0.01)
finally:
channel.close()
def test_purge(self):
channel = self.connection.channel()
producer = Producer(channel, self.exchange, routing_key='test_Redis')
self.queue(channel).declare()
for i in range(10):
producer.publish({'hello': f'world-{i}'})
assert channel._size('test_Redis') == 10
assert self.queue(channel).purge() == 10
channel.close()
def test_db_values(self):
Connection(virtual_host=1,
transport=Transport).channel()
Connection(virtual_host='1',
transport=Transport).channel()
Connection(virtual_host='/1',
transport=Transport).channel()
with pytest.raises(Exception):
Connection('redis:///foo').channel()
def test_db_port(self):
c1 = Connection(port=None, transport=Transport).channel()
c1.close()
c2 = Connection(port=9999, transport=Transport).channel()
c2.close()
def test_close_poller_not_active(self):
c = Connection(transport=Transport).channel()
cycle = c.connection.cycle
c.client.connection
c.close()
assert c not in cycle._channels
def test_close_ResponseError(self):
c = Connection(transport=Transport).channel()
c.client.bgsave_raises_ResponseError = True
c.close()
def test_close_disconnects(self):
c = Connection(transport=Transport).channel()
conn1 = c.client.connection
conn2 = c.subclient.connection
c.close()
assert conn1.disconnected
assert conn2.disconnected
def test_get__Empty(self):
channel = self.connection.channel()
with pytest.raises(Empty):
channel._get('does-not-exist')
channel.close()
@pytest.mark.ensured_modules(*_redis_modules())
def test_get_client(self, module_exists):
# with module_exists(*_redis_modules()):
conn = Connection(transport=Transport)
chan = conn.channel()
assert chan.Client
assert chan.ResponseError
assert conn.transport.connection_errors
assert conn.transport.channel_errors
def test_check_at_least_we_try_to_connect_and_fail(self):
import redis
connection = Connection('redis://localhost:65534/')
with pytest.raises(redis.exceptions.ConnectionError):
chan = connection.channel()
chan._size('some_queue')
class test_MultiChannelPoller:
def setup(self):
self.Poller = redis.MultiChannelPoller
def test_on_poll_start(self):
p = self.Poller()
p._channels = []
p.on_poll_start()
p._register_BRPOP = Mock(name='_register_BRPOP')
p._register_LISTEN = Mock(name='_register_LISTEN')
chan1 = Mock(name='chan1')
p._channels = [chan1]
chan1.active_queues = []
chan1.active_fanout_queues = []
p.on_poll_start()
chan1.active_queues = ['q1']
chan1.active_fanout_queues = ['q2']
chan1.qos.can_consume.return_value = False
p.on_poll_start()
p._register_LISTEN.assert_called_with(chan1)
p._register_BRPOP.assert_not_called()
chan1.qos.can_consume.return_value = True
p._register_LISTEN.reset_mock()
p.on_poll_start()
p._register_BRPOP.assert_called_with(chan1)
p._register_LISTEN.assert_called_with(chan1)
def test_on_poll_init(self):
p = self.Poller()
chan1 = Mock(name='chan1')
p._channels = []
poller = Mock(name='poller')
p.on_poll_init(poller)
assert p.poller is poller
p._channels = [chan1]
p.on_poll_init(poller)
chan1.qos.restore_visible.assert_called_with(
num=chan1.unacked_restore_limit,
)
def test_handle_event(self):
p = self.Poller()
chan = Mock(name='chan')
p._fd_to_chan[13] = chan, 'BRPOP'
chan.handlers = {'BRPOP': Mock(name='BRPOP')}
chan.qos.can_consume.return_value = False
p.handle_event(13, redis.READ)
chan.handlers['BRPOP'].assert_not_called()
chan.qos.can_consume.return_value = True
p.handle_event(13, redis.READ)
chan.handlers['BRPOP'].assert_called_with()
p.handle_event(13, redis.ERR)
chan._poll_error.assert_called_with('BRPOP')
p.handle_event(13, ~(redis.READ | redis.ERR))
def test_fds(self):
p = self.Poller()
p._fd_to_chan = {1: 2}
assert p.fds == p._fd_to_chan
def test_close_unregisters_fds(self):
p = self.Poller()
poller = p.poller = Mock()
p._chan_to_sock.update({1: 1, 2: 2, 3: 3})
p.close()
assert poller.unregister.call_count == 3
u_args = poller.unregister.call_args_list
assert sorted(u_args) == [
((1,), {}),
((2,), {}),
((3,), {}),
]
def test_close_when_unregister_raises_KeyError(self):
p = self.Poller()
p.poller = Mock()
p._chan_to_sock.update({1: 1})
p.poller.unregister.side_effect = KeyError(1)
p.close()
def test_close_resets_state(self):
p = self.Poller()
p.poller = Mock()
p._channels = Mock()
p._fd_to_chan = Mock()
p._chan_to_sock = Mock()
p._chan_to_sock.itervalues.return_value = []
p._chan_to_sock.values.return_value = [] # py3k
p.close()
p._channels.clear.assert_called_with()
p._fd_to_chan.clear.assert_called_with()
p._chan_to_sock.clear.assert_called_with()
def test_register_when_registered_reregisters(self):
p = self.Poller()
p.poller = Mock()
channel, client, type = Mock(), Mock(), Mock()
sock = client.connection._sock = Mock()
sock.fileno.return_value = 10
p._chan_to_sock = {(channel, client, type): 6}
p._register(channel, client, type)
p.poller.unregister.assert_called_with(6)
assert p._fd_to_chan[10] == (channel, type)
assert p._chan_to_sock[(channel, client, type)] == sock
p.poller.register.assert_called_with(sock, p.eventflags)
# when client not connected yet
client.connection._sock = None
def after_connected():
client.connection._sock = Mock()
client.connection.connect.side_effect = after_connected
p._register(channel, client, type)
client.connection.connect.assert_called_with()
def test_register_BRPOP(self):
p = self.Poller()
channel = Mock()
channel.client.connection._sock = None
p._register = Mock()
channel._in_poll = False
p._register_BRPOP(channel)
assert channel._brpop_start.call_count == 1
assert p._register.call_count == 1
channel.client.connection._sock = Mock()
p._chan_to_sock[(channel, channel.client, 'BRPOP')] = True
channel._in_poll = True
p._register_BRPOP(channel)
assert channel._brpop_start.call_count == 1
assert p._register.call_count == 1
def test_register_LISTEN(self):
p = self.Poller()
channel = Mock()
channel.subclient.connection._sock = None
channel._in_listen = False
p._register = Mock()
p._register_LISTEN(channel)
p._register.assert_called_with(channel, channel.subclient, 'LISTEN')
assert p._register.call_count == 1
assert channel._subscribe.call_count == 1
channel._in_listen = True
p._chan_to_sock[(channel, channel.subclient, 'LISTEN')] = 3
channel.subclient.connection._sock = Mock()
p._register_LISTEN(channel)
assert p._register.call_count == 1
assert channel._subscribe.call_count == 1
def create_get(self, events=None, queues=None, fanouts=None):
_pr = [] if events is None else events
_aq = [] if queues is None else queues
_af = [] if fanouts is None else fanouts
p = self.Poller()
p.poller = Mock()
p.poller.poll.return_value = _pr
p._register_BRPOP = Mock()
p._register_LISTEN = Mock()
channel = Mock()
p._channels = [channel]
channel.active_queues = _aq
channel.active_fanout_queues = _af
return p, channel
def test_get_no_actions(self):
p, channel = self.create_get()
with pytest.raises(redis.Empty):
p.get(Mock())
def test_qos_reject(self):
p, channel = self.create_get()
qos = redis.QoS(channel)
qos.ack = Mock(name='Qos.ack')
qos.reject(1234)
qos.ack.assert_called_with(1234)
def test_get_brpop_qos_allow(self):
p, channel = self.create_get(queues=['a_queue'])
channel.qos.can_consume.return_value = True
with pytest.raises(redis.Empty):
p.get(Mock())
p._register_BRPOP.assert_called_with(channel)
def test_get_brpop_qos_disallow(self):
p, channel = self.create_get(queues=['a_queue'])
channel.qos.can_consume.return_value = False
with pytest.raises(redis.Empty):
p.get(Mock())
p._register_BRPOP.assert_not_called()
def test_get_listen(self):
p, channel = self.create_get(fanouts=['f_queue'])
with pytest.raises(redis.Empty):
p.get(Mock())
p._register_LISTEN.assert_called_with(channel)
def test_get_receives_ERR(self):
p, channel = self.create_get(events=[(1, eventio.ERR)])
p._fd_to_chan[1] = (channel, 'BRPOP')
with pytest.raises(redis.Empty):
p.get(Mock())
channel._poll_error.assert_called_with('BRPOP')
def test_get_receives_multiple(self):
p, channel = self.create_get(events=[(1, eventio.ERR),
(1, eventio.ERR)])
p._fd_to_chan[1] = (channel, 'BRPOP')
with pytest.raises(redis.Empty):
p.get(Mock())
channel._poll_error.assert_called_with('BRPOP')
class test_Mutex:
def test_mutex(self, lock_id='xxx'):
client = Mock(name='client')
lock = client.lock.return_value = Mock(name='lock')
# Won
lock.acquire.return_value = True
held = False
with redis.Mutex(client, 'foo1', 100):
held = True
assert held
lock.acquire.assert_called_with(blocking=False)
client.lock.assert_called_with('foo1', timeout=100)
client.reset_mock()
lock.reset_mock()
# Did not win
lock.acquire.return_value = False
held = False
with pytest.raises(redis.MutexHeld):
with redis.Mutex(client, 'foo1', 100):
held = True
assert not held
lock.acquire.assert_called_with(blocking=False)
client.lock.assert_called_with('foo1', timeout=100)
client.reset_mock()
lock.reset_mock()
# Wins but raises LockNotOwnedError (and that is ignored)
lock.acquire.return_value = True
lock.release.side_effect = redis.redis.exceptions.LockNotOwnedError()
held = False
with redis.Mutex(client, 'foo1', 100):
held = True
assert held
class test_RedisSentinel:
def test_method_called(self):
from kombu.transport.redis import SentinelChannel
with patch.object(SentinelChannel, '_sentinel_managed_pool') as p:
connection = Connection(
'sentinel://localhost:65534/',
transport_options={
'master_name': 'not_important',
},
)
connection.channel()
p.assert_called()
def test_getting_master_from_sentinel(self):
with patch('redis.sentinel.Sentinel') as patched:
connection = Connection(
'sentinel://localhost/;'
'sentinel://localhost:65532/;'
'sentinel://user@localhost:65533/;'
'sentinel://:password@localhost:65534/;'
'sentinel://user:password@localhost:65535/;',
transport_options={
'master_name': 'not_important',
},
)
connection.channel()
patched.assert_called_once_with(
[
('localhost', 26379),
('localhost', 65532),
('localhost', 65533),
('localhost', 65534),
('localhost', 65535),
],
connection_class=ANY, db=0, max_connections=10,
min_other_sentinels=0, password=None, sentinel_kwargs=None,
socket_connect_timeout=None, socket_keepalive=None,
socket_keepalive_options=None, socket_timeout=None,
username=None, retry_on_timeout=None)
master_for = patched.return_value.master_for
master_for.assert_called()
master_for.assert_called_with('not_important', ANY)
master_for().connection_pool.get_connection.assert_called()
def test_getting_master_from_sentinel_single_node(self):
with patch('redis.sentinel.Sentinel') as patched:
connection = Connection(
'sentinel://localhost:65532/',
transport_options={
'master_name': 'not_important',
},
)
connection.channel()
patched.assert_called_once_with(
[('localhost', 65532)],
connection_class=ANY, db=0, max_connections=10,
min_other_sentinels=0, password=None, sentinel_kwargs=None,
socket_connect_timeout=None, socket_keepalive=None,
socket_keepalive_options=None, socket_timeout=None,
username=None, retry_on_timeout=None)
master_for = patched.return_value.master_for
master_for.assert_called()
master_for.assert_called_with('not_important', ANY)
master_for().connection_pool.get_connection.assert_called()
def test_can_create_connection(self):
from redis.exceptions import ConnectionError
connection = Connection(
'sentinel://localhost:65534/',
transport_options={
'master_name': 'not_important',
},
)
with pytest.raises(ConnectionError):
connection.channel()
def test_missing_master_name_transport_option(self):
connection = Connection(
'sentinel://localhost:65534/',
)
with patch('redis.sentinel.Sentinel'), \
pytest.raises(ValueError) as excinfo:
connection.connect()
expected = "'master_name' transport option must be specified."
assert expected == excinfo.value.args[0]
def test_sentinel_with_ssl(self):
ssl_params = {
'ssl_cert_reqs': 2,
'ssl_ca_certs': '/foo/ca.pem',
'ssl_certfile': '/foo/cert.crt',
'ssl_keyfile': '/foo/pkey.key'
}
with patch('redis.sentinel.Sentinel'):
with Connection(
'sentinel://',
transport_options={'master_name': 'not_important'},
ssl=ssl_params) as conn:
params = conn.default_channel._connparams()
assert params['ssl_cert_reqs'] == ssl_params['ssl_cert_reqs']
assert params['ssl_ca_certs'] == ssl_params['ssl_ca_certs']
assert params['ssl_certfile'] == ssl_params['ssl_certfile']
assert params['ssl_keyfile'] == ssl_params['ssl_keyfile']
assert params.get('ssl') is None
from kombu.transport.redis import SentinelManagedSSLConnection
assert (params['connection_class'] is
SentinelManagedSSLConnection)
class test_GlobalKeyPrefixMixin:
from kombu.transport.redis import GlobalKeyPrefixMixin
global_keyprefix = "prefix_"
mixin = GlobalKeyPrefixMixin()
mixin.global_keyprefix = global_keyprefix
def test_prefix_simple_args(self):
for command in self.mixin.PREFIXED_SIMPLE_COMMANDS:
prefixed_args = self.mixin._prefix_args([command, "fake_key"])
assert prefixed_args == [
command,
f"{self.global_keyprefix}fake_key"
]
def test_prefix_delete_args(self):
prefixed_args = self.mixin._prefix_args([
"DEL",
"fake_key",
"fake_key2",
"fake_key3"
])
assert prefixed_args == [
"DEL",
f"{self.global_keyprefix}fake_key",
f"{self.global_keyprefix}fake_key2",
f"{self.global_keyprefix}fake_key3",
]
def test_prefix_brpop_args(self):
prefixed_args = self.mixin._prefix_args([
"BRPOP",
"fake_key",
"fake_key2",
"not_prefixed"
])
assert prefixed_args == [
"BRPOP",
f"{self.global_keyprefix}fake_key",
f"{self.global_keyprefix}fake_key2",
"not_prefixed",
]
def test_prefix_evalsha_args(self):
prefixed_args = self.mixin._prefix_args([
"EVALSHA",
"not_prefixed",
"not_prefixed",
"fake_key",
"not_prefixed",
])
assert prefixed_args == [
"EVALSHA",
"not_prefixed",
"not_prefixed",
f"{self.global_keyprefix}fake_key",
"not_prefixed",
]
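# Usage sketch (illustrative only, not part of the test suite): enabling the
# key prefixing exercised above from application code. Assumes the
# 'global_keyprefix' transport option available in recent Kombu releases;
# the URL and prefix below are placeholders.
def _global_keyprefix_usage_sketch():
    from kombu import Connection as KombuConnection
    with KombuConnection(
            'redis://localhost:6379/0',
            transport_options={'global_keyprefix': 'myapp_'}) as conn:
        # Every Redis key touched through this connection is prefixed with
        # 'myapp_', so several applications can share one Redis database.
        queue = conn.SimpleQueue('tasks')
        queue.put({'hello': 'world'})
        queue.close()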
|
|
"""
The DoInterestManager keeps track of which parent/zones we currently
have interest in. When you want to "look" into a zone you add an interest
to that zone. When you want to get rid of, or ignore, the objects in that
zone, remove interest in that zone.
p.s. A great deal of this code is just code moved from ClientRepository.py.
"""
from pandac.PandaModules import *
from .MsgTypes import *
from direct.showbase.PythonUtil import *
from direct.showbase import DirectObject
from .PyDatagram import PyDatagram
from direct.directnotify.DirectNotifyGlobal import directNotify
import types
from direct.showbase.PythonUtil import report
class InterestState:
StateActive = 'Active'
StatePendingDel = 'PendingDel'
def __init__(self, desc, state, context, event, parentId, zoneIdList,
eventCounter, auto=False):
self.desc = desc
self.state = state
self.context = context
# We must be ready to keep track of multiple events. If somebody
# requested an interest to be removed and we get a second request
# for removal of the same interest before we get a response for the
# first interest removal, we now have two parts of the codebase
# waiting for a response on the removal of a single interest.
self.events = []
self.eventCounter = eventCounter
if event:
self.addEvent(event)
self.parentId = parentId
self.zoneIdList = zoneIdList
self.auto = auto
def addEvent(self, event):
self.events.append(event)
self.eventCounter.num += 1
def getEvents(self):
return list(self.events)
def clearEvents(self):
self.eventCounter.num -= len(self.events)
assert self.eventCounter.num >= 0
self.events = []
def sendEvents(self):
for event in self.events:
messenger.send(event)
self.clearEvents()
def setDesc(self, desc):
self.desc = desc
def isPendingDelete(self):
return self.state == InterestState.StatePendingDel
def __repr__(self):
return 'InterestState(desc=%s, state=%s, context=%s, event=%s, parentId=%s, zoneIdList=%s)' % (
self.desc, self.state, self.context, self.events, self.parentId, self.zoneIdList)
class InterestHandle:
"""This class helps to ensure that valid handles get passed in to DoInterestManager funcs"""
def __init__(self, id):
self._id = id
def asInt(self):
return self._id
def __eq__(self, other):
if type(self) == type(other):
return self._id == other._id
return self._id == other
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self._id)
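# Illustrative sketch (not part of the original module): the __eq__ above
# lets a handle compare equal both to another InterestHandle with the same
# id and to the bare integer id itself.
def _interestHandleEqualityExample():
    h1 = InterestHandle(7)
    h2 = InterestHandle(7)
    assert h1 == h2          # same wrapped id
    assert h1 == 7           # also equal to the raw int
    assert h1.asInt() == 7
    return repr(h1)          # 'InterestHandle(7)'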
# context value for interest changes that have no complete event
NO_CONTEXT = 0
class DoInterestManager(DirectObject.DirectObject):
"""
Top level Interest Manager
"""
notify = directNotify.newCategory("DoInterestManager")
InterestDebug = ConfigVariableBool('interest-debug', False)
# 'handle' is a number that represents a single interest set that the
# client has requested; the interest set may be modified
_HandleSerialNum = 0
# high bit is reserved for server interests
_HandleMask = 0x7FFF
# 'context' refers to a single request to change an interest set
_ContextIdSerialNum = 100
_ContextIdMask = 0x3FFFFFFF # avoid making Python create a long
_interests = {}
if __debug__:
_debug_interestHistory = []
_debug_maxDescriptionLen = 40
_SerialGen = SerialNumGen()
_SerialNum = serialNum()
def __init__(self):
assert DoInterestManager.notify.debugCall()
DirectObject.DirectObject.__init__(self)
self._addInterestEvent = uniqueName('DoInterestManager-Add')
self._removeInterestEvent = uniqueName('DoInterestManager-Remove')
self._noNewInterests = False
self._completeDelayedCallback = None
# keep track of request contexts that have not completed
self._completeEventCount = ScratchPad(num=0)
self._allInterestsCompleteCallbacks = []
def __verbose(self):
return self.InterestDebug or self.getVerbose()
def _getAnonymousEvent(self, desc):
return 'anonymous-%s-%s' % (desc, DoInterestManager._SerialGen.next())
def setNoNewInterests(self, flag):
self._noNewInterests = flag
def noNewInterests(self):
return self._noNewInterests
def setAllInterestsCompleteCallback(self, callback):
if ((self._completeEventCount.num == 0) and
(self._completeDelayedCallback is None)):
callback()
else:
self._allInterestsCompleteCallbacks.append(callback)
def getAllInterestsCompleteEvent(self):
return 'allInterestsComplete-%s' % DoInterestManager._SerialNum
def resetInterestStateForConnectionLoss(self):
DoInterestManager._interests.clear()
self._completeEventCount = ScratchPad(num=0)
if __debug__:
self._addDebugInterestHistory("RESET", "", 0, 0, 0, [])
def isValidInterestHandle(self, handle):
# pass in a handle (or anything else) and this will return true if it is
# still a valid interest handle
if not isinstance(handle, InterestHandle):
return False
return handle.asInt() in DoInterestManager._interests
def updateInterestDescription(self, handle, desc):
iState = DoInterestManager._interests.get(handle.asInt())
if iState:
iState.setDesc(desc)
def addInterest(self, parentId, zoneIdList, description, event=None):
"""
Look into a (set of) zone(s).
"""
assert DoInterestManager.notify.debugCall()
handle = self._getNextHandle()
# print 'base.cr.addInterest(',description,',',handle,'):',globalClock.getFrameCount()
if self._noNewInterests:
DoInterestManager.notify.warning(
"addInterest: addingInterests on delete: %s" % (handle))
return
# make sure we've got parenting rules set in the DC
if parentId not in (self.getGameDoId(),):
parent = self.getDo(parentId)
if not parent:
DoInterestManager.notify.error(
'addInterest: attempting to add interest under unknown object %s' % parentId)
else:
if not parent.hasParentingRules():
DoInterestManager.notify.error(
'addInterest: no setParentingRules defined in the DC for object %s (%s)'
'' % (parentId, parent.__class__.__name__))
if event:
contextId = self._getNextContextId()
else:
contextId = 0
# event = self._getAnonymousEvent('addInterest')
DoInterestManager._interests[handle] = InterestState(
description, InterestState.StateActive, contextId, event, parentId, zoneIdList, self._completeEventCount)
if self.__verbose():
print('CR::INTEREST.addInterest(handle=%s, parentId=%s, zoneIdList=%s, description=%s, event=%s)' % (
handle, parentId, zoneIdList, description, event))
self._sendAddInterest(handle, contextId, parentId, zoneIdList, description)
if event:
messenger.send(self._getAddInterestEvent(), [event])
assert self.printInterestsIfDebug()
return InterestHandle(handle)
def addAutoInterest(self, parentId, zoneIdList, description):
"""
Look into a (set of) zone(s).
"""
assert DoInterestManager.notify.debugCall()
handle = self._getNextHandle()
if self._noNewInterests:
DoInterestManager.notify.warning(
"addInterest: addingInterests on delete: %s" % (handle))
return
# make sure we've got parenting rules set in the DC
if parentId not in (self.getGameDoId(),):
parent = self.getDo(parentId)
if not parent:
DoInterestManager.notify.error(
'addInterest: attempting to add interest under unknown object %s' % parentId)
else:
if not parent.hasParentingRules():
DoInterestManager.notify.error(
'addInterest: no setParentingRules defined in the DC for object %s (%s)'
'' % (parentId, parent.__class__.__name__))
DoInterestManager._interests[handle] = InterestState(
description, InterestState.StateActive, 0, None, parentId, zoneIdList, self._completeEventCount, True)
if self.__verbose():
print('CR::INTEREST.addInterest(handle=%s, parentId=%s, zoneIdList=%s, description=%s)' % (
handle, parentId, zoneIdList, description))
assert self.printInterestsIfDebug()
return InterestHandle(handle)
def removeInterest(self, handle, event = None):
"""
Stop looking in a (set of) zone(s)
"""
# print 'base.cr.removeInterest(',handle,'):',globalClock.getFrameCount()
assert DoInterestManager.notify.debugCall()
assert isinstance(handle, InterestHandle)
existed = False
if not event:
event = self._getAnonymousEvent('removeInterest')
handle = handle.asInt()
if handle in DoInterestManager._interests:
existed = True
intState = DoInterestManager._interests[handle]
if event:
messenger.send(self._getRemoveInterestEvent(),
[event, intState.parentId, intState.zoneIdList])
if intState.isPendingDelete():
self.notify.warning(
'removeInterest: interest %s already pending removal' %
handle)
# this interest is already pending delete, so let's just tack this
# callback onto the list
if event is not None:
intState.addEvent(event)
else:
if len(intState.events) > 0:
# we're not pending a removal, but we have outstanding events?
# probably we are waiting for an add/alter complete.
# should we send those events now?
assert self.notify.warning('removeInterest: abandoning events: %s' %
intState.events)
intState.clearEvents()
intState.state = InterestState.StatePendingDel
contextId = self._getNextContextId()
intState.context = contextId
if event:
intState.addEvent(event)
self._sendRemoveInterest(handle, contextId)
if not event:
self._considerRemoveInterest(handle)
if self.__verbose():
print('CR::INTEREST.removeInterest(handle=%s, event=%s)' % (
handle, event))
else:
DoInterestManager.notify.warning(
"removeInterest: handle not found: %s" % (handle))
assert self.printInterestsIfDebug()
return existed
def removeAutoInterest(self, handle):
"""
Stop looking in a (set of) zone(s)
"""
assert DoInterestManager.notify.debugCall()
assert isinstance(handle, InterestHandle)
existed = False
handle = handle.asInt()
if handle in DoInterestManager._interests:
existed = True
intState = DoInterestManager._interests[handle]
if intState.isPendingDelete():
self.notify.warning(
'removeInterest: interest %s already pending removal' %
handle)
# this interest is already pending delete, so let's just tack this
# callback onto the list
else:
if len(intState.events) > 0:
# we're not pending a removal, but we have outstanding events?
# probably we are waiting for an add/alter complete.
# should we send those events now?
self.notify.warning('removeInterest: abandoning events: %s' %
intState.events)
intState.clearEvents()
intState.state = InterestState.StatePendingDel
self._considerRemoveInterest(handle)
if self.__verbose():
print('CR::INTEREST.removeAutoInterest(handle=%s)' % (handle))
else:
DoInterestManager.notify.warning(
"removeInterest: handle not found: %s" % (handle))
assert self.printInterestsIfDebug()
return existed
@report(types = ['args'], dConfigParam = 'guildmgr')
def removeAIInterest(self, handle):
"""
handle is NOT an InterestHandle. It's just a bare integer representing an
AI opened interest. We're making the client close down this interest since
the AI has trouble removing interests (that it opened) when the avatar goes
offline. See GuildManager(UD) for how it's being used.
"""
self._sendRemoveAIInterest(handle)
def alterInterest(self, handle, parentId, zoneIdList, description=None,
event=None):
"""
Removes old interests and adds new interests.
Note that when an interest is changed, only the most recent
change's event will be triggered. Previous events are abandoned.
If this is a problem, consider opening multiple interests.
"""
assert DoInterestManager.notify.debugCall()
assert isinstance(handle, InterestHandle)
#assert not self._noNewInterests
handle = handle.asInt()
if self._noNewInterests:
DoInterestManager.notify.warning(
"alterInterest: addingInterests on delete: %s" % (handle))
return
exists = False
if event is None:
event = self._getAnonymousEvent('alterInterest')
if handle in DoInterestManager._interests:
if description is not None:
DoInterestManager._interests[handle].desc = description
else:
description = DoInterestManager._interests[handle].desc
# are we overriding an existing change?
if DoInterestManager._interests[handle].context != NO_CONTEXT:
DoInterestManager._interests[handle].clearEvents()
contextId = self._getNextContextId()
DoInterestManager._interests[handle].context = contextId
DoInterestManager._interests[handle].parentId = parentId
DoInterestManager._interests[handle].zoneIdList = zoneIdList
DoInterestManager._interests[handle].addEvent(event)
if self.__verbose():
print('CR::INTEREST.alterInterest(handle=%s, parentId=%s, zoneIdList=%s, description=%s, event=%s)' % (
handle, parentId, zoneIdList, description, event))
self._sendAddInterest(handle, contextId, parentId, zoneIdList, description, action='modify')
exists = True
assert self.printInterestsIfDebug()
else:
DoInterestManager.notify.warning(
"alterInterest: handle not found: %s" % (handle))
return exists
def openAutoInterests(self, obj):
if hasattr(obj, '_autoInterestHandle'):
# must be multiple inheritance
self.notify.debug('openAutoInterests(%s): interests already open' % obj.__class__.__name__)
return
autoInterests = obj.getAutoInterests()
obj._autoInterestHandle = None
if not len(autoInterests):
return
obj._autoInterestHandle = self.addAutoInterest(obj.doId, autoInterests, '%s-autoInterest' % obj.__class__.__name__)
def closeAutoInterests(self, obj):
if not hasattr(obj, '_autoInterestHandle'):
# must be multiple inheritance
self.notify.debug('closeAutoInterests(%s): interests already closed' % obj)
return
if obj._autoInterestHandle is not None:
self.removeAutoInterest(obj._autoInterestHandle)
del obj._autoInterestHandle
# events for InterestWatcher
def _getAddInterestEvent(self):
return self._addInterestEvent
def _getRemoveInterestEvent(self):
return self._removeInterestEvent
def _getInterestState(self, handle):
return DoInterestManager._interests[handle]
def _getNextHandle(self):
handle = DoInterestManager._HandleSerialNum
while True:
handle = (handle + 1) & DoInterestManager._HandleMask
# skip handles that are already in use
if handle not in DoInterestManager._interests:
break
DoInterestManager.notify.warning(
'interest %s already in use' % handle)
DoInterestManager._HandleSerialNum = handle
return DoInterestManager._HandleSerialNum
def _getNextContextId(self):
contextId = DoInterestManager._ContextIdSerialNum
while True:
contextId = (contextId + 1) & DoInterestManager._ContextIdMask
# skip over the 'no context' id
if contextId != NO_CONTEXT:
break
DoInterestManager._ContextIdSerialNum = contextId
return DoInterestManager._ContextIdSerialNum
def _considerRemoveInterest(self, handle):
"""
Consider whether we should cull the interest set.
"""
assert DoInterestManager.notify.debugCall()
if handle in DoInterestManager._interests:
if DoInterestManager._interests[handle].isPendingDelete():
# make sure there is no pending event for this interest
if DoInterestManager._interests[handle].context == NO_CONTEXT:
assert len(DoInterestManager._interests[handle].events) == 0
del DoInterestManager._interests[handle]
if __debug__:
def printInterestsIfDebug(self):
if DoInterestManager.notify.getDebug():
self.printInterests()
return 1 # for assert
def _addDebugInterestHistory(self, action, description, handle,
contextId, parentId, zoneIdList):
if description is None:
description = ''
DoInterestManager._debug_interestHistory.append(
(action, description, handle, contextId, parentId, zoneIdList))
DoInterestManager._debug_maxDescriptionLen = max(
DoInterestManager._debug_maxDescriptionLen, len(description))
def printInterestHistory(self):
print("***************** Interest History *************")
format = '%9s %' + str(DoInterestManager._debug_maxDescriptionLen) + 's %6s %6s %9s %s'
print(format % (
"Action", "Description", "Handle", "Context", "ParentId",
"ZoneIdList"))
for i in DoInterestManager._debug_interestHistory:
print(format % tuple(i))
print("Note: interests with a Context of 0 do not get" \
" done/finished notices.")
def printInterestSets(self):
print("******************* Interest Sets **************")
format = '%6s %' + str(DoInterestManager._debug_maxDescriptionLen) + 's %11s %11s %8s %8s %8s'
print(format % (
"Handle", "Description",
"ParentId", "ZoneIdList",
"State", "Context",
"Event"))
for id, state in DoInterestManager._interests.items():
if len(state.events) == 0:
event = ''
elif len(state.events) == 1:
event = state.events[0]
else:
event = state.events
print(format % (id, state.desc,
state.parentId, state.zoneIdList,
state.state, state.context,
event))
print("************************************************")
def printInterests(self):
self.printInterestHistory()
self.printInterestSets()
def _sendAddInterest(self, handle, contextId, parentId, zoneIdList, description,
action=None):
"""
Part of the new otp-server code.
handle is a client-side created number that refers to
a set of interests. The same handle number doesn't
necessarily have any relationship to the same handle
on another client.
"""
assert DoInterestManager.notify.debugCall()
if __debug__:
if isinstance(zoneIdList, list):
zoneIdList.sort()
if action is None:
action = 'add'
self._addDebugInterestHistory(
action, description, handle, contextId, parentId, zoneIdList)
if parentId == 0:
DoInterestManager.notify.error(
'trying to set interest to invalid parent: %s' % parentId)
datagram = PyDatagram()
# Add message type
datagram.addUint16(CLIENT_ADD_INTEREST)
datagram.addUint16(handle)
datagram.addUint32(contextId)
datagram.addUint32(parentId)
if isinstance(zoneIdList, list):
vzl = list(zoneIdList)
vzl.sort()
uniqueElements(vzl)
for zone in vzl:
datagram.addUint32(zone)
else:
datagram.addUint32(zoneIdList)
self.send(datagram)
def _sendRemoveInterest(self, handle, contextId):
"""
handle is a client-side created number that refers to
a set of interests. The same handle number doesn't
necessarily have any relationship to the same handle
on another client.
"""
assert DoInterestManager.notify.debugCall()
assert handle in DoInterestManager._interests
datagram = PyDatagram()
# Add message type
datagram.addUint16(CLIENT_REMOVE_INTEREST)
datagram.addUint16(handle)
if contextId != 0:
datagram.addUint32(contextId)
self.send(datagram)
if __debug__:
state = DoInterestManager._interests[handle]
self._addDebugInterestHistory(
"remove", state.desc, handle, contextId,
state.parentId, state.zoneIdList)
def _sendRemoveAIInterest(self, handle):
"""
handle is a bare int, NOT an InterestHandle. Use this to
close an AI opened interest.
"""
datagram = PyDatagram()
# Add message type
datagram.addUint16(CLIENT_REMOVE_INTEREST)
datagram.addUint16((1<<15) + handle)
self.send(datagram)
def cleanupWaitAllInterestsComplete(self):
if self._completeDelayedCallback is not None:
self._completeDelayedCallback.destroy()
self._completeDelayedCallback = None
def queueAllInterestsCompleteEvent(self, frames=5):
# wait for N frames, if no new interests, send out all-done event
# calling this is OK even if there are no pending interest completes
def checkMoreInterests():
# if there are new interests, cancel this delayed callback, another
# will automatically be scheduled when all interests complete
# print 'checkMoreInterests(',self._completeEventCount.num,'):',globalClock.getFrameCount()
return self._completeEventCount.num > 0
def sendEvent():
messenger.send(self.getAllInterestsCompleteEvent())
for callback in self._allInterestsCompleteCallbacks:
callback()
self._allInterestsCompleteCallbacks = []
self.cleanupWaitAllInterestsComplete()
self._completeDelayedCallback = FrameDelayedCall(
'waitForAllInterestCompletes',
callback=sendEvent,
frames=frames,
cancelFunc=checkMoreInterests)
checkMoreInterests = None
sendEvent = None
def handleInterestDoneMessage(self, di):
"""
This handles the interest done messages and may dispatch an event
"""
assert DoInterestManager.notify.debugCall()
handle = di.getUint16()
contextId = di.getUint32()
if self.__verbose():
print('CR::INTEREST.interestDone(handle=%s)' % handle)
DoInterestManager.notify.debug(
"handleInterestDoneMessage--> Received handle %s, context %s" % (
handle, contextId))
if handle in DoInterestManager._interests:
eventsToSend = []
# if the context matches, send out the event
if contextId == DoInterestManager._interests[handle].context:
DoInterestManager._interests[handle].context = NO_CONTEXT
# the event handlers may call back into the interest manager. Send out
# the events after we're once again in a stable state.
#DoInterestManager._interests[handle].sendEvents()
eventsToSend = list(DoInterestManager._interests[handle].getEvents())
DoInterestManager._interests[handle].clearEvents()
else:
DoInterestManager.notify.debug(
"handleInterestDoneMessage--> handle: %s: Expecting context %s, got %s" % (
handle, DoInterestManager._interests[handle].context, contextId))
if __debug__:
state = DoInterestManager._interests[handle]
self._addDebugInterestHistory(
"finished", state.desc, handle, contextId, state.parentId,
state.zoneIdList)
self._considerRemoveInterest(handle)
for event in eventsToSend:
messenger.send(event)
else:
DoInterestManager.notify.warning(
"handleInterestDoneMessage: handle not found: %s" % (handle))
# if there are no more outstanding interest-completes, send out global all-done event
if self._completeEventCount.num == 0:
self.queueAllInterestsCompleteEvent()
assert self.printInterestsIfDebug()
if __debug__:
import time
import unittest
class AsyncTestCase(unittest.TestCase):
def setCompleted(self):
self._async_completed = True
def isCompleted(self):
return getattr(self, '_async_completed', False)
class AsyncTestSuite(unittest.TestSuite):
pass
class AsyncTestLoader(unittest.TestLoader):
suiteClass = AsyncTestSuite
class AsyncTextTestRunner(unittest.TextTestRunner):
def run(self, testCase):
result = self._makeResult()
startTime = time.time()
testCase(result)
stopTime = time.time()
timeTaken = stopTime - startTime
result.printErrors()
self.stream.writeln(result.separator2)
run = result.testsRun
self.stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", timeTaken))
self.stream.writeln()
if not result.wasSuccessful():
self.stream.write("FAILED (")
failed, errored = map(len, (result.failures, result.errors))
if failed:
self.stream.write("failures=%d" % failed)
if errored:
if failed: self.stream.write(", ")
self.stream.write("errors=%d" % errored)
self.stream.writeln(")")
else:
self.stream.writeln("OK")
return result
class TestInterestAddRemove(AsyncTestCase, DirectObject.DirectObject):
def testInterestAdd(self):
event = uniqueName('InterestAdd')
self.acceptOnce(event, self.gotInterestAddResponse)
self.handle = base.cr.addInterest(base.cr.GameGlobalsId, 100, 'TestInterest', event=event)
def gotInterestAddResponse(self):
event = uniqueName('InterestRemove')
self.acceptOnce(event, self.gotInterestRemoveResponse)
base.cr.removeInterest(self.handle, event=event)
def gotInterestRemoveResponse(self):
self.setCompleted()
def runTests():
suite = unittest.makeSuite(TestInterestAddRemove)
AsyncTextTestRunner(verbosity=2).run(suite)
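# Usage sketch (illustrative only, not part of the original module): the
# add/alter/remove flow described in the module docstring. Assumes 'cr' is a
# connected client repository mixing in DoInterestManager and that
# 'parentId'/'zoneId' are valid for that repository.
def _interestUsageSketch(cr, parentId, zoneId):
    # Open an interest and ask to be notified when the server confirms it.
    handle = cr.addInterest(parentId, [zoneId], 'example-interest',
                            event='example-interest-added')
    # Later, retarget the same interest set at a different zone.
    cr.alterInterest(handle, parentId, [zoneId + 1],
                     description='example-interest-moved')
    # Finally, stop looking into the zone(s) entirely.
    cr.removeInterest(handle, event='example-interest-removed')
    return handle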
|
|
import copy
import os
from contextlib import contextmanager
from zope.interface import (
implementer,
alsoProvides,
)
from pyramid.interfaces import (
IRequest,
IResponseFactory,
ISession,
)
from pyramid.compat import (
PY3,
PYPY,
class_types,
)
from pyramid.config import Configurator
from pyramid.decorator import reify
from pyramid.response import Response
from pyramid.registry import Registry
from pyramid.security import (
Authenticated,
Everyone,
)
from pyramid.threadlocal import (
get_current_registry,
manager,
)
from pyramid.i18n import LocalizerRequestMixin
from pyramid.request import CallbackMethodsMixin
from pyramid.url import URLMethodsMixin
from pyramid.util import InstancePropertyMixin
_marker = object()
class DummyRootFactory(object):
__parent__ = None
__name__ = None
def __init__(self, request):
if 'bfg.routes.matchdict' in request:
self.__dict__.update(request['bfg.routes.matchdict'])
class DummySecurityPolicy(object):
""" A standin for both an IAuthentication and IAuthorization policy """
def __init__(self, userid=None, groupids=(), permissive=True,
remember_result=None, forget_result=None):
self.userid = userid
self.groupids = groupids
self.permissive = permissive
if remember_result is None:
remember_result = []
if forget_result is None:
forget_result = []
self.remember_result = remember_result
self.forget_result = forget_result
def authenticated_userid(self, request):
return self.userid
def unauthenticated_userid(self, request):
return self.userid
def effective_principals(self, request):
effective_principals = [Everyone]
if self.userid:
effective_principals.append(Authenticated)
effective_principals.append(self.userid)
effective_principals.extend(self.groupids)
return effective_principals
def remember(self, request, principal, **kw):
self.remembered = principal
return self.remember_result
def forget(self, request):
self.forgotten = True
return self.forget_result
def permits(self, context, principals, permission):
return self.permissive
def principals_allowed_by_permission(self, context, permission):
return self.effective_principals(None)
class DummyTemplateRenderer(object):
"""
An instance of this class is returned from
:meth:`pyramid.config.Configurator.testing_add_renderer`. It has a
helper function (``assert_``) that makes it possible to make an
assertion which compares data passed to the renderer by the view
function against expected key/value pairs.
"""
def __init__(self, string_response=''):
self._received = {}
self._string_response = string_response
self._implementation = MockTemplate(string_response)
# For in-the-wild test code that doesn't create its own renderer,
# but mutates our internals instead. When all you read is the
# source code, *everything* is an API!
def _get_string_response(self):
return self._string_response
def _set_string_response(self, response):
self._string_response = response
self._implementation.response = response
string_response = property(_get_string_response, _set_string_response)
def implementation(self):
return self._implementation
def __call__(self, kw, system=None):
if system:
self._received.update(system)
self._received.update(kw)
return self.string_response
def __getattr__(self, k):
""" Backwards compatibility """
val = self._received.get(k, _marker)
if val is _marker:
val = self._implementation._received.get(k, _marker)
if val is _marker:
raise AttributeError(k)
return val
def assert_(self, **kw):
""" Accept an arbitrary set of assertion key/value pairs. For
each assertion key/value pair assert that the renderer
(eg. :func:`pyramid.renderers.render_to_response`)
received the key with a value that equals the asserted
value. If the renderer did not receive the key at all, or the
value received by the renderer doesn't match the assertion
value, raise an :exc:`AssertionError`."""
for k, v in kw.items():
myval = self._received.get(k, _marker)
if myval is _marker:
myval = self._implementation._received.get(k, _marker)
if myval is _marker:
raise AssertionError(
'A value for key "%s" was not passed to the renderer'
% k)
if myval != v:
raise AssertionError(
'\nasserted value for %s: %r\nactual value: %r' % (
k, v, myval))
return True
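# Usage sketch (illustrative only): the assert_ helper above used together
# with Configurator.testing_add_renderer. 'templates/greeting.pt' and the
# inline view below are placeholders; setUp/tearDown are the helpers defined
# elsewhere in this module.
def _dummy_renderer_usage_sketch():
    config = setUp()
    try:
        renderer = config.testing_add_renderer('templates/greeting.pt')
        def greeting_view(request):
            # stand-in for a real view callable under test
            from pyramid.renderers import render_to_response
            return render_to_response('templates/greeting.pt',
                                      {'greeting': 'Hello world'},
                                      request=request)
        greeting_view(DummyRequest())
        # raises AssertionError unless the renderer received this value
        renderer.assert_(greeting='Hello world')
    finally:
        tearDown()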
class DummyResource:
""" A dummy :app:`Pyramid` :term:`resource` object."""
def __init__(self, __name__=None, __parent__=None, __provides__=None,
**kw):
""" The resource's ``__name__`` attribute will be set to the
value of the ``__name__`` argument, and the resource's
``__parent__`` attribute will be set to the value of the
``__parent__`` argument. If ``__provides__`` is specified, it
should be an interface object or tuple of interface objects
that will be attached to the resulting resource via
:func:`zope.interface.alsoProvides`. Any extra keywords passed
in the ``kw`` argument will be set as direct attributes of
the resource object.
.. note:: For backwards compatibility purposes, this class can also
be imported as :class:`pyramid.testing.DummyModel`.
"""
self.__name__ = __name__
self.__parent__ = __parent__
if __provides__ is not None:
alsoProvides(self, __provides__)
self.kw = kw
self.__dict__.update(**kw)
self.subs = {}
def __setitem__(self, name, val):
""" When the ``__setitem__`` method is called, the object
passed in as ``val`` will be decorated with a ``__parent__``
attribute pointing at the dummy resource and a ``__name__``
attribute that is the value of ``name``. The value will then
be returned when the dummy resource's ``__getitem__`` is called with
the name ``name``."""
val.__name__ = name
val.__parent__ = self
self.subs[name] = val
def __getitem__(self, name):
""" Return a named subobject (see ``__setitem__``)"""
ob = self.subs[name]
return ob
def __delitem__(self, name):
del self.subs[name]
def get(self, name, default=None):
return self.subs.get(name, default)
def values(self):
""" Return the values set by __setitem__ """
return self.subs.values()
def items(self):
""" Return the items set by __setitem__ """
return self.subs.items()
def keys(self):
""" Return the keys set by __setitem__ """
return self.subs.keys()
__iter__ = keys
def __nonzero__(self):
return True
__bool__ = __nonzero__
def __len__(self):
return len(self.subs)
def __contains__(self, name):
return name in self.subs
def clone(self, __name__=_marker, __parent__=_marker, **kw):
""" Create a clone of the resource object. If ``__name__`` or
``__parent__`` arguments are passed, use these values to
override the existing ``__name__`` or ``__parent__`` of the
resource. If any extra keyword args are passed in via the ``kw``
argument, use these keywords to add to or override existing
resource keywords (attributes)."""
oldkw = self.kw.copy()
oldkw.update(kw)
inst = self.__class__(self.__name__, self.__parent__, **oldkw)
inst.subs = copy.deepcopy(self.subs)
if __name__ is not _marker:
inst.__name__ = __name__
if __parent__ is not _marker:
inst.__parent__ = __parent__
return inst
DummyModel = DummyResource # b/w compat (forever)
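# Usage sketch (illustrative only): wiring up a small resource tree with the
# __setitem__/__getitem__ behaviour documented above, as traversal-style
# tests typically do.
def _dummy_resource_usage_sketch():
    root = DummyResource()
    child = DummyResource()
    root['child'] = child        # sets child.__name__ and child.__parent__
    assert root['child'] is child
    assert child.__name__ == 'child'
    assert child.__parent__ is root
    assert 'child' in root and len(root) == 1
    # clone() copies keyword attributes and deep-copies the sub-resources
    duplicate = root.clone()
    assert 'child' in duplicate
    return duplicate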
@implementer(ISession)
class DummySession(dict):
created = None
new = True
def changed(self):
pass
def invalidate(self):
self.clear()
def flash(self, msg, queue='', allow_duplicate=True):
storage = self.setdefault('_f_' + queue, [])
if allow_duplicate or (msg not in storage):
storage.append(msg)
def pop_flash(self, queue=''):
storage = self.pop('_f_' + queue, [])
return storage
def peek_flash(self, queue=''):
storage = self.get('_f_' + queue, [])
return storage
def new_csrf_token(self):
token = '0123456789012345678901234567890123456789'
self['_csrft_'] = token
return token
def get_csrf_token(self):
token = self.get('_csrft_', None)
if token is None:
token = self.new_csrf_token()
return token
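# Usage sketch (illustrative only): the flash-message and CSRF helpers above
# mirror their pyramid.session counterparts closely enough for unit tests.
def _dummy_session_usage_sketch():
    session = DummySession()
    session.flash('saved!')                    # default queue
    session.flash('oops', queue='errors')
    assert session.peek_flash() == ['saved!']
    assert session.pop_flash('errors') == ['oops']
    token = session.get_csrf_token()           # lazily created on first access
    assert token == session.get_csrf_token()   # stable until new_csrf_token()
    return session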
@implementer(IRequest)
class DummyRequest(URLMethodsMixin, CallbackMethodsMixin, InstancePropertyMixin,
LocalizerRequestMixin):
""" A DummyRequest object (incompletely) imitates a :term:`request` object.
The ``params``, ``environ``, ``headers``, ``path``, and
``cookies`` arguments correspond to their :term:`WebOb`
equivalents.
The ``post`` argument, if passed, populates the request's
``POST`` attribute, but *not* ``params``, in order to allow testing
that the app accepts data for a given view only from POST requests.
This argument also sets ``self.method`` to "POST".
Extra keyword arguments are assigned as attributes of the request
itself.
Note that DummyRequest does not have complete fidelity with a "real"
request. For example, by default, the DummyRequest ``GET`` and ``POST``
attributes are of type ``dict``, unlike a normal Request's GET and POST,
which are of type ``MultiDict``. If your code uses the features of
MultiDict, you should either use a real :class:`pyramid.request.Request`
or adapt your DummyRequest by replacing the attributes with ``MultiDict``
instances.
Other similar incompatibilities exist. If you need all the features of
a Request, use the :class:`pyramid.request.Request` class itself rather
than this class while writing tests.
"""
method = 'GET'
application_url = 'http://example.com'
host = 'example.com:80'
content_length = 0
query_string = ''
charset = 'UTF-8'
script_name = ''
_registry = None
def __init__(self, params=None, environ=None, headers=None, path='/',
cookies=None, post=None, **kw):
if environ is None:
environ = {}
if params is None:
params = {}
if headers is None:
headers = {}
if cookies is None:
cookies = {}
self.environ = environ
self.headers = headers
self.params = params
self.cookies = cookies
self.matchdict = {}
self.GET = params
if post is not None:
self.method = 'POST'
self.POST = post
else:
self.POST = params
self.host_url = self.application_url
self.path_url = self.application_url
self.url = self.application_url
self.path = path
self.path_info = path
self.script_name = ''
self.path_qs = ''
self.body = ''
self.view_name = ''
self.subpath = ()
self.traversed = ()
self.virtual_root_path = ()
self.context = None
self.root = None
self.virtual_root = None
self.marshalled = params # repoze.monty
self.session = DummySession()
self.__dict__.update(kw)
def _get_registry(self):
if self._registry is None:
return get_current_registry()
return self._registry
def _set_registry(self, registry):
self._registry = registry
def _del_registry(self):
self._registry = None
registry = property(_get_registry, _set_registry, _del_registry)
@reify
def response(self):
f = self.registry.queryUtility(IResponseFactory, default=Response)
return f()
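# Illustrative sketch (not part of the original module): one way to follow the
# MultiDict advice in the DummyRequest docstring above. WebOb is assumed to be
# importable since it is a Pyramid dependency; the parameter values are made up.
#
#   from webob.multidict import MultiDict
#   request = DummyRequest(params=MultiDict([('name', 'a'), ('name', 'b')]))
#   request.GET.getall('name')   # ['a', 'b'], as with a real request's GET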
have_zca = True
def setUp(registry=None, request=None, hook_zca=True, autocommit=True,
settings=None):
"""
Set :app:`Pyramid` registry and request thread locals for the
duration of a single unit test.
Use this function in the ``setUp`` method of a unittest test case
which directly or indirectly uses:
- any method of the :class:`pyramid.config.Configurator`
object returned by this function.
- the :func:`pyramid.threadlocal.get_current_registry` or
:func:`pyramid.threadlocal.get_current_request` functions.
If you use the ``get_current_*`` functions (or call :app:`Pyramid` code
that uses these functions) without calling ``setUp``,
:func:`pyramid.threadlocal.get_current_registry` will return a *global*
:term:`application registry`, which may cause unit tests to not be
isolated with respect to registrations they perform.
If the ``registry`` argument is ``None``, a new empty
:term:`application registry` will be created (an instance of the
:class:`pyramid.registry.Registry` class). If the ``registry``
argument is not ``None``, the value passed in should be an
instance of the :class:`pyramid.registry.Registry` class or a
suitable testing analogue.
After ``setUp`` is finished, the registry returned by the
:func:`pyramid.threadlocal.get_current_registry` function will
be the passed (or constructed) registry until
:func:`pyramid.testing.tearDown` is called (or
:func:`pyramid.testing.setUp` is called again).
If the ``hook_zca`` argument is ``True``, ``setUp`` will attempt
to perform the operation ``zope.component.getSiteManager.sethook(
pyramid.threadlocal.get_current_registry)``, which will cause
the :term:`Zope Component Architecture` global API
(e.g. :func:`zope.component.getSiteManager`,
:func:`zope.component.getAdapter`, and so on) to use the registry
constructed by ``setUp`` as the value it returns from
:func:`zope.component.getSiteManager`. If the
:mod:`zope.component` package cannot be imported, or if
``hook_zca`` is ``False``, the hook will not be set.
If ``settings`` is not None, it must be a dictionary representing the
values passed to a Configurator as its ``settings=`` argument.
This function returns an instance of the
:class:`pyramid.config.Configurator` class, which can be
used for further configuration to set up an environment suitable
for a unit or integration test. The ``registry`` attribute
attached to the Configurator instance represents the 'current'
:term:`application registry`; the same registry will be returned
by :func:`pyramid.threadlocal.get_current_registry` during the
execution of the test.
"""
manager.clear()
if registry is None:
registry = Registry('testing')
config = Configurator(registry=registry, autocommit=autocommit)
if settings is None:
settings = {}
if getattr(registry, 'settings', None) is None:
config._set_settings(settings)
if hasattr(registry, 'registerUtility'):
# Sometimes nose calls us with a non-registry object because
# it thinks this function is module test setup. Likewise,
# someone may be passing us an esoteric "dummy" registry, and
# the below won't succeed if it doesn't have a registerUtility
# method.
config.add_default_renderers()
config.add_default_view_predicates()
config.add_default_route_predicates()
config.commit()
global have_zca
try:
have_zca and hook_zca and config.hook_zca()
except ImportError: # pragma: no cover
# (don't choke on not being able to import zope.component)
have_zca = False
config.begin(request=request)
return config
def tearDown(unhook_zca=True):
"""Undo the effects of :func:`pyramid.testing.setUp`. Use this
function in the ``tearDown`` method of a unit test that uses
:func:`pyramid.testing.setUp` in its ``setUp`` method.
If the ``unhook_zca`` argument is ``True`` (the default), call
:func:`zope.component.getSiteManager.reset`. This undoes the
action of :func:`pyramid.testing.setUp` when called with the
argument ``hook_zca=True``. If :mod:`zope.component` cannot be
imported, ``unhook_zca`` is set to ``False``.
"""
global have_zca
if unhook_zca and have_zca:
try:
from zope.component import getSiteManager
getSiteManager.reset()
except ImportError: # pragma: no cover
have_zca = False
info = manager.pop()
manager.clear()
if info is not None:
registry = info['registry']
if hasattr(registry, '__init__') and hasattr(registry, '__name__'):
try:
registry.__init__(registry.__name__)
except TypeError:
# calling __init__ is largely for the benefit of
# people who want to use the global ZCA registry;
# however maybe somebody's using a registry we don't
# understand, let's not blow up
pass
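# Illustrative sketch (not part of the original module): the setUp/tearDown
# pairing described in the docstrings above, used from a hypothetical
# unittest.TestCase; the test class and the view being exercised are made up.
#
#   import unittest
#   from pyramid import testing
#
#   class MyViewTests(unittest.TestCase):
#       def setUp(self):
#           self.config = testing.setUp()
#       def tearDown(self):
#           testing.tearDown()
#       def test_my_view(self):
#           request = testing.DummyRequest()
#           # ... call the view under test with ``request`` and assert on the result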
def cleanUp(*arg, **kw):
""" An alias for :func:`pyramid.testing.setUp`. """
return setUp(*arg, **kw)
class DummyRendererFactory(object):
""" Registered by
:meth:`pyramid.config.Configurator.testing_add_renderer` as
a dummy renderer factory. The indecision about what to use as a
key (a spec vs. a relative name) is caused by test suites in the
wild believing they can register either. The ``factory`` argument
passed to this constructor is usually the *real* template renderer
factory, found when ``testing_add_renderer`` is called."""
def __init__(self, name, factory):
self.name = name
self.factory = factory # the "real" renderer factory reg'd previously
self.renderers = {}
def add(self, spec, renderer):
self.renderers[spec] = renderer
if ':' in spec:
package, relative = spec.split(':', 1)
self.renderers[relative] = renderer
def __call__(self, info):
spec = info.name
renderer = self.renderers.get(spec)
if renderer is None:
if ':' in spec:
package, relative = spec.split(':', 1)
renderer = self.renderers.get(relative)
if renderer is None:
if self.factory:
renderer = self.factory(info)
else:
raise KeyError('No testing renderer registered for %r' %
spec)
return renderer
class MockTemplate(object):
def __init__(self, response):
self._received = {}
self.response = response
def __getattr__(self, attrname):
return self
def __getitem__(self, attrname):
return self
def __call__(self, *arg, **kw):
self._received.update(kw)
return self.response
def skip_on(*platforms): # pragma: no cover
skip = False
for platform in platforms:
if skip_on.os_name.startswith(platform):
skip = True
if platform == 'pypy' and PYPY:
skip = True
if platform == 'py3' and PY3:
skip = True
def decorator(func):
if isinstance(func, class_types):
if skip: return None
else: return func
else:
def wrapper(*args, **kw):
if skip:
return
return func(*args, **kw)
wrapper.__name__ = func.__name__
wrapper.__doc__ = func.__doc__
return wrapper
return decorator
skip_on.os_name = os.name # for testing
@contextmanager
def testConfig(registry=None,
request=None,
hook_zca=True,
autocommit=True,
settings=None):
"""Returns a context manager for test set up.
This context manager calls :func:`pyramid.testing.setUp` when
entering and :func:`pyramid.testing.tearDown` when exiting.
All arguments are passed directly to :func:`pyramid.testing.setUp`.
If the ZCA is hooked, it will always be un-hooked in tearDown.
This context manager allows you to write test code like this:
.. code-block:: python
:linenos:
with testConfig() as config:
config.add_route('bar', '/bar/{id}')
req = DummyRequest()
resp = myview(req)
"""
config = setUp(registry=registry,
request=request,
hook_zca=hook_zca,
autocommit=autocommit,
settings=settings)
try:
yield config
finally:
tearDown(unhook_zca=hook_zca)
|
|
'''
File to handle motif/expression regression
'''
__author__='Anthony Soltis'
__email__='[email protected]'
import sys,os,pickle,re
from optparse import OptionParser
import numpy as np
from scipy import stats
import fileinput
import matplotlib
matplotlib.use('pdf')
from matplotlib import pyplot as plt
def load_tgm(tgm_fn):
'''
Load tgm file and produce output matrix.
Output is transposed numpy array object.
'''
print 'Loading tgm file...'
tgm = []
for line in fileinput.input(tgm_fn):
l = line.strip('\n').split()
tgm.append(l)
# display results, return array
s = np.asarray(tgm).T.shape
print 'TGM file loaded with %d genes by %d motifs.'%(s[0],s[1])
return np.asarray(tgm).T
def load_ids(ids_fn):
'''
Load ids filename and store as list.
'''
ids = []
for line in fileinput.input(ids_fn):
l = line.strip('\n')
ids.append(l)
return ids
def load_response(data_fn):
'''
Load ydata and return numpy vector.
Input file should have one value per-row.
'''
r_data = []
r_genes = []
for line in fileinput.input(data_fn):
row=line.strip('\n').split('\t')
if len(row)>1:
r_genes.append(row[0])
r_data.append(float(row[1]))
else:
r_data.append(float(row[0]))
# r_data.append(float(line.strip('\n')))
print 'Response data file loaded with %d values.'%(len(r_data))
return np.asarray(r_data),r_genes
def map_data(Xdata,Xnames,Ydata,Ynames):
'''
Map X (predictor) data to Y (response) data using X and Y data ids (i.e. gene names).
'''
# Intersect two gene lists
Xinds = []
Yinds = []
#yn = []
for i,Xgene in enumerate(Xnames):
for j,Ygene in enumerate(Ynames):
if Xgene == Ygene:
Xinds.append(i)
Yinds.append(j)
# yn.append(Ygene)
Xdata_out = Xdata[Xinds,:]
Ydata_out = Ydata[Yinds]
print 'Found %d genes that have binding data and are in the expression output'%(len(Yinds))
return Xdata_out,Ydata_out
def perform_regression(X,Y,motif_ids,norm,outdir,plot):
'''
Linear regression to assess whether X (predicted TF binding affinity)
is predictive of Y (gene expression). Optionally stores figures showing
the regression results for each TF motif.
'''
reg_results = []
for i in range(0,X.shape[1]):
# Set up data
x = np.array(X[:,i],dtype=float)
if norm != None:
if norm == 'log2':
y = np.log2(Y+.1)
elif norm == 'log10':
y = np.log10(Y+.1)
else: y = Y
# Perform regression
slope,intercept,r_val,p_val,std_err = stats.linregress(x,y)
reg_results.append([motif_ids[i],slope,p_val,i])
#regression plot
if plot:
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.plot(x,y,'bo',x,intercept+slope*x,'k')
ax1.set_title(motif_ids[i])
ax1.set_xlabel('Estimated transcription factor affinity')
ax1.set_ylabel('Expression log fold change')
#checking if a subdirectory is present to save plots
plotdir = os.path.join(os.path.split(outdir)[0],'regression_plots')
if not os.path.isdir(plotdir):
os.makedirs(plotdir)
#strip punctuation from motif ids so they form safe plot file names
if not re.match(r'^[A-Za-z0-9.]*$', motif_ids[i]):
motif_ids[i] = "".join(c for c in motif_ids[i] if c not in ('!','$','@','%','*','\\','/','_','-'))
#file name must be within max characters supported by os
if len(motif_ids[i])>162:
st = motif_ids[i]
motif_ids[i] = st[0:160]
plotfile = os.path.join(plotdir,motif_ids[i]+'.pdf')
fig.savefig(plotfile,dpi=300)
plt.close()
# First sort by p-value, then by magnitude of the regression slope,
# then by motif id to break ties
# Test case uses the values written to file by the str() conversion
# so sort on the same truncated values
return sorted(reg_results, key=lambda reg_res: (float(str(reg_res[2])), -float(str(abs(reg_res[1]))), reg_res[0]))
def fdr_correction(results):
'''
Compute FDR-corrected p-values using the Benjamini-Hochberg procedure.
'''
new_results = []
num_tests = len([r for r in results if str(r[1])!='nan'])
print 'Correcting for '+str(num_tests)+' numeric values'
for i in range(0,num_tests):
tup = results[i]
pval = tup[2]
fdr = num_tests*pval/(i+1)
if fdr > 1.0: fdr = 1.0
tup+=(fdr,)
new_results.append(tup)
return new_results
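# Illustrative worked example (not part of the original script): with results
# already sorted by p-value, e.g. p = [0.001, 0.010, 0.040] and num_tests = 3,
# the loop above yields Benjamini-Hochberg values 3*0.001/1 = 0.003,
# 3*0.010/2 = 0.015 and 3*0.040/3 = 0.040, each capped at 1.0 before being
# appended to its result row.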
def main():
usage = "%prog [options] <scores.tgm or scores.tgm.pkl> <response_values.tab>"
description = "Script that takes a predicted TF-Gene matrix and uses a linear regression to identify which TFs have binding scores correlated with gene expression changes."
parser = OptionParser(usage=usage,description=description)
# Options
parser.add_option('--outdir','--out',dest="outdir",default='./test_out.txt',
help='Choose output file name. Default is %default.')
parser.add_option('--motif-ids',dest='motif_ids',default=None,
help='OPTIONAL: If input file is in text format (.tgm), provide motif ids corresponding to tgm file motifs.')
parser.add_option('--tgm-genes',dest='tgm_genes',default=None,
help='OPTIONAL: If input file is in text format (.tgm), provide gene ids corresponding to tgm file genes.')
parser.add_option('--response-genes',dest='response_genes',default=None,
help='OPTIONAL: If two-column file is not provided, add in gene ids corresponding to response values.')
parser.add_option('--norm-type',dest='norm_type',default=None,
help='Choose normalization type for response data. Choices are: "log2", "log10".\
Default is %default.')
parser.add_option('--use-qval',dest='use_qval',action='store_true',default=False,help='If set, the Forest input file will contain -log(qval) instead of -log(pval) and the output will be thresholded using q-values. Default:%default')
parser.add_option('--thresh',dest='thresh',type='string',default='0.9',help='P/Q-Value threshold to illustrate results. Default:%default')
# Cannot set the default here because it depends on opts.outdir
parser.add_option('--gifdir',dest='motifs',default=None,
help='Directory containing motif GIFs to illustrate results. Default is <path_to_this_script>/../data/matrix_files/gifs.')
parser.add_option('--plot',dest='plot',action='store_true',default=False,help='Enable plot generation for regression results. Default:%default')
# get options, arguments
(opts,args) = parser.parse_args()
# Handle arguments
tgm_fn = args[0]
response_data_fn = args[1]
# Load in Y-vector data (gene expression, fold-changes, etc.)
response_data,response_genes = load_response(response_data_fn)
print 'Trying to get file type...'
ext=tgm_fn.split('.')[-1]
if ext.lower()=='pkl':
print '...found PKL file'
pkl=True
else:
print '...found text file, looking for additional data files in options'
pkl=False
# Handle options
outdir = opts.outdir
motif_ids = opts.motif_ids
if motif_ids == None and not pkl:
print 'Must provide motif ids file or use pickled dictionary. Exiting.'
sys.exit()
tgm_genes = opts.tgm_genes
if tgm_genes == None and not pkl:
print 'Must provide gene ids for motifs file or use pickled dictionary. Exiting.'
sys.exit()
# response_genes = opts.response_genes
if opts.response_genes == None and len(response_genes)==0:
print 'Must provide gene ids for response data or have a two-column data file. Exiting.'
sys.exit()
norm_type = opts.norm_type
valid_norm_types = ['log2','log10']
if norm_type != None:
if norm_type not in valid_norm_types:
print 'Normalization type not valid. Exiting.'
sys.exit()
if pkl:
#load in values from dictionary
tgmdict=pickle.load(open(tgm_fn,'rb'))
tgm_data=tgmdict['matrix'].T
motif_ids=tgmdict['tfs']
tgm_genes=tgmdict['genes']
delim=tgmdict['delim']
else:
# Load in transcription factor affinity matrix and IDs
tgm_data = load_tgm(tgm_fn)
motif_ids = load_ids(motif_ids)
tgm_genes = load_ids(tgm_genes)
delim='.'
#now load response_genes if they're not loaded yet
if len(response_genes)==0:
response_genes = load_ids(opts.response_genes)
# Map predictor data to response data
X,Y=map_data(tgm_data,tgm_genes,response_data,response_genes)
# Perform regression
reg_results=perform_regression(X,Y,motif_ids,norm_type,outdir,opts.plot)
# FDR correction
new_results = fdr_correction(reg_results)
dn=os.path.dirname(outdir)
if dn!='' and dn!='./' and not os.path.exists(dn):
os.makedirs(dn)
# Write to TEXT file complete results
of = open(outdir,'w')
of.writelines('\t'.join(['Motif','Slope','p-val','q-val'])+'\n')
for res in new_results:
if str(res[1])=='nan':
continue
ostr = '\t'.join([res[0],str(res[1]),str(res[2]),str(res[4])]) + '\n'
of.writelines(ostr)
of.close()
## Now create HTML writeup
# Set the motif gif directory
if opts.motifs is None:
# Default is <path_to_this_script>/../data/matrix_files/gif
prog_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
motif_dir = os.path.join(prog_dir, '..', 'data', 'matrix_files', 'gifs')
# Make it relative to the output directory
# Note that opts.outdir is the output file name, not a directory
motif_dir = os.path.relpath(motif_dir, os.path.dirname(opts.outdir))
else:
motif_dir = opts.motifs
threshold = float(opts.thresh)
of= open(re.sub(outdir.split('.')[-1],'html',outdir),'w')
of.writelines("""<html>
<title>GARNET Results</title>
<h3>GARNET regression results</h3>
<p>This table includes the results for GARNET TF-motif discovery and regression. This Table includes the non-zero results of the linear regression</p>
<table width="90%">
<tr><th style="width:25%">Motif Cluster</th><th style="width:12%">Slope</th><th style="width:12%">P-value</th><th style="width:12%">Q-value</th><th style="width:35%">LOGO</th></tr>
""")
for res in new_results:
if str(res[1])=='nan':
continue
# only report rows within the q-value or p-value threshold
if (opts.use_qval and res[4]<=threshold) or ((not opts.use_qval) and res[2]<=threshold):
motifgif=os.path.join(motif_dir,'motif'+str(res[3])+'.gif')
ostr = "<tr><td>"+' '.join(res[0].split('.'))+"</td><td>"+str(res[1])+'</td><td>'+str(res[2])+"</td><td>"+str(res[4])+"</td><td><img src=\""+motifgif+"\" scale=80%></td></tr>\n"
of.writelines(ostr)
of.writelines("</table></html>")
of.close()
##now write to Forest-friendly input file
##collect dictionary of all individual tf names and their regression p-values
##or q-values
regdict={}
for row in new_results:
tfs=[t for t in row[0].split(delim) if t!='' and ' ' not in t]
#print row
if str(row[1])=='nan':
continue
# skip rows that exceed the q-value or p-value threshold
if opts.use_qval:
if row[4]>threshold:
continue
elif row[2]>threshold:
continue
for tf in tfs:
if row[2]==1:
continue
if opts.use_qval:
lpv=-1.0*np.log2(float(row[4]))#calculate neg log2 qvalue
else:
lpv=-1.0*np.log2(float(row[2]))#calculate neg log2 pvalue
try:
cpv=regdict[tf]
except KeyError:
cpv=0.0
if lpv>cpv:
regdict[tf]=lpv
print 'Found '+str(len(regdict))+' TF scores for '+str(len(new_results))+' motif results'
of=open(re.sub('.tsv','_FOREST_INPUT.tsv',outdir),'w')
for tf in sorted(regdict.keys()):
val=regdict[tf]
of.write(tf+'\t'+str(val)+'\n')
of.close()
if __name__ == '__main__': main()
|
|
#!/usr/bin/python3
# Creates DNS zone files for all of the domains of all of the mail users
# and mail aliases and restarts nsd.
########################################################################
import os, os.path, urllib.parse, datetime, re, hashlib, base64
import ipaddress
import rtyaml
import dns.resolver
from mailconfig import get_mail_domains
from utils import shell, load_env_vars_from_file, safe_domain_name, sort_domains
def get_dns_domains(env):
# Add all domain names in use by email users and mail aliases and ensure
# PRIMARY_HOSTNAME is in the list.
domains = set()
domains |= get_mail_domains(env)
domains.add(env['PRIMARY_HOSTNAME'])
return domains
def get_dns_zones(env):
# What domains should we create DNS zones for? Never create a zone for
# a domain & a subdomain of that domain.
domains = get_dns_domains(env)
# Exclude domains that are subdomains of other domains we know. Proceed
# by looking at shorter domains first.
zone_domains = set()
for domain in sorted(domains, key=lambda d : len(d)):
for d in zone_domains:
if domain.endswith("." + d):
# We found a parent domain already in the list.
break
else:
# 'break' did not occur: there is no parent domain.
zone_domains.add(domain)
# Make a nice and safe filename for each domain.
zonefiles = []
for domain in zone_domains:
zonefiles.append([domain, safe_domain_name(domain) + ".txt"])
# Sort the list so that the order is nice and so that nsd.conf has a
# stable order so we don't rewrite the file & restart the service
# meaninglessly.
zone_order = sort_domains([ zone[0] for zone in zonefiles ], env)
zonefiles.sort(key = lambda zone : zone_order.index(zone[0]) )
return zonefiles
def get_custom_dns_config(env):
try:
return rtyaml.load(open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml')))
except:
return { }
def write_custom_dns_config(config, env):
config_yaml = rtyaml.dump(config)
with open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml'), "w") as f:
f.write(config_yaml)
def do_dns_update(env, force=False):
# What domains (and their zone filenames) should we build?
domains = get_dns_domains(env)
zonefiles = get_dns_zones(env)
# Custom records to add to zones.
additional_records = get_custom_dns_config(env)
# Write zone files.
os.makedirs('/etc/nsd/zones', exist_ok=True)
updated_domains = []
for i, (domain, zonefile) in enumerate(zonefiles):
# Build the records to put in the zone.
records = build_zone(domain, domains, additional_records, env)
# See if the zone has changed, and if so update the serial number
# and write the zone file.
if not write_nsd_zone(domain, "/etc/nsd/zones/" + zonefile, records, env, force):
# Zone was not updated. There were no changes.
continue
# If this is a .justtesting.email domain, then post the update.
try:
justtestingdotemail(domain, records)
except:
# Hmm. Might be a network issue. If we stop now, will we end
# up in an inconsistent state? Let's just continue.
pass
# Mark that we just updated this domain.
updated_domains.append(domain)
# Sign the zone.
#
# Every time we sign the zone we get a new result, which means
# we can't sign a zone without bumping the zone's serial number.
# Thus we only sign a zone if write_nsd_zone returned True
# indicating the zone changed, and thus it got a new serial number.
# write_nsd_zone is smart enough to check if a zone's signature
# is nearing expiration and if so it'll bump the serial number
# and return True so we get a chance to re-sign it.
sign_zone(domain, zonefile, env)
# Now that all zones are signed (some might not have changed and so didn't
# just get signed now, but were before) update the zone filename so nsd.conf
# uses the signed file.
for i in range(len(zonefiles)):
zonefiles[i][1] += ".signed"
# Write the main nsd.conf file.
if write_nsd_conf(zonefiles, additional_records, env):
# Make sure updated_domains contains *something* if we wrote an updated
# nsd.conf so that we know to restart nsd.
if len(updated_domains) == 0:
updated_domains.append("DNS configuration")
# Kick nsd if anything changed.
if len(updated_domains) > 0:
shell('check_call', ["/usr/sbin/service", "nsd", "restart"])
# Write the OpenDKIM configuration tables.
if write_opendkim_tables(domains, env):
# Settings changed. Kick opendkim.
shell('check_call', ["/usr/sbin/service", "opendkim", "restart"])
if len(updated_domains) == 0:
# If nothing else changed, note the OpenDKIM update so we report something.
updated_domains.append("OpenDKIM configuration")
if len(updated_domains) == 0:
# if nothing was updated (except maybe OpenDKIM's files), don't show any output
return ""
else:
return "updated DNS: " + ",".join(updated_domains) + "\n"
########################################################################
def build_zone(domain, all_domains, additional_records, env, is_zone=True):
records = []
# For top-level zones, define the authoritative name servers.
#
# Normally we are our own nameservers. Some TLDs require two distinct IP addresses,
# so we allow the user to override the second nameserver definition so that
# secondary DNS can be set up elsewhere.
#
# 'False' in the tuple indicates these records would not be used if the zone
# is managed outside of the box.
if is_zone:
# Obligatory definition of ns1.PRIMARY_HOSTNAME.
records.append((None, "NS", "ns1.%s." % env["PRIMARY_HOSTNAME"], False))
# Define ns2.PRIMARY_HOSTNAME or whatever the user overrides.
secondary_ns = additional_records.get("_secondary_nameserver", "ns2." + env["PRIMARY_HOSTNAME"])
records.append((None, "NS", secondary_ns+'.', False))
# In PRIMARY_HOSTNAME...
if domain == env["PRIMARY_HOSTNAME"]:
# Define ns1 and ns2.
# 'False' in the tuple indicates these records would not be used if the zone
# is managed outside of the box.
records.append(("ns1", "A", env["PUBLIC_IP"], False))
records.append(("ns2", "A", env["PUBLIC_IP"], False))
if env.get('PUBLIC_IPV6'):
records.append(("ns1", "AAAA", env["PUBLIC_IPV6"], False))
records.append(("ns2", "AAAA", env["PUBLIC_IPV6"], False))
# Set the A/AAAA records. Do this early for the PRIMARY_HOSTNAME so that the user cannot override them
# and we can provide different explanatory text.
records.append((None, "A", env["PUBLIC_IP"], "Required. Sets the IP address of the box."))
if env.get("PUBLIC_IPV6"): records.append((None, "AAAA", env["PUBLIC_IPV6"], "Required. Sets the IPv6 address of the box."))
# Add a DANE TLSA record for SMTP.
records.append(("_25._tcp", "TLSA", build_tlsa_record(env), "Recommended when DNSSEC is enabled. Advertises to mail servers connecting to the box that mandatory encryption should be used."))
# Add SSHFP records to help SSH key validation. One per available SSH key on this system.
for value in build_sshfp_records():
records.append((None, "SSHFP", value, "Optional. Provides an out-of-band method for verifying an SSH key before connecting. Use 'VerifyHostKeyDNS yes' (or 'VerifyHostKeyDNS ask') when connecting with ssh."))
# The MX record says where email for the domain should be delivered: Here!
records.append((None, "MX", "10 %s." % env["PRIMARY_HOSTNAME"], "Required. Specifies the hostname (and priority) of the machine that handles @%s mail." % domain))
# SPF record: Permit the box ('mx', see above) to send mail on behalf of
# the domain, and no one else.
records.append((None, "TXT", 'v=spf1 mx -all', "Recommended. Specifies that only the box is permitted to send @%s mail." % domain))
# Add DNS records for any subdomains of this domain. We should not have a zone for
# both a domain and one of its subdomains.
subdomains = [d for d in all_domains if d.endswith("." + domain)]
for subdomain in subdomains:
subdomain_qname = subdomain[0:-len("." + domain)]
subzone = build_zone(subdomain, [], additional_records, env, is_zone=False)
for child_qname, child_rtype, child_value, child_explanation in subzone:
if child_qname == None:
child_qname = subdomain_qname
else:
child_qname += "." + subdomain_qname
records.append((child_qname, child_rtype, child_value, child_explanation))
def has_rec(qname, rtype, prefix=None):
for rec in records:
if rec[0] == qname and rec[1] == rtype and (prefix is None or rec[2].startswith(prefix)):
return True
return False
# The user may set other records that don't conflict with our settings.
for qname, rtype, value in get_custom_records(domain, additional_records, env):
if has_rec(qname, rtype): continue
records.append((qname, rtype, value, "(Set by user.)"))
# Add defaults if not overridden by the user's custom settings (and not otherwise configured).
# Any "CNAME" record on the qname overrides A and AAAA.
defaults = [
(None, "A", env["PUBLIC_IP"], "Required. May have a different value. Sets the IP address that %s resolves to for web hosting and other services besides mail. The A record must be present but its value does not affect mail delivery." % domain),
("www", "A", env["PUBLIC_IP"], "Optional. Sets the IP address that www.%s resolves to, e.g. for web hosting." % domain),
(None, "AAAA", env.get('PUBLIC_IPV6'), "Optional. Sets the IPv6 address that %s resolves to, e.g. for web hosting. (It is not necessary for receiving mail on this domain.)" % domain),
("www", "AAAA", env.get('PUBLIC_IPV6'), "Optional. Sets the IPv6 address that www.%s resolves to, e.g. for web hosting." % domain),
]
for qname, rtype, value, explanation in defaults:
if value is None or value.strip() == "": continue # skip IPV6 if not set
if not is_zone and qname == "www": continue # don't create any default 'www' subdomains on what are themselves subdomains
# Set the default record, but only if:
# (1) there is not a user-set record of the same type already
# (2) there is not a CNAME record already, since you can't set both and who knows what takes precedence
# (3) there is not an A record already (if this is an A record this is a dup of (1), and if this is an AAAA record then don't set a default AAAA record if the user sets a custom A record, since the default wouldn't make sense and it should not resolve if the user doesn't provide a new AAAA record)
if not has_rec(qname, rtype) and not has_rec(qname, "CNAME") and not has_rec(qname, "A"):
records.append((qname, rtype, value, explanation))
# Append the DKIM TXT record to the zone as generated by OpenDKIM.
opendkim_record_file = os.path.join(env['STORAGE_ROOT'], 'mail/dkim/mail.txt')
with open(opendkim_record_file) as orf:
m = re.match(r'(\S+)\s+IN\s+TXT\s+\( "([^"]+)"\s+"([^"]+)"\s*\)', orf.read(), re.S)
val = m.group(2) + m.group(3)
records.append((m.group(1), "TXT", val, "Recommended. Provides a way for recipients to verify that this machine sent @%s mail." % domain))
# Append a DMARC record.
records.append(("_dmarc", "TXT", 'v=DMARC1; p=quarantine', "Optional. Specifies that mail that does not originate from the box but claims to be from @%s is suspect and should be quarantined by the recipient's mail system." % domain))
# For any subdomain with an A record but no SPF or DMARC record, add strict policy records.
all_resolvable_qnames = set(r[0] for r in records if r[1] in ("A", "AAAA"))
for qname in all_resolvable_qnames:
if not has_rec(qname, "TXT", prefix="v=spf1 "):
records.append((qname, "TXT", 'v=spf1 a mx -all', "Prevents unauthorized use of this domain name for outbound mail by requiring outbound mail to originate from the indicated host(s)."))
dmarc_qname = "_dmarc" + ("" if qname is None else "." + qname)
if not has_rec(dmarc_qname, "TXT", prefix="v=DMARC1; "):
records.append((dmarc_qname, "TXT", 'v=DMARC1; p=reject', "Prevents unauthorized use of this domain name for outbound mail by requiring a valid DKIM signature."))
# Sort the records. The None records *must* go first in the nsd zone file. Otherwise it doesn't matter.
records.sort(key = lambda rec : list(reversed(rec[0].split(".")) if rec[0] is not None else ""))
return records
########################################################################
def get_custom_records(domain, additional_records, env):
for qname, value in additional_records.items():
# We don't count the secondary nameserver config (if present) as a record - that would just be
# confusing to users. Instead it is accessed/manipulated directly via (get/set)_custom_dns_config.
if qname == "_secondary_nameserver": continue
# Is this record for the domain or one of its subdomains?
# If `domain` is None, return records for all domains.
if domain is not None and qname != domain and not qname.endswith("." + domain): continue
# Turn the fully qualified domain name in the YAML file into
# our short form (None => domain, or a relative QNAME) if
# domain is not None.
if domain is not None:
if qname == domain:
qname = None
else:
qname = qname[0:len(qname)-len("." + domain)]
# Short form. Mapping a domain name to a string is short-hand
# for creating A records.
if isinstance(value, str):
values = [("A", value)]
if value == "local" and env.get("PUBLIC_IPV6"):
values.append( ("AAAA", value) )
# A mapping creates multiple records.
elif isinstance(value, dict):
values = value.items()
# No other type of data is allowed.
else:
raise ValueError()
for rtype, value2 in values:
# The "local" keyword on A/AAAA records are short-hand for our own IP.
# This also flags for web configuration that the user wants a website here.
if rtype == "A" and value2 == "local":
value2 = env["PUBLIC_IP"]
if rtype == "AAAA" and value2 == "local":
if "PUBLIC_IPV6" not in env: continue # no IPv6 address is available so don't set anything
value2 = env["PUBLIC_IPV6"]
yield (qname, rtype, value2)
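# Illustrative sketch (not part of the original script) of the two value forms
# handled above, as they would appear in dns/custom.yaml; the domain names and
# record values are made up:
#
#   blog.example.com: 192.0.2.10      # string value => a single A record
#   app.example.com:                  # mapping value => one record per rtype
#     CNAME: some-host.example.net.
#     TXT: "v=spf1 -all"
#   www.example.com: local            # 'local' => this box's PUBLIC_IP (and
#                                     #   PUBLIC_IPV6 too, when configured)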
########################################################################
def build_tlsa_record(env):
# A DANE TLSA record in DNS specifies that connections on a port
# must use TLS and the certificate must match a particular certificate.
#
# Thanks to http://blog.huque.com/2012/10/dnssec-and-certificates.html
# for explaining all of this!
# Get the hex SHA256 of the DER-encoded server certificate:
certder = shell("check_output", [
"/usr/bin/openssl",
"x509",
"-in", os.path.join(env["STORAGE_ROOT"], "ssl", "ssl_certificate.pem"),
"-outform", "DER"
],
return_bytes=True)
certhash = hashlib.sha256(certder).hexdigest()
# Specify the TLSA parameters:
# 3: This is the certificate that the client should trust. No CA is needed.
# 0: The whole certificate is matched.
# 1: The certificate is SHA256'd here.
return "3 0 1 " + certhash
def build_sshfp_records():
# The SSHFP record is a way for us to embed this server's SSH public
# key fingerprint into the DNS so that remote hosts have an out-of-band
# method to confirm the fingerprint. See RFC 4255 and RFC 6594. This
# depends on DNSSEC.
#
# On the client side, set SSH's VerifyHostKeyDNS option to 'ask' to
# include this info in the key verification prompt or 'yes' to trust
# the SSHFP record.
#
# See https://github.com/xelerance/sshfp for inspiration.
algorithm_number = {
"ssh-rsa": 1,
"ssh-dss": 2,
"ecdsa-sha2-nistp256": 3,
}
# Get our local fingerprints by running ssh-keyscan. The output looks
# like the known_hosts file: hostname, keytype, fingerprint. The order
# of the output is arbitrary, so sort it to prevent spurious updates
# to the zone file (that trigger bumping the serial number).
keys = shell("check_output", ["ssh-keyscan", "localhost"])
for key in sorted(keys.split("\n")):
if key.strip() == "" or key[0] == "#": continue
try:
host, keytype, pubkey = key.split(" ")
yield "%d %d ( %s )" % (
algorithm_number[keytype],
2, # specifies we are using SHA-256 on next line
hashlib.sha256(base64.b64decode(pubkey)).hexdigest().upper(),
)
except:
# Lots of things can go wrong. Don't let it disturb the DNS
# zone.
pass
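# Illustrative sketch (not part of the original script): each value yielded
# above becomes an SSHFP record at the zone apex, e.g. for the host's RSA key
# (fingerprint abbreviated):
#
#   @  IN  SSHFP  1 2 ( 5A3F...E2B0 )
#
# where 1 = ssh-rsa per algorithm_number above and 2 = a SHA-256 fingerprint.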
########################################################################
def write_nsd_zone(domain, zonefile, records, env, force):
# On the $ORIGIN line, there's typically a ';' comment at the end explaining
# what the $ORIGIN line does. Any further data after the domain confuses
# ldns-signzone, however. It used to say '; default zone domain'.
# The SOA contact address for all of the domains on this system is hostmaster
# @ the PRIMARY_HOSTNAME. Hopefully that's legit.
# For the refresh through TTL fields, a good reference is:
# http://www.peerwisdom.org/2013/05/15/dns-understanding-the-soa-record/
zone = """
$ORIGIN {domain}.
$TTL 1800 ; default time to live
@ IN SOA ns1.{primary_domain}. hostmaster.{primary_domain}. (
__SERIAL__ ; serial number
7200 ; Refresh (secondary nameserver update interval)
1800 ; Retry (when refresh fails, how often to try again)
1209600 ; Expire (when refresh fails, how long secondary nameserver will keep records around anyway)
1800 ; Negative TTL (how long negative responses are cached)
)
"""
# Replace replacement strings.
zone = zone.format(domain=domain.encode("idna").decode("ascii"), primary_domain=env["PRIMARY_HOSTNAME"].encode("idna").decode("ascii"))
# Add records.
for subdomain, querytype, value, explanation in records:
if subdomain:
zone += subdomain.encode("idna").decode("ascii")
zone += "\tIN\t" + querytype + "\t"
if querytype == "TXT":
# Quote and escape.
value = value.replace('\\', '\\\\') # escape backslashes
value = value.replace('"', '\\"') # escape quotes
value = '"' + value + '"' # wrap in quotes
elif querytype in ("NS", "CNAME"):
# These records must be IDNA-encoded.
value = value.encode("idna").decode("ascii")
elif querytype == "MX":
# Also IDNA-encoded, but must parse first.
priority, host = value.split(" ", 1)
host = host.encode("idna").decode("ascii")
value = priority + " " + host
zone += value + "\n"
# DNSSEC requires re-signing a zone periodically. That requires
# bumping the serial number even if no other records have changed.
# We don't see the DNSSEC records yet, so we have to figure out
# if a re-signing is necessary so we can prematurely bump the
# serial number.
force_bump = False
if not os.path.exists(zonefile + ".signed"):
# No signed file yet. Shouldn't normally happen unless a box
# is going from not using DNSSEC to using DNSSEC.
force_bump = True
else:
# We've signed the domain. Check if we are close to the expiration
# time of the signature. If so, we'll force a bump of the serial
# number so we can re-sign it.
with open(zonefile + ".signed") as f:
signed_zone = f.read()
expiration_times = re.findall(r"\sRRSIG\s+SOA\s+\d+\s+\d+\s\d+\s+(\d{14})", signed_zone)
if len(expiration_times) == 0:
# weird
force_bump = True
else:
# All of the times should be the same, but if not choose the soonest.
expiration_time = min(expiration_times)
expiration_time = datetime.datetime.strptime(expiration_time, "%Y%m%d%H%M%S")
if expiration_time - datetime.datetime.now() < datetime.timedelta(days=3):
# We're within three days of the expiration, so bump serial & resign.
force_bump = True
# Set the serial number.
serial = datetime.datetime.now().strftime("%Y%m%d00")
if os.path.exists(zonefile):
# If the zone already exists, is different, and has a later serial number,
# increment the number.
with open(zonefile) as f:
existing_zone = f.read()
m = re.search(r"(\d+)\s*;\s*serial number", existing_zone)
if m:
# Clear out the serial number in the existing zone file for the
# purposes of seeing if anything *else* in the zone has changed.
existing_serial = m.group(1)
existing_zone = existing_zone.replace(m.group(0), "__SERIAL__ ; serial number")
# If the existing zone is the same as the new zone (modulo the serial number),
# there is no need to update the file. Unless we're forcing a bump.
if zone == existing_zone and not force_bump and not force:
return False
# If the existing serial is not less than a serial number
# based on the current date plus 00, increment it. Otherwise,
# the serial number is less than our desired new serial number
# so we'll use the desired new number.
if existing_serial >= serial:
serial = str(int(existing_serial) + 1)
zone = zone.replace("__SERIAL__", serial)
# Write the zone file.
with open(zonefile, "w") as f:
f.write(zone)
return True # file is updated
########################################################################
def write_nsd_conf(zonefiles, additional_records, env):
# Basic header.
nsdconf = """
server:
hide-version: yes
# identify the server (CH TXT ID.SERVER entry).
identity: ""
# The directory for zonefile: files.
zonesdir: "/etc/nsd/zones"
"""
# Since we have bind9 listening on localhost for locally-generated
# DNS queries that require a recursive nameserver, and the system
# might have other network interfaces for e.g. tunnelling, we have
# to be specific about the network interfaces that nsd binds to.
for ipaddr in (env.get("PRIVATE_IP", "") + " " + env.get("PRIVATE_IPV6", "")).split(" "):
if ipaddr == "": continue
nsdconf += " ip-address: %s\n" % ipaddr
# Append the zones.
for domain, zonefile in zonefiles:
nsdconf += """
zone:
name: %s
zonefile: %s
""" % (domain.encode("idna").decode("ascii"), zonefile)
# If a custom secondary nameserver has been set, allow zone transfers
# and notifies to that nameserver.
if additional_records.get("_secondary_nameserver"):
# Get the IP address of the nameserver by resolving it.
hostname = additional_records.get("_secondary_nameserver")
resolver = dns.resolver.get_default_resolver()
response = dns.resolver.query(hostname+'.', "A")
ipaddr = str(response[0])
nsdconf += """\tnotify: %s NOKEY
provide-xfr: %s NOKEY
""" % (ipaddr, ipaddr)
# Check if the nsd.conf is changing. If it isn't changing,
# return False to flag that no change was made.
with open("/etc/nsd/nsd.conf") as f:
if f.read() == nsdconf:
return False
with open("/etc/nsd/nsd.conf", "w") as f:
f.write(nsdconf)
return True
########################################################################
def dnssec_choose_algo(domain, env):
if '.' in domain and domain.rsplit('.')[-1] in \
("email", "guide", "fund"):
# At GoDaddy, RSASHA256 is the only algorithm supported
# for .email and .guide.
# A variety of algorithms are supported for .fund. This
# is preferred.
return "RSASHA256"
# For any domain we were able to sign before, don't change the algorithm
# on existing users. We'll probably want to migrate to SHA256 later.
return "RSASHA1-NSEC3-SHA1"
def sign_zone(domain, zonefile, env):
algo = dnssec_choose_algo(domain, env)
dnssec_keys = load_env_vars_from_file(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/%s.conf' % algo))
# From here, use the IDNA encoding of the domain name.
domain = domain.encode("idna").decode("ascii")
# In order to use the same keys for all domains, we have to generate
# a new .key file with a DNSSEC record for the specific domain. We
# can reuse the same key, but it won't validate without a DNSSEC
# record specifically for the domain.
#
# Copy the .key and .private files to /tmp to patch them up.
#
# Use os.umask and open().write() to securely create a copy that only
# we (root) can read.
files_to_kill = []
for key in ("KSK", "ZSK"):
if dnssec_keys.get(key, "").strip() == "": raise Exception("DNSSEC is not properly set up.")
oldkeyfn = os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/' + dnssec_keys[key])
newkeyfn = '/tmp/' + dnssec_keys[key].replace("_domain_", domain)
dnssec_keys[key] = newkeyfn
for ext in (".private", ".key"):
if not os.path.exists(oldkeyfn + ext): raise Exception("DNSSEC is not properly set up.")
with open(oldkeyfn + ext, "r") as fr:
keydata = fr.read()
keydata = keydata.replace("_domain_", domain) # trick ldns-signzone into letting our generic key be used by this zone
fn = newkeyfn + ext
prev_umask = os.umask(0o77) # ensure written file is not world-readable
try:
with open(fn, "w") as fw:
fw.write(keydata)
finally:
os.umask(prev_umask) # other files we write should be world-readable
files_to_kill.append(fn)
# Do the signing.
expiry_date = (datetime.datetime.now() + datetime.timedelta(days=30)).strftime("%Y%m%d")
shell('check_call', ["/usr/bin/ldns-signzone",
# expire the zone after 30 days
"-e", expiry_date,
# use NSEC3
"-n",
# zonefile to sign
"/etc/nsd/zones/" + zonefile,
# keys to sign with (order doesn't matter -- it'll figure it out)
dnssec_keys["KSK"],
dnssec_keys["ZSK"],
])
# Create a DS record based on the patched-up key files. The DS record is specific to the
# zone being signed, so we can't use the .ds files generated when we created the keys.
# The DS record points to the KSK only. Write this next to the zone file so we can
# get it later to give to the user with instructions on what to do with it.
#
# We want to be able to validate DS records too, but multiple forms may be valid depending
# on the digest type. So we'll write all (both) valid records. Only one DS record should
# actually be deployed. Preferably the first.
with open("/etc/nsd/zones/" + zonefile + ".ds", "w") as f:
for digest_type in ('2', '1'):
rr_ds = shell('check_output', ["/usr/bin/ldns-key2ds",
"-n", # output to stdout
"-" + digest_type, # 1=SHA1, 2=SHA256
dnssec_keys["KSK"] + ".key"
])
f.write(rr_ds)
# Remove our temporary files.
for fn in files_to_kill:
os.unlink(fn)
########################################################################
def write_opendkim_tables(domains, env):
# Append a record to OpenDKIM's KeyTable and SigningTable for each domain
# that we send mail from (zones and all subdomains).
opendkim_key_file = os.path.join(env['STORAGE_ROOT'], 'mail/dkim/mail.private')
if not os.path.exists(opendkim_key_file):
# Looks like OpenDKIM is not installed.
return False
config = {
# The SigningTable maps email addresses to a key in the KeyTable that
# specifies signing information for matching email addresses. Here we
# map each domain to a same-named key.
#
# Elsewhere we set the DMARC policy for each domain such that mail claiming
# to be From: the domain must be signed with a DKIM key on the same domain.
# So we must have a separate KeyTable entry for each domain.
"SigningTable":
"".join(
"*@{domain} {domain}\n".format(domain=domain)
for domain in domains
),
# The KeyTable specifies the signing domain, the DKIM selector, and the
# path to the private key to use for signing some mail. Per DMARC, the
# signing domain must match the sender's From: domain.
"KeyTable":
"".join(
"{domain} {domain}:mail:{key_file}\n".format(domain=domain, key_file=opendkim_key_file)
for domain in domains
),
}
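# Illustrative sketch (not part of the original script): for
# domains = ['example.com'] the loop below would write, with the key path
# shortened to <STORAGE_ROOT>/mail/dkim/mail.private:
#
#   /etc/opendkim/SigningTable:  *@example.com example.com
#   /etc/opendkim/KeyTable:      example.com example.com:mail:<STORAGE_ROOT>/mail/dkim/mail.private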
did_update = False
for filename, content in config.items():
# Don't write the file if it doesn't need an update.
if os.path.exists("/etc/opendkim/" + filename):
with open("/etc/opendkim/" + filename) as f:
if f.read() == content:
continue
# The contents needs to change.
with open("/etc/opendkim/" + filename, "w") as f:
f.write(content)
did_update = True
# Return whether the files changed. If they didn't change, there's
# no need to kick the opendkim process.
return did_update
########################################################################
def set_custom_dns_record(qname, rtype, value, env):
# validate qname
for zone, fn in get_dns_zones(env):
# It must match a zone apex or be a subdomain of a zone
# that we are otherwise hosting.
if qname == zone or qname.endswith("."+zone):
break
else:
# No match.
raise ValueError("%s is not a domain name or a subdomain of a domain name managed by this box." % qname)
# validate rtype
rtype = rtype.upper()
if value is not None:
if rtype in ("A", "AAAA"):
v = ipaddress.ip_address(value)
if rtype == "A" and not isinstance(v, ipaddress.IPv4Address): raise ValueError("That's an IPv6 address.")
if rtype == "AAAA" and not isinstance(v, ipaddress.IPv6Address): raise ValueError("That's an IPv4 address.")
elif rtype in ("CNAME", "TXT", "SRV"):
# anything goes
pass
else:
raise ValueError("Unknown record type '%s'." % rtype)
# load existing config
config = get_custom_dns_config(env)
# update
if qname not in config:
if value is None:
# Is asking to delete a record that does not exist.
return False
elif rtype == "A":
# Add this record using the short form 'qname: value'.
config[qname] = value
else:
# Add this record. This is the qname's first record.
config[qname] = { rtype: value }
else:
if isinstance(config[qname], str):
# This is a short-form 'qname: value' implicit-A record.
if value is None and rtype != "A":
# Is asking to delete a record that doesn't exist.
return False
elif value is None and rtype == "A":
# Delete record.
del config[qname]
elif rtype == "A":
# Update, keeping short form.
if config[qname] == value:
# No change.
return False
config[qname] = value
else:
# Expand short form so we can add a new record type.
config[qname] = { "A": config[qname], rtype: value }
else:
# This is the qname: { ... } (dict) format.
if value is None:
if rtype not in config[qname]:
# Is asking to delete a record that doesn't exist.
return False
else:
# Delete the record. If it's the last record, delete the domain.
del config[qname][rtype]
if len(config[qname]) == 0:
del config[qname]
else:
# Update the record.
if config[qname].get(rtype) == value:
# No change.
return False
config[qname][rtype] = value
# serialize & save
write_custom_dns_config(config, env)
return True
########################################################################
def set_secondary_dns(hostname, env):
config = get_custom_dns_config(env)
if hostname in (None, ""):
# Clear.
if "_secondary_nameserver" in config:
del config["_secondary_nameserver"]
else:
# Validate.
hostname = hostname.strip().lower()
resolver = dns.resolver.get_default_resolver()
try:
response = dns.resolver.query(hostname, "A")
except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
raise ValueError("Could not resolve the IP address of %s." % hostname)
# Set.
config["_secondary_nameserver"] = hostname
# Save and apply.
write_custom_dns_config(config, env)
return do_dns_update(env)
########################################################################
def justtestingdotemail(domain, records):
# If the domain is a subdomain of justtesting.email, which we own,
# automatically populate the zone where it is set up on dns4e.com.
# Ideally if dns4e.com supported NS records we would just have it
# delegate DNS to us, but instead we will populate the whole zone.
import subprocess, json, urllib.parse
if not domain.endswith(".justtesting.email"):
return
for subdomain, querytype, value, explanation in records:
if querytype in ("NS",): continue
if subdomain in ("www", "ns1", "ns2"): continue # don't do unnecessary things
if subdomain == None:
subdomain = domain
else:
subdomain = subdomain + "." + domain
if querytype == "TXT":
# nsd requires parentheses around txt records with multiple parts,
# but DNS4E requires there be no parentheses; also it goes into
# nsd with a newline and a tab, which we replace with a space here
value = re.sub("^\s*\(\s*([\w\W]*)\)", r"\1", value)
value = re.sub("\s+", " ", value)
else:
continue
print("Updating DNS for %s/%s..." % (subdomain, querytype))
resp = json.loads(subprocess.check_output([
"curl",
"-s",
"https://api.dns4e.com/v7/%s/%s" % (urllib.parse.quote(subdomain), querytype.lower()),
"--user", "2ddbd8e88ed1495fa0ec:A97TDJV26CVUJS6hqAs0CKnhj4HvjTM7MwAAg8xb",
"--data", "record=%s" % urllib.parse.quote(value),
]).decode("utf8"))
print("\t...", resp.get("message", "?"))
########################################################################
def build_recommended_dns(env):
ret = []
domains = get_dns_domains(env)
zonefiles = get_dns_zones(env)
additional_records = get_custom_dns_config(env)
for domain, zonefile in zonefiles:
records = build_zone(domain, domains, additional_records, env)
# remove records that we don't display
records = [r for r in records if r[3] is not False]
# put Required at the top, then Recommended, then everything else
records.sort(key = lambda r : 0 if r[3].startswith("Required.") else (1 if r[3].startswith("Recommended.") else 2))
# expand qnames
for i in range(len(records)):
if records[i][0] == None:
qname = domain
else:
qname = records[i][0] + "." + domain
records[i] = {
"qname": qname,
"rtype": records[i][1],
"value": records[i][2],
"explanation": records[i][3],
}
# return
ret.append((domain, records))
return ret
if __name__ == "__main__":
from utils import load_environment
env = load_environment()
for zone, records in build_recommended_dns(env):
for record in records:
print("; " + record['explanation'])
print(record['qname'], record['rtype'], record['value'], sep="\t")
print()
|
|
# Copyright (c) 2014, Salesforce.com, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of Salesforce.com nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import functools
from collections import defaultdict
import numpy
import numpy.random
from nose import SkipTest
from nose.tools import (
assert_true,
assert_equal,
assert_less,
assert_greater,
assert_is_instance,
)
from distributions.dbg.random import sample_discrete
from distributions.util import discrete_goodness_of_fit
from distributions.tests.util import (
require_cython,
seed_all,
assert_hasattr,
assert_close,
)
from distributions.dbg.random import scores_to_probs
import distributions.dbg.clustering
require_cython()
import distributions.lp.clustering
from distributions.lp.clustering import count_assignments
from distributions.lp.mixture import MixtureIdTracker
MODELS = {
'dbg.LowEntropy': distributions.dbg.clustering.LowEntropy,
'lp.PitmanYor': distributions.lp.clustering.PitmanYor,
'lp.LowEntropy': distributions.lp.clustering.LowEntropy,
}
SKIP_EXPENSIVE_TESTS = False
SAMPLE_COUNT = 2000
MIN_GOODNESS_OF_FIT = 1e-3
def iter_examples(Model):
assert_hasattr(Model, 'EXAMPLES')
EXAMPLES = Model.EXAMPLES
assert_is_instance(EXAMPLES, list)
assert_true(EXAMPLES, 'no examples provided')
for i, EXAMPLE in enumerate(EXAMPLES):
print 'example {}/{}'.format(1 + i, len(Model.EXAMPLES))
yield EXAMPLE
def for_each_model(*filters):
'''
Run one test per Model, filtering out inappropriate Models for test.
'''
def filtered(test_fun):
@functools.wraps(test_fun)
def test_one_model(name):
Model = MODELS[name]
for EXAMPLE in iter_examples(Model):
seed_all(0)
if SKIP_EXPENSIVE_TESTS and name.startswith('dbg'):
sample_count = SAMPLE_COUNT / 10
else:
sample_count = SAMPLE_COUNT
test_fun(Model, EXAMPLE, sample_count)
@functools.wraps(test_fun)
def test_all_models():
for name, Model in sorted(MODELS.iteritems()):
if all(f(Model) for f in filters):
yield test_one_model, name
return test_all_models
return filtered
def canonicalize(assignments):
groups = defaultdict(lambda: [])
for value, group in enumerate(assignments):
groups[group].append(value)
result = []
for group in groups.itervalues():
group.sort()
result.append(tuple(group))
result.sort()
return tuple(result)
@for_each_model()
def test_load_and_dump(Model, EXAMPLE, *unused):
model = Model()
model.load(EXAMPLE)
expected = EXAMPLE
actual = model.dump()
assert_close(expected, actual)
def iter_valid_sizes(example, max_size, min_size=2):
max_size = 5 # note: overrides the max_size argument, so sample sizes above 5 are never tested
dataset_size = example.get('dataset_size', float('inf'))
sizes = [
size
for size in xrange(min_size, max_size + 1)
if size <= dataset_size
]
assert sizes, 'no valid sizes to test'
for size in sizes:
print 'sample_size = {}'.format(size)
yield size
@for_each_model()
def test_sample_matches_score_counts(Model, EXAMPLE, sample_count):
for size in iter_valid_sizes(EXAMPLE, max_size=10):
model = Model()
model.load(EXAMPLE)
samples = []
probs_dict = {}
for _ in xrange(sample_count):
value = model.sample_assignments(size)
sample = canonicalize(value)
samples.append(sample)
if sample not in probs_dict:
assignments = dict(enumerate(value))
counts = count_assignments(assignments)
prob = math.exp(model.score_counts(counts))
probs_dict[sample] = prob
# renormalize here; test normalization separately
total = sum(probs_dict.values())
for key in probs_dict:
probs_dict[key] /= total
gof = discrete_goodness_of_fit(samples, probs_dict, plot=True)
print '{} gof = {:0.3g}'.format(Model.__name__, gof)
assert_greater(gof, MIN_GOODNESS_OF_FIT)
@for_each_model()
def test_score_counts_is_normalized(Model, EXAMPLE, sample_count):
for sample_size in iter_valid_sizes(EXAMPLE, max_size=10):
model = Model()
model.load(EXAMPLE)
if Model.__name__ == 'LowEntropy' and sample_size < model.dataset_size:
print 'WARNING LowEntropy.score_counts normalization is imprecise'
print ' when sample_size < dataset_size'
tol = 0.5
else:
tol = 0.01
probs_dict = {}
for _ in xrange(sample_count):
value = model.sample_assignments(sample_size)
sample = canonicalize(value)
if sample not in probs_dict:
assignments = dict(enumerate(value))
counts = count_assignments(assignments)
prob = math.exp(model.score_counts(counts))
probs_dict[sample] = prob
total = sum(probs_dict.values())
assert_less(abs(total - 1), tol, 'not normalized: {}'.format(total))
def add_to_counts(counts, pos):
counts = counts[:]
counts[pos] += 1
return counts
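# Quick illustration (not from the original source): add_to_counts copies the
# list before incrementing, so the caller's counts are left untouched, e.g.
#   add_to_counts([3, 1], 0) == [4, 1]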
@for_each_model()
def test_score_add_value_matches_score_counts(Model, EXAMPLE, sample_count):
for sample_size in iter_valid_sizes(EXAMPLE, min_size=2, max_size=10):
model = Model()
model.load(EXAMPLE)
samples = set(
canonicalize(model.sample_assignments(sample_size - 1))
for _ in xrange(sample_count)
)
for sample in samples:
nonempty_group_count = len(sample)
counts = map(len, sample)
actual = numpy.zeros(len(counts) + 1)
expected = numpy.zeros(len(counts) + 1)
# add to existing group
for i, group in enumerate(sample):
group_size = len(sample[i])
expected[i] = model.score_counts(add_to_counts(counts, i))
actual[i] = model.score_add_value(
group_size,
nonempty_group_count,
sample_size - 1)
# add to new group
i = len(counts)
group_size = 0
expected[i] = model.score_counts(counts + [1])
actual[i] = model.score_add_value(
group_size,
nonempty_group_count,
sample_size - 1)
actual = scores_to_probs(actual)
expected = scores_to_probs(expected)
print actual, expected
assert_close(actual, expected, tol=0.05)
@for_each_model(lambda Model: hasattr(Model, 'Mixture'))
def test_mixture_score_matches_score_add_value(Model, EXAMPLE, *unused):
sample_count = 200
model = Model()
model.load(EXAMPLE)
if Model.__name__ == 'LowEntropy' and sample_count > model.dataset_size:
raise SkipTest('skipping trivial example')
assignment_vector = model.sample_assignments(sample_count)
assignments = dict(enumerate(assignment_vector))
nonempty_counts = count_assignments(assignments)
nonempty_group_count = len(nonempty_counts)
assert_greater(nonempty_group_count, 1, "test is inaccurate")
def check_counts(mixture, counts, empty_group_count):
#print 'counts =', counts
empty_groupids = frozenset(mixture.empty_groupids)
assert_equal(len(empty_groupids), empty_group_count)
for groupid in empty_groupids:
assert_equal(counts[groupid], 0)
def check_scores(mixture, counts, empty_group_count):
sample_count = sum(counts)
nonempty_group_count = len(counts) - empty_group_count
expected = [
model.score_add_value(
group_size,
nonempty_group_count,
sample_count,
empty_group_count)
for group_size in counts
]
noise = numpy.random.randn(len(counts))
actual = numpy.zeros(len(counts), dtype=numpy.float32)
actual[:] = noise
mixture.score_value(model, actual)
assert_close(actual, expected)
return actual
for empty_group_count in [1, 10]:
print 'empty_group_count =', empty_group_count
counts = nonempty_counts + [0] * empty_group_count
numpy.random.shuffle(counts)
mixture = Model.Mixture()
id_tracker = MixtureIdTracker()
print 'init'
mixture.init(model, counts)
id_tracker.init(len(counts))
check_counts(mixture, counts, empty_group_count)
check_scores(mixture, counts, empty_group_count)
print 'adding'
groupids = []
for _ in xrange(sample_count):
check_counts(mixture, counts, empty_group_count)
scores = check_scores(mixture, counts, empty_group_count)
probs = scores_to_probs(scores)
groupid = sample_discrete(probs)
expected_group_added = (counts[groupid] == 0)
counts[groupid] += 1
actual_group_added = mixture.add_value(model, groupid)
assert_equal(actual_group_added, expected_group_added)
groupids.append(groupid)
if actual_group_added:
id_tracker.add_group()
counts.append(0)
check_counts(mixture, counts, empty_group_count)
check_scores(mixture, counts, empty_group_count)
print 'removing'
for global_groupid in groupids:
groupid = id_tracker.global_to_packed(global_groupid)
counts[groupid] -= 1
expected_group_removed = (counts[groupid] == 0)
actual_group_removed = mixture.remove_value(model, groupid)
assert_equal(actual_group_removed, expected_group_removed)
if expected_group_removed:
id_tracker.remove_group(groupid)
back = counts.pop()
if groupid < len(counts):
counts[groupid] = back
check_counts(mixture, counts, empty_group_count)
check_scores(mixture, counts, empty_group_count)
|
|
"""
Authenticator to use GitHub OAuth with JupyterHub
"""
import json
import os
import re
import string
import warnings
from tornado.auth import OAuth2Mixin
from tornado import web
from tornado.httputil import url_concat
from tornado.httpclient import HTTPRequest, AsyncHTTPClient, HTTPError
from jupyterhub.auth import LocalAuthenticator
from traitlets import List, Set, Unicode, default
from .common import next_page_from_links
from .oauth2 import OAuthLoginHandler, OAuthenticator
def _api_headers(access_token):
return {
"Accept": "application/json",
"User-Agent": "JupyterHub",
"Authorization": "token {}".format(access_token),
}
class GitHubOAuthenticator(OAuthenticator):
# see github_scopes.md for details about scope config
# set scopes via config, e.g.
# c.GitHubOAuthenticator.scope = ['read:org']
login_service = "GitHub"
github_url = Unicode("https://github.com", config=True)
@default("github_url")
def _github_url_default(self):
github_url = os.environ.get("GITHUB_URL")
if not github_url:
# fallback on older GITHUB_HOST config,
# treated the same as GITHUB_URL
host = os.environ.get("GITHUB_HOST")
if host:
if os.environ.get("GITHUB_HTTP"):
protocol = "http"
warnings.warn(
'Use of GITHUB_HOST with GITHUB_HTTP might be deprecated in the future. '
'Use GITHUB_URL=http://{} to set host and protocol together.'.format(
host
),
PendingDeprecationWarning,
)
else:
protocol = "https"
github_url = "{}://{}".format(protocol, host)
if github_url:
if '://' not in github_url:
# ensure protocol is included, assume https if missing
github_url = 'https://' + github_url
return github_url
else:
# nothing specified, this is the true default
github_url = "https://github.com"
# ensure no trailing slash
return github_url.rstrip("/")
github_api = Unicode("https://api.github.com", config=True)
@default("github_api")
def _github_api_default(self):
if self.github_url == "https://github.com":
return "https://api.github.com"
else:
return self.github_url + "/api/v3"
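    # Illustrative note (not part of the upstream code): for a GitHub
    # Enterprise deployment started with GITHUB_URL=https://github.example.com,
    # the defaults above resolve to
    #   github_url == 'https://github.example.com'
    #   github_api == 'https://github.example.com/api/v3'
    # while the public defaults remain github.com / api.github.com.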
@default("authorize_url")
def _authorize_url_default(self):
return "%s/login/oauth/authorize" % (self.github_url)
@default("token_url")
def _token_url_default(self):
return "%s/login/oauth/access_token" % (self.github_url)
# deprecated names
github_client_id = Unicode(config=True, help="DEPRECATED")
def _github_client_id_changed(self, name, old, new):
self.log.warning("github_client_id is deprecated, use client_id")
self.client_id = new
github_client_secret = Unicode(config=True, help="DEPRECATED")
def _github_client_secret_changed(self, name, old, new):
self.log.warning("github_client_secret is deprecated, use client_secret")
self.client_secret = new
client_id_env = 'GITHUB_CLIENT_ID'
client_secret_env = 'GITHUB_CLIENT_SECRET'
github_organization_whitelist = Set(
config=True, help="Automatically whitelist members of selected organizations"
)
async def authenticate(self, handler, data=None):
"""We set up auth_state based on additional GitHub info if we
receive it.
"""
code = handler.get_argument("code")
# TODO: Configure the curl_httpclient for tornado
http_client = AsyncHTTPClient()
# Exchange the OAuth code for a GitHub Access Token
#
# See: https://developer.github.com/v3/oauth/
# GitHub specifies a POST request yet requires URL parameters
params = dict(
client_id=self.client_id, client_secret=self.client_secret, code=code
)
url = url_concat(self.token_url, params)
req = HTTPRequest(
url,
method="POST",
headers={"Accept": "application/json"},
body='', # Body is required for a POST...
)
resp = await http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
if 'access_token' in resp_json:
access_token = resp_json['access_token']
elif 'error_description' in resp_json:
raise HTTPError(
403,
"An access token was not returned: {}".format(
resp_json['error_description']
),
)
else:
            raise HTTPError(500, "Bad response: {}".format(resp))
# Determine who the logged in user is
req = HTTPRequest(
self.github_api + "/user", method="GET", headers=_api_headers(access_token)
)
resp = await http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
username = resp_json["login"]
        # username is now the GitHub login name.
if not username:
return None
# Check if user is a member of any whitelisted organizations.
# This check is performed here, as it requires `access_token`.
if self.github_organization_whitelist:
for org in self.github_organization_whitelist:
user_in_org = await self._check_organization_whitelist(
org, username, access_token
)
if user_in_org:
break
else: # User not found in member list for any organisation
self.log.warning("User %s is not in org whitelist", username)
return None
userdict = {"name": username}
# Now we set up auth_state
userdict["auth_state"] = auth_state = {}
# Save the access token and full GitHub reply (name, id, email) in auth state
# These can be used for user provisioning in the Lab/Notebook environment.
# e.g.
# 1) stash the access token
# 2) use the GitHub ID as the id
# 3) set up name/email for .gitconfig
auth_state['access_token'] = access_token
# store the whole user model in auth_state.github_user
auth_state['github_user'] = resp_json
# A public email will return in the initial query (assuming default scope).
# Private will not.
return userdict
async def _check_organization_whitelist(self, org, username, access_token):
http_client = AsyncHTTPClient()
headers = _api_headers(access_token)
# Check membership of user `username` for organization `org` via api [check-membership](https://developer.github.com/v3/orgs/members/#check-membership)
# With empty scope (even if authenticated by an org member), this
        # will only return public org members. You want 'read:org' in order
# to be able to iterate through all members.
check_membership_url = "%s/orgs/%s/members/%s" % (
self.github_api,
org,
username,
)
req = HTTPRequest(check_membership_url, method="GET", headers=headers)
self.log.debug(
"Checking GitHub organization membership: %s in %s?", username, org
)
resp = await http_client.fetch(req, raise_error=False)
if resp.code == 204:
self.log.info("Allowing %s as member of %s", username, org)
return True
else:
try:
resp_json = json.loads((resp.body or b'').decode('utf8', 'replace'))
message = resp_json.get('message', '')
except ValueError:
message = ''
self.log.debug(
"%s does not appear to be a member of %s (status=%s): %s",
username,
org,
resp.code,
message,
)
return False
class LocalGitHubOAuthenticator(LocalAuthenticator, GitHubOAuthenticator):
"""A version that mixes in local system user creation"""
pass
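# --- Illustrative usage sketch (not part of this module) ---
# A minimal jupyterhub_config.py for this authenticator; every value below is a
# placeholder, and the import path assumes the standard oauthenticator package
# layout:
#
#   c.JupyterHub.authenticator_class = 'oauthenticator.github.GitHubOAuthenticator'
#   c.GitHubOAuthenticator.oauth_callback_url = 'https://hub.example.com/hub/oauth_callback'
#   c.GitHubOAuthenticator.client_id = 'your-client-id'
#   c.GitHubOAuthenticator.client_secret = 'your-client-secret'
#   c.GitHubOAuthenticator.github_organization_whitelist = {'my-org'}
#   c.GitHubOAuthenticator.scope = ['read:org']  # needed to check private org membership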
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
import mock
from oslo.utils import timeutils
import six
from six.moves.urllib import parse as urlparse
from testtools import matchers
from keystoneclient import session
from keystoneclient.tests.v3 import client_fixtures
from keystoneclient.tests.v3 import utils
from keystoneclient.v3.contrib.oauth1 import access_tokens
from keystoneclient.v3.contrib.oauth1 import auth
from keystoneclient.v3.contrib.oauth1 import consumers
from keystoneclient.v3.contrib.oauth1 import request_tokens
try:
import oauthlib
from oauthlib import oauth1
except ImportError:
oauth1 = None
class BaseTest(utils.TestCase):
def setUp(self):
super(BaseTest, self).setUp()
if oauth1 is None:
self.skipTest('oauthlib package not available')
class ConsumerTests(BaseTest, utils.CrudTests):
def setUp(self):
super(ConsumerTests, self).setUp()
self.key = 'consumer'
self.collection_key = 'consumers'
self.model = consumers.Consumer
self.manager = self.client.oauth1.consumers
self.path_prefix = 'OS-OAUTH1'
def new_ref(self, **kwargs):
kwargs = super(ConsumerTests, self).new_ref(**kwargs)
kwargs.setdefault('description', uuid.uuid4().hex)
return kwargs
def test_description_is_optional(self):
consumer_id = uuid.uuid4().hex
resp_ref = {'consumer': {'description': None,
'id': consumer_id}}
self.stub_url('POST',
[self.path_prefix, self.collection_key],
status_code=201, json=resp_ref)
consumer = self.manager.create()
self.assertEqual(consumer_id, consumer.id)
self.assertIsNone(consumer.description)
def test_description_not_included(self):
consumer_id = uuid.uuid4().hex
resp_ref = {'consumer': {'id': consumer_id}}
self.stub_url('POST',
[self.path_prefix, self.collection_key],
status_code=201, json=resp_ref)
consumer = self.manager.create()
self.assertEqual(consumer_id, consumer.id)
class TokenTests(BaseTest):
def _new_oauth_token(self):
key = uuid.uuid4().hex
secret = uuid.uuid4().hex
params = {'oauth_token': key, 'oauth_token_secret': secret}
token = urlparse.urlencode(params)
return (key, secret, token)
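    # For illustration (not from the original source): the `token` string above
    # is the urlencoded body the stubbed server responses hand back, e.g.
    #   'oauth_token=<key>&oauth_token_secret=<secret>'
    # (parameter order may vary, since it is built from a dict).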
def _new_oauth_token_with_expires_at(self):
key, secret, token = self._new_oauth_token()
expires_at = timeutils.strtime()
params = {'oauth_token': key,
'oauth_token_secret': secret,
'oauth_expires_at': expires_at}
token = urlparse.urlencode(params)
return (key, secret, expires_at, token)
def _validate_oauth_headers(self, auth_header, oauth_client):
"""Assert that the data in the headers matches the data
that is produced from oauthlib.
"""
self.assertThat(auth_header, matchers.StartsWith('OAuth '))
auth_header = auth_header[len('OAuth '):]
# NOTE(stevemar): In newer versions of oauthlib there is
# an additional argument for getting oauth parameters.
# Adding a conditional here to revert back to no arguments
# if an earlier version is detected.
if tuple(oauthlib.__version__.split('.')) > ('0', '6', '1'):
header_params = oauth_client.get_oauth_params(None)
else:
header_params = oauth_client.get_oauth_params()
parameters = dict(header_params)
self.assertEqual('HMAC-SHA1', parameters['oauth_signature_method'])
self.assertEqual('1.0', parameters['oauth_version'])
self.assertIsInstance(parameters['oauth_nonce'], six.string_types)
self.assertEqual(oauth_client.client_key,
parameters['oauth_consumer_key'])
if oauth_client.resource_owner_key:
self.assertEqual(oauth_client.resource_owner_key,
parameters['oauth_token'],)
if oauth_client.verifier:
self.assertEqual(oauth_client.verifier,
parameters['oauth_verifier'])
if oauth_client.callback_uri:
self.assertEqual(oauth_client.callback_uri,
parameters['oauth_callback'])
if oauth_client.timestamp:
self.assertEqual(oauth_client.timestamp,
parameters['oauth_timestamp'])
return parameters
class RequestTokenTests(TokenTests):
def setUp(self):
super(RequestTokenTests, self).setUp()
self.model = request_tokens.RequestToken
self.manager = self.client.oauth1.request_tokens
self.path_prefix = 'OS-OAUTH1'
def test_authorize_request_token(self):
request_key = uuid.uuid4().hex
info = {'id': request_key,
'key': request_key,
'secret': uuid.uuid4().hex}
request_token = request_tokens.RequestToken(self.manager, info)
verifier = uuid.uuid4().hex
resp_ref = {'token': {'oauth_verifier': verifier}}
self.stub_url('PUT',
[self.path_prefix, 'authorize', request_key],
status_code=200, json=resp_ref)
# Assert the manager is returning the expected data
role_id = uuid.uuid4().hex
token = request_token.authorize([role_id])
self.assertEqual(verifier, token.oauth_verifier)
# Assert that the request was sent in the expected structure
exp_body = {'roles': [{'id': role_id}]}
self.assertRequestBodyIs(json=exp_body)
def test_create_request_token(self):
project_id = uuid.uuid4().hex
consumer_key = uuid.uuid4().hex
consumer_secret = uuid.uuid4().hex
request_key, request_secret, resp_ref = self._new_oauth_token()
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
self.stub_url('POST', [self.path_prefix, 'request_token'],
status_code=201, text=resp_ref, headers=headers)
# Assert the manager is returning request token object
request_token = self.manager.create(consumer_key, consumer_secret,
project_id)
self.assertIsInstance(request_token, self.model)
self.assertEqual(request_key, request_token.key)
self.assertEqual(request_secret, request_token.secret)
# Assert that the project id is in the header
self.assertRequestHeaderEqual('requested_project_id', project_id)
req_headers = self.requests.last_request.headers
oauth_client = oauth1.Client(consumer_key,
client_secret=consumer_secret,
signature_method=oauth1.SIGNATURE_HMAC,
callback_uri="oob")
self._validate_oauth_headers(req_headers['Authorization'],
oauth_client)
class AccessTokenTests(TokenTests):
def setUp(self):
super(AccessTokenTests, self).setUp()
self.manager = self.client.oauth1.access_tokens
self.model = access_tokens.AccessToken
self.path_prefix = 'OS-OAUTH1'
def test_create_access_token_expires_at(self):
verifier = uuid.uuid4().hex
consumer_key = uuid.uuid4().hex
consumer_secret = uuid.uuid4().hex
request_key = uuid.uuid4().hex
request_secret = uuid.uuid4().hex
t = self._new_oauth_token_with_expires_at()
access_key, access_secret, expires_at, resp_ref = t
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
self.stub_url('POST', [self.path_prefix, 'access_token'],
status_code=201, text=resp_ref, headers=headers)
# Assert that the manager creates an access token object
access_token = self.manager.create(consumer_key, consumer_secret,
request_key, request_secret,
verifier)
self.assertIsInstance(access_token, self.model)
self.assertEqual(access_key, access_token.key)
self.assertEqual(access_secret, access_token.secret)
self.assertEqual(expires_at, access_token.expires)
req_headers = self.requests.last_request.headers
oauth_client = oauth1.Client(consumer_key,
client_secret=consumer_secret,
resource_owner_key=request_key,
resource_owner_secret=request_secret,
signature_method=oauth1.SIGNATURE_HMAC,
verifier=verifier,
timestamp=expires_at)
self._validate_oauth_headers(req_headers['Authorization'],
oauth_client)
class AuthenticateWithOAuthTests(TokenTests):
def setUp(self):
super(AuthenticateWithOAuthTests, self).setUp()
if oauth1 is None:
self.skipTest('optional package oauthlib is not installed')
def test_oauth_authenticate_success(self):
consumer_key = uuid.uuid4().hex
consumer_secret = uuid.uuid4().hex
access_key = uuid.uuid4().hex
access_secret = uuid.uuid4().hex
# Just use an existing project scoped token and change
# the methods to oauth1, and add an OS-OAUTH1 section.
oauth_token = client_fixtures.project_scoped_token()
oauth_token['methods'] = ["oauth1"]
oauth_token['OS-OAUTH1'] = {"consumer_id": consumer_key,
"access_token_id": access_key}
self.stub_auth(json=oauth_token)
a = auth.OAuth(self.TEST_URL, consumer_key=consumer_key,
consumer_secret=consumer_secret,
access_key=access_key,
access_secret=access_secret)
s = session.Session(auth=a)
t = s.get_token()
self.assertEqual(self.TEST_TOKEN, t)
OAUTH_REQUEST_BODY = {
"auth": {
"identity": {
"methods": ["oauth1"],
"oauth1": {}
}
}
}
self.assertRequestBodyIs(json=OAUTH_REQUEST_BODY)
# Assert that the headers have the same oauthlib data
req_headers = self.requests.last_request.headers
oauth_client = oauth1.Client(consumer_key,
client_secret=consumer_secret,
resource_owner_key=access_key,
resource_owner_secret=access_secret,
signature_method=oauth1.SIGNATURE_HMAC)
self._validate_oauth_headers(req_headers['Authorization'],
oauth_client)
class TestOAuthLibModule(utils.TestCase):
def test_no_oauthlib_installed(self):
with mock.patch.object(auth, 'oauth1', None):
self.assertRaises(NotImplementedError,
auth.OAuth,
self.TEST_URL,
consumer_key=uuid.uuid4().hex,
consumer_secret=uuid.uuid4().hex,
access_key=uuid.uuid4().hex,
access_secret=uuid.uuid4().hex)
|
|
import copy
from flask import current_app as app, abort
from eve.utils import config, debug_error_message, ParsedRequest
from werkzeug.exceptions import BadRequestKeyError
def versioned_id_field(resource_settings):
""" Shorthand to add two commonly added versioning parameters.
.. versionadded: 0.4
"""
return resource_settings['id_field'] + app.config['VERSION_ID_SUFFIX']
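# Illustrative example (values assume Eve's defaults): with id_field '_id' and
# VERSION_ID_SUFFIX '_document', versioned_id_field(...) returns '_id_document',
# the key under which the original document id is kept in the shadow collection.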
def resolve_document_version(document, resource, method, latest_doc=None):
""" Version number logic for all methods.
:param document: the document in question.
:param resource: the resource of the request/document.
    :param method: method corresponding to the request.
:param latest_doc: the most recent version of the document.
.. versionadded:: 0.4
"""
resource_def = app.config['DOMAIN'][resource]
version = app.config['VERSION']
latest_version = app.config['LATEST_VERSION']
if resource_def['versioning'] is True:
        # especially on collection endpoints, we don't want to incur an extra
# lookup if we are already pulling the latest version
if method == 'GET' and latest_doc is None:
if version not in document:
# well it should be... the api designer must have turned on
# versioning after data was already in the collection or the
# collection has been modified without respecting versioning
document[version] = 1 # the first saved version will be 2
document[latest_version] = document[version]
# include latest_doc if the request is for an older version so that we
# can set the latest_version field in the response
if method == 'GET' and latest_doc is not None:
if version not in latest_doc:
# well it should be... the api designer must have turned on
# versioning after data was already in the collection or the
# collection has been modified without respecting versioning
document[version] = 1 # the first saved version will be 2
document[latest_version] = document[version]
else:
document[latest_version] = latest_doc[version]
if version not in document:
# this version was put in the database before versioning
# was turned on or outside of Eve
document[version] = 1
if method == 'POST':
# this one is easy! it is a new document
document[version] = 1
if method == 'PUT' or method == 'PATCH' or \
(method == 'DELETE' and resource_def['soft_delete'] is True):
if not latest_doc:
abort(500, description=debug_error_message(
'I need the latest document here!'
))
if version in latest_doc:
# all is right in the world :)
document[version] = latest_doc[version] + 1
else:
# if versioning was just turned on, then we will start
# versioning now. if the db was modified outside of Eve or
            # versioning was turned off for a while, version numbers will
# not be consistent! you have been warned
document[version] = 1
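# Behaviour sketch (illustrative, assuming versioning is enabled for the
# resource):
#   POST                              -> document[VERSION] = 1
#   PUT/PATCH with latest doc at v3   -> document[VERSION] = 4
#   GET of a pre-versioning document  -> document[VERSION] = LATEST_VERSION = 1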
def late_versioning_catch(document, resource):
""" Insert versioning copy of document for the previous version of a
document if it is missing. Intended for PUT and PATCH.
:param resource: the resource of the request/document.
    :param document: the document being written by PUT or PATCH.
.. versionadded:: 0.4
"""
resource_def = app.config['DOMAIN'][resource]
version = app.config['VERSION']
if resource_def['versioning'] is True:
# TODO: Could directly check that there are no shadow copies for this
# document. If there are shadow copies but the version field is in the
# stored document, then something is wrong. (Modified outside of Eve?)
if version not in document:
            # The API maintainer must have turned on versioning after the
# document was added to the database, so let's add this old version
# to the shadow collection now as if it was a new document.
resolve_document_version(document, resource, 'POST')
insert_versioning_documents(resource, document)
def insert_versioning_documents(resource, documents):
""" Insert versioning copy of document. Intended for POST, PUT, and PATCH.
:param resource: the resource of the request/document.
    :param documents: the documents to be written by POST, PUT, or PATCH.
.. versionadded:: 0.4
"""
resource_def = app.config['DOMAIN'][resource]
_id = resource_def['id_field']
# push back versioned items if applicable
# note: MongoDB doesn't have transactions! if the server dies, no
# history will be saved.
if resource_def['versioning'] is True:
# force input as lists
if not isinstance(documents, list):
documents = [documents]
        # build versioning documents
version = app.config['VERSION']
versioned_documents = []
for index, document in enumerate(documents):
ver_doc = {}
# push normal fields
fields = versioned_fields(resource_def)
for field in document:
if field in fields:
ver_doc[field] = document[field]
# push special fields
ver_doc[versioned_id_field(resource_def)] = document[_id]
ver_doc[version] = document[version]
# add document to the stack
versioned_documents.append(ver_doc)
# bulk insert
app.data.insert(resource + app.config['VERSIONS'], versioned_documents)
def versioned_fields(resource_def):
""" Returns a list of versioned fields for a resource.
:param resource_def: a resource definition.
.. versionchanged:: 0.6
Added DELETED as versioned field for soft delete (#335)
.. versionchanged:: 0.5
ETAG is now a versioned field (#369).
.. versionadded:: 0.4
"""
if resource_def['versioning'] is not True:
return []
schema = resource_def['schema']
fields = [f for f in schema
if schema[f].get('versioned', True) is True and
f != resource_def['id_field']]
fields.extend((app.config['LAST_UPDATED'],
app.config['ETAG'],
app.config['DELETED'],
))
return fields
def diff_document(resource_def, old_doc, new_doc):
""" Returns a list of added or modified fields.
:param resource_def: a resource definition.
:param old_doc: the document to compare against.
:param new_doc: the document in question.
.. versionadded:: 0.4
"""
diff = {}
fields = list(resource_def['schema'].keys()) + [
app.config['VERSION'],
app.config['LATEST_VERSION'],
resource_def['id_field'],
app.config['LAST_UPDATED'],
app.config['DATE_CREATED'],
app.config['ETAG'],
app.config['LINKS']]
if resource_def['soft_delete'] is True:
fields.append(app.config['DELETED'])
for field in fields:
if field in new_doc and \
(field not in old_doc or new_doc[field] != old_doc[field]):
diff[field] = new_doc[field]
# This method does not show when fields are deleted.
for field in app.config['VERSION_DIFF_INCLUDE']:
if field in new_doc:
diff[field] = new_doc[field]
return diff
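# Illustrative example (hypothetical field names, assuming Eve's default
# '_etag' meta field): with 'name' and 'price' in the schema,
#   old_doc = {'name': 'spam', 'price': 1}
#   new_doc = {'name': 'spam', 'price': 2, '_etag': 'abc'}
# diff_document(...) yields {'price': 2, '_etag': 'abc'}: only added or changed
# fields are reported, and deletions do not appear in the diff.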
def synthesize_versioned_document(document, delta, resource_def):
""" Synthesizes an old document from the latest document and the values of
all versioned fields from the old version. This is accomplished by removing
all versioned fields from the latest document before updating fields to
ensure that fields with required=False can be removed.
:param document: the current version of a document.
:param delta: the versioned fields from a specific document version.
:param resource_def: a resource definition.
.. versionadded:: 0.4
"""
old_doc = copy.deepcopy(document)
id_field = versioned_id_field(resource_def)
if id_field not in delta:
abort(400, description=debug_error_message(
'You must include %s in any projection with a version query.'
% id_field
))
delta[resource_def['id_field']] = delta[id_field]
del delta[id_field]
# remove all versioned fields from document
fields = versioned_fields(resource_def)
for field in document:
if field in fields:
del old_doc[field]
# add versioned fields
old_doc.update(delta)
return old_doc
def get_old_document(resource, req, lookup, document, version):
""" Returns an old document if appropriate, otherwise returns a copy of the
given document.
:param resource: the name of the resource.
:param req: the parsed request object.
:param lookup: a dictionary of lookup parameters.
:param document: the current version of the document.
:param version: the value of the version request parameter.
.. versionadded:: 0.4
"""
if version != 'all' and version != 'diffs' and version is not None:
try:
version = int(version)
assert version > 0
except (ValueError, BadRequestKeyError, AssertionError):
abort(400, description=debug_error_message(
'Document version number should be an int greater than 0'
))
# parameters to find specific document version
resource_def = config.DOMAIN[resource]
if versioned_id_field(resource_def) not in lookup:
lookup[versioned_id_field(resource_def)] \
= lookup[resource_def['id_field']]
del lookup[resource_def['id_field']]
lookup[config.VERSION] = version
# synthesize old document from latest and delta
delta = app.data.find_one(resource + config.VERSIONS, req, **lookup)
if not delta:
abort(404)
old_document = synthesize_versioned_document(
document, delta, resource_def)
else:
old_document = copy.deepcopy(document)
return old_document
def get_data_version_relation_document(data_relation, reference, latest=False):
""" Returns document at the version specified in data_relation, or at the
latest version if passed `latest=True`. Returns None if data_relation
cannot be satisfied.
:param data_relation: the schema definition describing the data_relation.
:param reference: a dictionary with a value_field and a version_field.
    :param latest: whether to return the latest version of the document
                   instead of the version specified in `reference`.
.. versionadded:: 0.4
"""
value_field = data_relation['field']
version_field = app.config['VERSION']
collection = data_relation['resource']
versioned_collection = collection + config.VERSIONS
resource_def = app.config['DOMAIN'][data_relation['resource']]
id_field = resource_def['id_field']
# Fetch document data at the referenced version
query = {version_field: reference[version_field]}
if value_field == id_field:
# Versioned documents store the primary id in a different field
query[versioned_id_field(resource_def)] = reference[value_field]
elif value_field not in versioned_fields(resource_def):
# The relation value field is unversioned, and will not be present in
# the versioned collection. Need to find id field for version query
req = ParsedRequest()
if resource_def['soft_delete']:
req.show_deleted = True
latest_version = app.data.find_one(
collection, req, **{value_field: reference[value_field]})
if not latest_version:
return None
query[versioned_id_field(resource_def)] = latest_version[id_field]
else:
# Field will be present in the versioned collection
query[value_field] = reference[value_field]
referenced_version = app.data.find_one(versioned_collection, None, **query)
# support late versioning
if referenced_version is None and reference[version_field] == 1:
# there is a chance this document hasn't been saved
# since versioning was turned on
referenced_version = missing_version_field(data_relation, reference)
return referenced_version # v1 is both referenced and latest
if referenced_version is None:
return None # The referenced document version was not found
# Fetch the latest version of this document to use in version synthesis
query = {id_field: referenced_version[versioned_id_field(resource_def)]}
req = ParsedRequest()
if resource_def['soft_delete']:
# Still return latest after soft delete. It is needed to synthesize
# full document version.
req.show_deleted = True
latest_version = app.data.find_one(collection, req, **query)
if latest is True:
return latest_version
    # Synthesize referenced version from latest and versioned data
document = synthesize_versioned_document(
latest_version, referenced_version, resource_def)
return document
def missing_version_field(data_relation, reference):
""" Returns a document if it matches the value_field but doesn't have a
_version field. This is the scenario when there is data in the database
before document versioning is turned on.
:param data_relation: the schema definition describing the data_relation.
:param reference: a dictionary with a value_field and a version_field.
.. versionadded:: 0.4
"""
value_field = data_relation['field']
version_field = app.config['VERSION']
collection = data_relation['resource']
query = {}
query[value_field] = reference[value_field]
query[version_field] = {'$exists': False}
return app.data.find_one(collection, None, **query)
|